summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qdss_mhi.txt15
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts4
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi89
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi86
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi50
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-common.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-gpu.dtsi54
-rw-r--r--arch/arm/boot/dts/qcom/sdm660.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-pinctrl.dtsi100
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts24
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi58
-rw-r--r--arch/arm/configs/msmcortex_defconfig1
-rw-r--r--arch/arm/configs/sdm660-perf_defconfig1
-rw-r--r--arch/arm/configs/sdm660_defconfig1
-rw-r--r--arch/arm64/configs/msm-auto-gvm-perf_defconfig13
-rw-r--r--arch/arm64/configs/msm-auto-gvm_defconfig13
-rw-r--r--arch/arm64/configs/msm-auto-perf_defconfig19
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig1
-rw-r--r--arch/arm64/configs/msmcortex_defconfig1
-rw-r--r--arch/arm64/configs/msmcortex_mediabox-perf_defconfig2
-rw-r--r--arch/arm64/configs/msmcortex_mediabox_defconfig2
-rw-r--r--arch/arm64/configs/sdm660-perf_defconfig1
-rw-r--r--arch/arm64/configs/sdm660_defconfig1
-rw-r--r--arch/arm64/include/asm/signal32.h2
-rw-r--r--arch/mips/include/asm/irq.h2
-rw-r--r--arch/parisc/kernel/syscall.S55
-rw-r--r--arch/sparc/include/asm/setup.h5
-rw-r--r--arch/sparc/kernel/smp_64.c6
-rw-r--r--block/bsg-lib.c1
-rw-r--r--crypto/Kconfig1
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c3
-rw-r--r--drivers/android/binder.c144
-rw-r--r--drivers/android/binder_alloc.c8
-rw-r--r--drivers/android/binder_alloc.h2
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/char/diag/diag_memorydevice.c1
-rw-r--r--drivers/char/diag/diagchar.h5
-rw-r--r--drivers/char/diag/diagchar_core.c35
-rw-r--r--drivers/char/diag/diagfwd.c10
-rw-r--r--drivers/clk/qcom/gpucc-sdm660.c1
-rw-r--r--drivers/clocksource/cs5535-clockevt.c3
-rw-r--r--drivers/cpufreq/Kconfig.arm2
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c2
-rw-r--r--drivers/gpu/drm/msm/sde/sde_rm.c76
-rw-r--r--drivers/gpu/drm/msm/sde/sde_rm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c2
-rw-r--r--drivers/gpu/msm/adreno.c20
-rw-r--r--drivers/gpu/msm/kgsl_sharedmem.c2
-rw-r--r--drivers/i2c/busses/i2c-at91.c3
-rw-r--r--drivers/i2c/busses/i2c-ismt.c5
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c6
-rw-r--r--drivers/irqchip/irq-crossbar.c3
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c37
-rw-r--r--drivers/leds/leds-qpnp-flash.c82
-rw-r--r--drivers/leds/leds-qpnp-wled.c97
-rw-r--r--drivers/md/linear.c3
-rw-r--r--drivers/media/cec/cec-core.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c5
-rw-r--r--drivers/media/platform/msm/sde/cec/sde_hdmi_cec.c18
-rw-r--r--drivers/mmc/core/core.c17
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/gs_usb.c10
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c3
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/wireless/ath/regd.c16
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c58
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c22
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/sysfs.c39
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c61
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c197
-rw-r--r--drivers/net/wireless/cnss2/debug.c4
-rw-r--r--drivers/net/wireless/cnss2/main.c214
-rw-r--r--drivers/net/wireless/cnss2/main.h7
-rw-r--r--drivers/net/wireless/cnss2/pci.c12
-rw-r--r--drivers/net/wireless/cnss2/qmi.c12
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c2
-rw-r--r--drivers/nfc/nq-nci.c53
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa.c5
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_utils.c8
-rw-r--r--drivers/power/supply/qcom/fg-core.h2
-rw-r--r--drivers/power/supply/qcom/qpnp-fg-gen3.c37
-rw-r--r--drivers/regulator/qpnp-regulator.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/soc/qcom/Kconfig8
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/glink_private.h2
-rw-r--r--drivers/soc/qcom/glink_smem_native_xprt.c2
-rw-r--r--drivers/soc/qcom/glink_ssr.c29
-rw-r--r--drivers/soc/qcom/icnss.c6
-rw-r--r--drivers/soc/qcom/pil-q6v5-mss.c8
-rw-r--r--drivers/soc/qcom/qdss_bridge.c463
-rw-r--r--drivers/soc/qcom/qdss_bridge.h37
-rw-r--r--drivers/soc/qcom/scm.c11
-rw-r--r--drivers/soc/qcom/subsys-pil-tz.c57
-rw-r--r--drivers/spi/Kconfig14
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi.c234
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c6
-rw-r--r--drivers/tty/goldfish.c2
-rw-r--r--drivers/tty/serial/msm_serial_hs.c49
-rw-r--r--drivers/tty/serial/sunhv.c6
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/config.c6
-rw-r--r--drivers/usb/core/devio.c6
-rw-r--r--drivers/usb/core/hub.c11
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c6
-rw-r--r--drivers/usb/gadget/configfs.c6
-rw-r--r--drivers/usb/musb/musb_core.c6
-rw-r--r--drivers/usb/musb/sunxi.c2
-rw-r--r--drivers/usb/serial/metro-usb.c1
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_host.c41
-rw-r--r--drivers/watchdog/kempld_wdt.c9
-rw-r--r--fs/block_dev.c15
-rw-r--r--fs/btrfs/send.c5
-rw-r--r--fs/ceph/mds_client.c9
-rw-r--r--fs/ext4/crypto_key.c6
-rw-r--r--fs/fscache/object-list.c7
-rw-r--r--fs/nfsd/nfs4callback.c8
-rw-r--r--fs/nfsd/nfs4state.c10
-rw-r--r--fs/nfsd/state.h1
-rw-r--r--fs/ocfs2/dlmglue.c105
-rw-r--r--fs/ocfs2/dlmglue.h18
-rw-r--r--fs/ocfs2/ocfs2.h1
-rw-r--r--include/asm-generic/percpu.h24
-rw-r--r--include/linux/key.h49
-rw-r--r--include/linux/mbus.h4
-rw-r--r--include/linux/memblock.h2
-rw-r--r--include/linux/mmc/core.h2
-rw-r--r--include/linux/sched.h9
-rw-r--r--include/linux/spi/spi.h35
-rw-r--r--include/linux/trace_events.h1
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/net/cnss2.h33
-rw-r--r--include/net/sctp/ulpevent.h6
-rw-r--r--include/trace/events/sched.h155
-rw-r--r--include/uapi/linux/mroute6.h1
-rw-r--r--include/uapi/linux/msm_kgsl.h1
-rw-r--r--include/uapi/linux/rds.h3
-rw-r--r--kernel/bpf/verifier.c3
-rw-r--r--kernel/events/core.c3
-rw-r--r--kernel/locking/lockdep.c11
-rw-r--r--kernel/sched/auto_group.c23
-rw-r--r--kernel/sched/core.c191
-rw-r--r--kernel/sched/cpufreq_sched.c2
-rw-r--r--kernel/sched/cpufreq_schedutil.c3
-rw-r--r--kernel/sched/deadline.c3
-rw-r--r--kernel/sched/fair.c375
-rw-r--r--kernel/sched/idle_task.c3
-rw-r--r--kernel/sched/rt.c3
-rw-r--r--kernel/sched/sched.h49
-rw-r--r--kernel/sched/stop_task.c3
-rw-r--r--kernel/sched/walt.c411
-rw-r--r--kernel/sched/walt.h2
-rw-r--r--lib/digsig.c6
-rw-r--r--mm/memblock.c5
-rw-r--r--mm/slab_common.c5
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dns_resolver/dns_key.c2
-rw-r--r--net/ipv4/ip_vti.c3
-rw-r--r--net/ipv6/ip6_gre.c21
-rw-r--r--net/ipv6/ip6_vti.c3
-rw-r--r--net/l2tp/l2tp_core.c14
-rw-r--r--net/l2tp/l2tp_core.h5
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/packet/af_packet.c28
-rw-r--r--net/tipc/msg.c2
-rw-r--r--security/keys/big_key.c4
-rw-r--r--security/keys/encrypted-keys/encrypted.c9
-rw-r--r--security/keys/gc.c8
-rw-r--r--security/keys/key.c41
-rw-r--r--security/keys/keyctl.c9
-rw-r--r--security/keys/keyring.c10
-rw-r--r--security/keys/proc.c7
-rw-r--r--security/keys/process_keys.c2
-rw-r--r--security/keys/request_key.c7
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/keys/trusted.c2
-rw-r--r--security/keys/user_defined.c4
-rw-r--r--sound/core/seq/seq_lock.c4
-rw-r--r--sound/core/seq/seq_lock.h12
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/soc/msm/apq8096-auto.c85
-rw-r--r--sound/usb/quirks.c1
199 files changed, 3471 insertions, 1552 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/qdss_mhi.txt b/Documentation/devicetree/bindings/arm/msm/qdss_mhi.txt
new file mode 100644
index 000000000000..928a4f4269a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qdss_mhi.txt
@@ -0,0 +1,15 @@
+Qualcomm Technologies, Inc. QDSS bridge Driver
+
+This driver enables routing of debug data from the modem
+subsystem to the APSS host.
+
+Required properties:
+-compatible : "qcom,qdss-mhi".
+-qcom,mhi : phandle of MHI Device to connect to.
+
+Example:
+ qcom,qdss-mhi {
+ compatible = "qcom,qdss-mhi";
+ qcom,mhi = <&mhi_0>;
+ };
+
diff --git a/Makefile b/Makefile
index c826d1ecc91d..c16d200334ef 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 93
+SUBLEVEL = 95
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 24e19deb1f28..8d728d63aea0 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -133,7 +133,7 @@ endif
KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
-CHECKFLAGS += -D__arm__
+CHECKFLAGS += -D__arm__ -m32
#Default value
head-y := arch/arm/kernel/head$(MMUEXT).o
diff --git a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts
index 082b04791dbd..93302bb3e65a 100644
--- a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts
+++ b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts
@@ -42,10 +42,6 @@
i2c@75b6000 { /* BLSP8 */
/* ADV7533 HDMI Bridge Chip removed on ADP Lite */
- adv7533@3d {
- status = "disabled";
- };
-
adv7533@39 {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 343c6a2ee2da..a71749375aa5 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -549,77 +549,6 @@
&sde_kms {
qcom,mdss-pref-prim-intf = "dsi";
- qcom,sde-plane-id-map {
- qcom,sde-plane-id@0 {
- reg = <0x0>;
- qcom,display-type = "primary";
- qcom,plane-name = "rgb0", "rgb1";
- qcom,plane-type = "primary";
- };
-
- qcom,sde-plane-id@1 {
- reg = <0x1>;
- qcom,display-type = "primary";
- qcom,plane-name = "vig0", "vig1";
- qcom,plane-type = "overlay";
- };
-
- qcom,sde-plane-id@2 {
- reg = <0x2>;
- qcom,display-type = "primary";
- qcom,plane-name = "cursor0";
- qcom,plane-type = "cursor";
- };
-
- qcom,sde-plane-id@3 {
- reg = <0x3>;
- qcom,display-type = "secondary";
- qcom,plane-name = "rgb2";
- qcom,plane-type = "primary";
- };
-
- qcom,sde-plane-id@4 {
- reg = <0x4>;
- qcom,display-type = "secondary";
- qcom,plane-name = "vig2";
- qcom,plane-type = "overlay";
- };
-
- qcom,sde-plane-id@5 {
- reg = <0x5>;
- qcom,display-type = "secondary";
- qcom,plane-name = "dma0";
- qcom,plane-type = "overlay";
- };
-
- qcom,sde-plane-id@6 {
- reg = <0x6>;
- qcom,display-type = "secondary";
- qcom,plane-name = "cursor1";
- qcom,plane-type = "cursor";
- };
-
- qcom,sde-plane-id@7 {
- reg = <0x7>;
- qcom,display-type = "tertiary";
- qcom,plane-name = "rgb3";
- qcom,plane-type = "primary";
- };
-
- qcom,sde-plane-id@8 {
- reg = <0x8>;
- qcom,display-type = "tertiary";
- qcom,plane-name = "vig3";
- qcom,plane-type = "overlay";
- };
-
- qcom,sde-plane-id@9 {
- reg = <0x9>;
- qcom,display-type = "tertiary";
- qcom,plane-name = "dma1";
- qcom,plane-type = "overlay";
- };
- };
};
&dsi_adv_7533_1 {
@@ -1055,24 +984,6 @@
gpio-key,wakeup;
debounce-interval = <15>;
};
-
- cam_snapshot {
- label = "cam_snapshot";
- gpios = <&pm8994_gpios 4 0x1>;
- linux,input-type = <1>;
- linux,code = <766>;
- gpio-key,wakeup;
- debounce-interval = <15>;
- };
-
- cam_focus {
- label = "cam_focus";
- gpios = <&pm8994_gpios 5 0x1>;
- linux,input-type = <1>;
- linux,code = <528>;
- gpio-key,wakeup;
- debounce-interval = <15>;
- };
};
sound-9335 {
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index 682a745b30e8..386cc7bc7b21 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -335,77 +335,6 @@
&sde_kms {
qcom,mdss-pref-prim-intf = "dsi";
- qcom,sde-plane-id-map {
- qcom,sde-plane-id@0 {
- reg = <0x0>;
- qcom,display-type = "primary";
- qcom,plane-name = "rgb0", "rgb1";
- qcom,plane-type = "primary";
- };
-
- qcom,sde-plane-id@1 {
- reg = <0x1>;
- qcom,display-type = "primary";
- qcom,plane-name = "vig0", "vig1";
- qcom,plane-type = "overlay";
- };
-
- qcom,sde-plane-id@2 {
- reg = <0x2>;
- qcom,display-type = "primary";
- qcom,plane-name = "cursor0";
- qcom,plane-type = "cursor";
- };
-
- qcom,sde-plane-id@3 {
- reg = <0x3>;
- qcom,display-type = "secondary";
- qcom,plane-name = "rgb2";
- qcom,plane-type = "primary";
- };
-
- qcom,sde-plane-id@4 {
- reg = <0x4>;
- qcom,display-type = "secondary";
- qcom,plane-name = "vig2";
- qcom,plane-type = "overlay";
- };
-
- qcom,sde-plane-id@5 {
- reg = <0x5>;
- qcom,display-type = "secondary";
- qcom,plane-name = "dma0";
- qcom,plane-type = "overlay";
- };
-
- qcom,sde-plane-id@6 {
- reg = <0x6>;
- qcom,display-type = "secondary";
- qcom,plane-name = "cursor1";
- qcom,plane-type = "cursor";
- };
-
- qcom,sde-plane-id@7 {
- reg = <0x7>;
- qcom,display-type = "tertiary";
- qcom,plane-name = "rgb3";
- qcom,plane-type = "primary";
- };
-
- qcom,sde-plane-id@8 {
- reg = <0x8>;
- qcom,display-type = "tertiary";
- qcom,plane-name = "vig3";
- qcom,plane-type = "overlay";
- };
-
- qcom,sde-plane-id@9 {
- reg = <0x9>;
- qcom,display-type = "tertiary";
- qcom,plane-name = "dma1";
- qcom,plane-type = "overlay";
- };
- };
};
&dsi_adv_7533_1 {
@@ -789,6 +718,14 @@
&adv7533_0_switch_suspend>;
adi,irq-gpio = <&tlmm 71 0x2002>;
adi,switch-gpio = <&tlmm 72 0x1>;
+ vddio-supply = <&pm8994_l17>;
+ qcom,supply-names = "vddio";
+ qcom,min-voltage-level = <1800000>;
+ qcom,max-voltage-level = <1800000>;
+ qcom,enable-load = <100000>;
+ qcom,disable-load = <100>;
+ qcom,post-on-sleep = <60>;
+
};
adv7533@39 {
@@ -809,6 +746,13 @@
&adv7533_1_switch_suspend>;
adi,irq-gpio = <&tlmm 73 0x2002>;
adi,switch-gpio = <&tlmm 74 0x0>;
+ vddio-supply = <&pm8994_l17>;
+ qcom,supply-names = "vddio";
+ qcom,min-voltage-level = <1800000>;
+ qcom,max-voltage-level = <1800000>;
+ qcom,enable-load = <100000>;
+ qcom,disable-load = <100>;
+
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi
index 2af3bf277096..0f90b02c5d20 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi
@@ -216,6 +216,55 @@
qcom,clock-rates = <24000000 0>;
};
+ eeprom3: qcom,eeprom@3 {
+ cell-index = <3>;
+ reg = <0x3>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8998_lvs1>;
+ qcom,cam-vreg-name = "cam_vio";
+ qcom,cam-vreg-min-voltage = <1800000>;
+ qcom,cam-vreg-max-voltage = <1800000>;
+ qcom,cam-vreg-op-mode = <80000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk3_active
+ &cam_sensor_depth_v1_active
+ &cam_sensor_depth_v2_active
+ &cam_sensor_depth_default>;
+ pinctrl-1 = <&cam_sensor_mclk3_suspend
+ &cam_sensor_depth_v1_sleep
+ &cam_sensor_depth_v2_sleep
+ &cam_sensor_depth_sleep>;
+ gpios = <&tlmm 16 0>,
+ <&tlmm 24 0>,
+ <&tlmm 21 0>,
+ <&tlmm 28 0>,
+ <&tlmm 23 0>,
+ <&tlmm 7 0>;
+ qcom,gpio-vana = <1>;
+ qcom,gpio-custom2 = <2>;
+ qcom,gpio-reset = <3>;
+ qcom,gpio-custom3 = <4>;
+ qcom,gpio-custom1 = <5>;
+ qcom,gpio-req-tbl-num = <0 1 2 3 4 5>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0 1 1>;
+ qcom,gpio-req-tbl-label =
+ "CAMIF_MCLK3",
+ "CAM_VANA",
+ "CAM_CUSTOM2",
+ "CAM_RESET1",
+ "CAM_CUSTOM3",
+ "CAM_CUSTOM1";
+ qcom,sensor-position = <1>; /* 0 rear */
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>; /* I2C 1 */
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk3_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk3_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
qcom,camera@0 {
cell-index = <0>;
compatible = "qcom,camera";
@@ -354,6 +403,7 @@
qcom,csiphy-sd-index = <1>;
qcom,csid-sd-index = <3>;
qcom,mount-angle = <90>;
+ qcom,eeprom-src = <&eeprom3>;
cam_vio-supply = <&pm8998_lvs1>;
qcom,cam-vreg-name = "cam_vio";
qcom,cam-vreg-min-voltage = <1800000>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-common.dtsi b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
index d60a22d7cb4d..362aeea96999 100644
--- a/arch/arm/boot/dts/qcom/sdm660-common.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
@@ -246,7 +246,7 @@
qcom,peer-bam = <0>;
qcom,peer-bam-physical-address = <0x06064000>;
qcom,src-bam-pipe-index = <0>;
- qcom,dst-bam-pipe-index = <2>;
+ qcom,dst-bam-pipe-index = <3>;
qcom,data-fifo-offset = <0x0>;
qcom,data-fifo-size = <0x1800>;
qcom,descriptor-fifo-offset = <0x1800>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
index 2194cf606d29..5a571c2db634 100644
--- a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
@@ -551,6 +551,60 @@
qcom,bus-max = <0>;
};
};
+
+ qcom,gpu-pwrlevels-5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <90>;
+
+ qcom,initial-pwrlevel = <2>;
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <430000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <11>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <370000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <6>;
+ qcom,bus-max = <11>;
+ };
+
+ /* Low SVS */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <266000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <6>;
+ };
+
+ /* Min SVS */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <160000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <19200000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index 48437ac9d31a..730ecac48274 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -1987,6 +1987,7 @@
qcom,lpass@15700000 {
compatible = "qcom,pil-tz-generic";
reg = <0x15700000 0x00100>;
+ reg-names = "base_reg";
interrupts = <0 162 1>;
vdd_cx-supply = <&pm660l_l9_level>;
@@ -2019,6 +2020,7 @@
qcom,turing@1a300000 {
compatible = "qcom,pil-tz-generic";
reg = <0x1a300000 0x00100>;
+ reg-names = "base_reg";
interrupts = <0 518 1>;
vdd_cx-supply = <&pm660l_s3_level>;
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-pinctrl.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-pinctrl.dtsi
index ce7741f75b24..02162e64e35f 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-pinctrl.dtsi
@@ -534,5 +534,105 @@
};
};
};
+
+ sdc2_clk_on: sdc2_clk_on {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_clk_off: sdc2_clk_off {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_cmd_on: sdc2_cmd_on {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_cmd_off: sdc2_cmd_off {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_data_on: sdc2_data_on {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_data_off: sdc2_data_off {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_cd_on: sdc2_cd_on {
+ mux {
+ pins = "gpio95";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio95";
+ bias-pull-up; /* pull up */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_cd_off: sdc2_cd_off {
+ mux {
+ pins = "gpio95";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio95";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_cd_on_sbc: sdc2_cd_on_sbc {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_cd_off_sbc: sdc2_cd_off_sbc {
+ mux {
+ pins = "gpio38";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio38";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts
index bed36341a37c..9057fb315c65 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts
@@ -20,6 +20,7 @@
#include "vplatform-lfv-msm8996-modem.dtsi"
#include <dt-bindings/clock/msm-clocks-8996.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "Qualcomm Technologies, Inc. MSM 8996";
@@ -122,3 +123,26 @@
&blsp1_uart2 {
status = "okay";
};
+
+&sdhc_2 {
+ vdd-supply = <&pm8994_l21>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <200 800000>;
+
+ vdd-io-supply = <&pm8994_l13>;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <200 22000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on_sbc>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+ &sdc2_cd_on_sbc>;
+
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+ cd-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi
index c72b5a304d81..08aa412f1ff4 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi
@@ -19,6 +19,10 @@
compatible = "qcom,msm8996";
qcom,msm-id = <246 0x0>;
+ aliases {
+ sdhc2 = &sdhc_2; /* SDC2 SD card slot */
+ };
+
psci {
compatible = "arm,psci";
method = "smc";
@@ -86,6 +90,46 @@
};
};
+ sdhc_2: sdhci@74a4900 {
+ compatible = "qcom,sdhci-msm";
+ reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 0>, <0 221 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_sdcc2_ahb_clk>,
+ <&clock_gcc clk_gcc_sdcc2_apps_clk>;
+
+ qcom,large-address-bus;
+ qcom,bus-width = <4>;
+
+ qcom,devfreq,freq-table = <20000000 200000000>;
+
+ qcom,msm-bus,name = "sdhc2";
+ qcom,msm-bus,num-cases = <8>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
+ <81 512 1600 3200>, /* 400 KB/s*/
+ <81 512 80000 160000>, /* 20 MB/s */
+ <81 512 100000 200000>, /* 25 MB/s */
+ <81 512 200000 400000>, /* 50 MB/s */
+ <81 512 400000 800000>, /* 100 MB/s */
+ <81 512 800000 800000>, /* 200 MB/s */
+ <81 512 2048000 4096000>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+ 100000000 200000000 4294967295>;
+
+ qcom,pm-qos-cpu-groups = <0x03 0x0c>;
+ qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
+ qcom,pm-qos-irq-type = "affine_cores";
+ qcom,pm-qos-irq-cpu = <0>;
+ qcom,pm-qos-irq-latency = <70 70>;
+
+ status = "disabled";
+ };
+
sound-adp-agave {
compatible = "qcom,apq8096-asoc-snd-adp-agave";
qcom,model = "apq8096-adp-agave-snd-card";
@@ -817,4 +861,18 @@
regulator-min-microvolt = <1>;
regulator-max-microvolt = <7>;
};
+
+ pm8994_l21: regulator-l21 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pm8994_l21";
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8994_l13: regulator-l13 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pm8994_l13";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
};
diff --git a/arch/arm/configs/msmcortex_defconfig b/arch/arm/configs/msmcortex_defconfig
index 60a7cff4836d..e5ef13193437 100644
--- a/arch/arm/configs/msmcortex_defconfig
+++ b/arch/arm/configs/msmcortex_defconfig
@@ -495,6 +495,7 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arm/configs/sdm660-perf_defconfig b/arch/arm/configs/sdm660-perf_defconfig
index c018f8fe24bb..32686982997d 100644
--- a/arch/arm/configs/sdm660-perf_defconfig
+++ b/arch/arm/configs/sdm660-perf_defconfig
@@ -628,6 +628,7 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arm/configs/sdm660_defconfig b/arch/arm/configs/sdm660_defconfig
index af5adfeb1a41..e1e4cfe88d94 100644
--- a/arch/arm/configs/sdm660_defconfig
+++ b/arch/arm/configs/sdm660_defconfig
@@ -628,6 +628,7 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arm64/configs/msm-auto-gvm-perf_defconfig b/arch/arm64/configs/msm-auto-gvm-perf_defconfig
index 650b9d7bc127..1c4e19d9b859 100644
--- a/arch/arm64/configs/msm-auto-gvm-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-gvm-perf_defconfig
@@ -223,6 +223,19 @@ CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_CQ_HCI=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_VIRTIO_INPUT=y
diff --git a/arch/arm64/configs/msm-auto-gvm_defconfig b/arch/arm64/configs/msm-auto-gvm_defconfig
index 5b0fb5cd910b..05ade778b2ae 100644
--- a/arch/arm64/configs/msm-auto-gvm_defconfig
+++ b/arch/arm64/configs/msm-auto-gvm_defconfig
@@ -225,6 +225,19 @@ CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_CQ_HCI=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_VIRTIO_INPUT=y
diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig
index 616016fc79cd..554be1743f18 100644
--- a/arch/arm64/configs/msm-auto-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-perf_defconfig
@@ -1,5 +1,6 @@
CONFIG_LOCALVERSION="-perf"
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
@@ -230,6 +231,8 @@ CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_DMA_CMA=y
# CONFIG_PNP_DEBUG_MESSAGES is not set
@@ -308,7 +311,6 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
-CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
CONFIG_I2C_QUP=y
@@ -345,7 +347,6 @@ CONFIG_THERMAL_TSENS8974=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD9335_CODEC=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR_MAX20010=y
@@ -378,15 +379,11 @@ CONFIG_MSM_AIS_CAMERA_SENSOR=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_ADV7481=y
CONFIG_QCOM_KGSL=y
+CONFIG_DRM=y
CONFIG_MSM_BA_V4L2=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_MSM_DBA=y
+CONFIG_MSM_DBA_ADV7533=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_GENERIC=m
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -469,7 +466,7 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_SW_SYNC_USER=y
+CONFIG_SYNC=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_QPNP_REVID=y
@@ -514,7 +511,6 @@ CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
-CONFIG_QCOM_SCM=y
CONFIG_QCOM_SCM_XPU=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -569,7 +565,6 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index e16fc58ce913..a6638f3de2c0 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -621,6 +621,7 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 3cbcc10ceb1d..4c986046cd5b 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -646,6 +646,7 @@ CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arm64/configs/msmcortex_mediabox-perf_defconfig b/arch/arm64/configs/msmcortex_mediabox-perf_defconfig
index b359c78a0d29..79657f54ff7e 100644
--- a/arch/arm64/configs/msmcortex_mediabox-perf_defconfig
+++ b/arch/arm64/configs/msmcortex_mediabox-perf_defconfig
@@ -226,6 +226,7 @@ CONFIG_MSM_BT_POWER=y
CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
@@ -285,6 +286,7 @@ CONFIG_WIL6210=m
CONFIG_ATH10K=m
CONFIG_ATH10K_TARGET_SNOC=m
CONFIG_ATH10K_SNOC=y
+CONFIG_ATH10K_DFS_CERTIFIED=y
CONFIG_CLD_LL_CORE=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
diff --git a/arch/arm64/configs/msmcortex_mediabox_defconfig b/arch/arm64/configs/msmcortex_mediabox_defconfig
index 0d36b8ca455d..127bcc07b81b 100644
--- a/arch/arm64/configs/msmcortex_mediabox_defconfig
+++ b/arch/arm64/configs/msmcortex_mediabox_defconfig
@@ -227,6 +227,7 @@ CONFIG_MSM_BT_POWER=y
CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_CFG80211_WEXT=y
@@ -287,6 +288,7 @@ CONFIG_ATH10K=m
CONFIG_ATH10K_TARGET_SNOC=m
CONFIG_ATH10K_SNOC=y
CONFIG_ATH10K_DEBUG=y
+CONFIG_ATH10K_DFS_CERTIFIED=y
CONFIG_CLD_LL_CORE=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig
index b40705dd7063..dff3cc7ce071 100644
--- a/arch/arm64/configs/sdm660-perf_defconfig
+++ b/arch/arm64/configs/sdm660-perf_defconfig
@@ -630,6 +630,7 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig
index 7819916fb841..2f96c5a677d9 100644
--- a/arch/arm64/configs/sdm660_defconfig
+++ b/arch/arm64/configs/sdm660_defconfig
@@ -650,6 +650,7 @@ CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index eeaa97559bab..81abea0b7650 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -22,8 +22,6 @@
#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500
-extern const compat_ulong_t aarch32_sigret_code[6];
-
int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs);
int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 77edb22f855d..5433ccc9d706 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -18,7 +18,7 @@
#include <irq.h>
#define IRQ_STACK_SIZE THREAD_SIZE
-#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
+#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
extern void *irq_stack[NR_CPUS];
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index a86b19fccb63..c6b855f7892c 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -479,11 +479,6 @@ lws_start:
comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
- /* WARNING: Trashing sr2 and sr3 */
- mfsp %sr7,%r1 /* get userspace into sr3 */
- mtsp %r1,%sr3
- mtsp %r0,%sr2 /* get kernel space into sr2 */
-
/* Load table start */
ldil L%lws_table, %r1
ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
@@ -632,9 +627,9 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw,ma 0(%sr3,%r26), %r28
+1: ldw,ma 0(%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw,ma %r24, 0(%sr3,%r26)
+2: stw,ma %r24, 0(%r26)
/* Free lock */
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
@@ -711,9 +706,9 @@ lws_compare_and_swap_2:
nop
/* 8bit load */
-4: ldb 0(%sr3,%r25), %r25
+4: ldb 0(%r25), %r25
b cas2_lock_start
-5: ldb 0(%sr3,%r24), %r24
+5: ldb 0(%r24), %r24
nop
nop
nop
@@ -721,9 +716,9 @@ lws_compare_and_swap_2:
nop
/* 16bit load */
-6: ldh 0(%sr3,%r25), %r25
+6: ldh 0(%r25), %r25
b cas2_lock_start
-7: ldh 0(%sr3,%r24), %r24
+7: ldh 0(%r24), %r24
nop
nop
nop
@@ -731,9 +726,9 @@ lws_compare_and_swap_2:
nop
/* 32bit load */
-8: ldw 0(%sr3,%r25), %r25
+8: ldw 0(%r25), %r25
b cas2_lock_start
-9: ldw 0(%sr3,%r24), %r24
+9: ldw 0(%r24), %r24
nop
nop
nop
@@ -742,14 +737,14 @@ lws_compare_and_swap_2:
/* 64bit load */
#ifdef CONFIG_64BIT
-10: ldd 0(%sr3,%r25), %r25
-11: ldd 0(%sr3,%r24), %r24
+10: ldd 0(%r25), %r25
+11: ldd 0(%r24), %r24
#else
- /* Load new value into r22/r23 - high/low */
-10: ldw 0(%sr3,%r25), %r22
-11: ldw 4(%sr3,%r25), %r23
+ /* Load old value into r22/r23 - high/low */
+10: ldw 0(%r25), %r22
+11: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
-12: flddx 0(%sr3,%r24), %fr4
+12: flddx 0(%r24), %fr4
#endif
cas2_lock_start:
@@ -799,30 +794,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
-13: ldb,ma 0(%sr3,%r26), %r29
+13: ldb,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-14: stb,ma %r24, 0(%sr3,%r26)
+14: stb,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
-15: ldh,ma 0(%sr3,%r26), %r29
+15: ldh,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-16: sth,ma %r24, 0(%sr3,%r26)
+16: sth,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
-17: ldw,ma 0(%sr3,%r26), %r29
+17: ldw,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-18: stw,ma %r24, 0(%sr3,%r26)
+18: stw,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@@ -830,22 +825,22 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
-19: ldd,ma 0(%sr3,%r26), %r29
+19: ldd,ma 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
-20: std,ma %r24, 0(%sr3,%r26)
+20: std,ma %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
-19: ldw,ma 0(%sr3,%r26), %r29
+19: ldw 0(%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
-20: ldw,ma 4(%sr3,%r26), %r29
+20: ldw 4(%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */
-21: fstdx %fr4, 0(%sr3,%r26)
+21: fstdx %fr4, 0(%r26)
copy %r0, %r28
#endif
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index be0cc1beed41..3fae200dd251 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -59,8 +59,11 @@ extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
extern int sysctl_tsb_ratio;
-#endif
+#ifdef CONFIG_SERIAL_SUNHV
+void sunhv_migrate_hvcons_irq(int cpu);
+#endif
+#endif
void sun_do_break(void);
extern int stop_a_enabled;
extern int scons_pwroff;
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 4511caa3b7e9..46866b2097e8 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1443,8 +1443,12 @@ void smp_send_stop(void)
int cpu;
if (tlb_type == hypervisor) {
+ int this_cpu = smp_processor_id();
+#ifdef CONFIG_SERIAL_SUNHV
+ sunhv_migrate_hvcons_irq(this_cpu);
+#endif
for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
+ if (cpu == this_cpu)
continue;
#ifdef CONFIG_SUN_LDOMS
if (ldom_domaining_enabled) {
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 341b8d858e67..650f427d915b 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -147,6 +147,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
failjob_rls_rqst_payload:
kfree(job->request_payload.sg_list);
failjob_rls_job:
+ kfree(job);
return -ENOMEM;
}
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 3240d394426c..248d1a8f9409 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -361,6 +361,7 @@ config CRYPTO_XTS
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
select CRYPTO_GF128MUL
+ select CRYPTO_ECB
help
XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
key size 256, 384 or 512 bits. This implementation currently
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 8f3056cd0399..2516e97c58f1 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -90,6 +90,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
bool want;
sinfo = msg->signed_infos;
+ if (!sinfo)
+ goto inconsistent;
+
if (sinfo->authattrs) {
want = true;
msg->have_authattrs = true;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 342e42f6f3d1..9e31197f4b6d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -601,6 +601,8 @@ enum {
* (protected by @proc->inner_lock)
* @todo: list of work to do for this thread
* (protected by @proc->inner_lock)
+ * @process_todo: whether work in @todo should be processed
+ * (protected by @proc->inner_lock)
* @return_error: transaction errors reported by this thread
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
@@ -627,6 +629,7 @@ struct binder_thread {
bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
+ bool process_todo;
struct binder_error return_error;
struct binder_error reply_error;
wait_queue_head_t wait;
@@ -814,6 +817,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
return ret;
}
+/**
+ * binder_enqueue_work_ilocked() - Add an item to the work list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
struct list_head *target_list)
@@ -824,22 +837,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
}
/**
- * binder_enqueue_work() - Add an item to the work list
- * @proc: binder_proc associated with list
+ * binder_enqueue_thread_work_ilocked_nowake() - Add thread work
+ * @thread: thread to queue work to
* @work: struct binder_work to add to list
- * @target_list: list to add work to
*
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
+ * flag, which means that (if it wasn't already set) the thread will go to
+ * sleep without handling this work when it calls read.
+ *
+ * Requires the proc->inner_lock to be held.
*/
static void
-binder_enqueue_work(struct binder_proc *proc,
- struct binder_work *work,
- struct list_head *target_list)
+binder_enqueue_thread_work_ilocked_nowake(struct binder_thread *thread,
+ struct binder_work *work)
{
- binder_inner_proc_lock(proc);
- binder_enqueue_work_ilocked(work, target_list);
- binder_inner_proc_unlock(proc);
+ binder_enqueue_work_ilocked(work, &thread->todo);
+}
+
+/**
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
+ * @thread: thread to queue work to
+ * @work: struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static void
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+ struct binder_work *work)
+{
+ binder_enqueue_work_ilocked(work, &thread->todo);
+ thread->process_todo = true;
+}
+
+/**
+ * binder_enqueue_thread_work() - Add an item to the thread work list
+ * @thread: thread to queue work to
+ * @work: struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ */
+static void
+binder_enqueue_thread_work(struct binder_thread *thread,
+ struct binder_work *work)
+{
+ binder_inner_proc_lock(thread->proc);
+ binder_enqueue_thread_work_ilocked(thread, work);
+ binder_inner_proc_unlock(thread->proc);
}
static void
@@ -954,7 +1001,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
static bool binder_has_work_ilocked(struct binder_thread *thread,
bool do_proc_work)
{
- return !binder_worklist_empty_ilocked(&thread->todo) ||
+ return thread->process_todo ||
thread->looper_need_return ||
(do_proc_work &&
!binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1371,6 +1418,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
binder_dequeue_work_ilocked(&node->work);
+ /*
+ * Note: this function is the only place where we queue
+ * directly to a thread->todo without using the
+ * corresponding binder_enqueue_thread_work() helper
+ * functions; in this case it's ok to not set the
+ * process_todo flag, since we know this node work will
+ * always be followed by other work that starts queue
+ * processing: in case of synchronous transactions, a
+ * BR_REPLY or BR_ERROR; in case of oneway
+ * transactions, a BR_TRANSACTION_COMPLETE.
+ */
binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
@@ -1382,6 +1440,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
node->debug_id);
return -EINVAL;
}
+ /*
+ * See comment above
+ */
binder_enqueue_work_ilocked(&node->work, target_list);
}
}
@@ -2071,9 +2132,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
binder_pop_transaction_ilocked(target_thread, t);
if (target_thread->reply_error.cmd == BR_OK) {
target_thread->reply_error.cmd = error_code;
- binder_enqueue_work_ilocked(
- &target_thread->reply_error.work,
- &target_thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ target_thread,
+ &target_thread->reply_error.work);
wake_up_interruptible(&target_thread->wait);
} else {
WARN(1, "Unexpected reply error: %u\n",
@@ -2712,11 +2773,10 @@ static bool binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
- struct list_head *target_list = NULL;
struct binder_node *node = t->buffer->target_node;
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
- bool wakeup = true;
+ bool pending_async = false;
BUG_ON(!node);
binder_node_lock(node);
@@ -2726,8 +2786,7 @@ static bool binder_proc_transaction(struct binder_transaction *t,
if (oneway) {
BUG_ON(thread);
if (node->has_async_transaction) {
- target_list = &node->async_todo;
- wakeup = false;
+ pending_async = true;
} else {
node->has_async_transaction = 1;
}
@@ -2741,22 +2800,20 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return false;
}
- if (!thread && !target_list)
+ if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
if (thread) {
- target_list = &thread->todo;
binder_transaction_priority(thread->task, t, node_prio,
node->inherit_rt);
- } else if (!target_list) {
- target_list = &proc->todo;
+ binder_enqueue_thread_work_ilocked(thread, &t->work);
+ } else if (!pending_async) {
+ binder_enqueue_work_ilocked(&t->work, &proc->todo);
} else {
- BUG_ON(target_list != &node->async_todo);
+ binder_enqueue_work_ilocked(&t->work, &node->async_todo);
}
- binder_enqueue_work_ilocked(&t->work, target_list);
-
- if (wakeup)
+ if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
binder_inner_proc_unlock(proc);
@@ -3258,10 +3315,10 @@ static void binder_transaction(struct binder_proc *proc,
}
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- binder_enqueue_work(proc, tcomplete, &thread->todo);
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
+ binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
binder_inner_proc_unlock(target_proc);
@@ -3269,7 +3326,7 @@ static void binder_transaction(struct binder_proc *proc,
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
- binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_enqueue_thread_work_ilocked(target_thread, &t->work);
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
binder_restore_priority(current, in_reply_to->saved_priority);
@@ -3277,6 +3334,7 @@ static void binder_transaction(struct binder_proc *proc,
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
+ binder_enqueue_thread_work_ilocked_nowake(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
@@ -3290,6 +3348,7 @@ static void binder_transaction(struct binder_proc *proc,
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
+ binder_enqueue_thread_work(thread, tcomplete);
if (!binder_proc_transaction(t, target_proc, NULL))
goto err_dead_proc_or_thread;
}
@@ -3369,15 +3428,11 @@ err_invalid_target_handle:
if (in_reply_to) {
binder_restore_priority(current, in_reply_to->saved_priority);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
- binder_enqueue_work(thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
} else {
thread->return_error.cmd = return_error;
- binder_enqueue_work(thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
}
}
@@ -3681,10 +3736,9 @@ static int binder_thread_write(struct binder_proc *proc,
WARN_ON(thread->return_error.cmd !=
BR_OK);
thread->return_error.cmd = BR_ERROR;
- binder_enqueue_work(
- thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(
+ thread,
+ &thread->return_error.work);
binder_debug(
BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3764,9 +3818,9 @@ static int binder_thread_write(struct binder_proc *proc,
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_work_ilocked(
- &death->work,
- &thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ thread,
+ &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3821,8 +3875,8 @@ static int binder_thread_write(struct binder_proc *proc,
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_work_ilocked(
- &death->work, &thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ thread, &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3996,6 +4050,8 @@ retry:
break;
}
w = binder_dequeue_work_head_ilocked(list);
+ if (binder_worklist_empty_ilocked(&thread->todo))
+ thread->process_todo = false;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index b95da16fd938..3a4279d219f7 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -282,6 +282,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
goto err_vm_insert_page_failed;
}
+ if (index + 1 > alloc->pages_high)
+ alloc->pages_high = index + 1;
+
trace_binder_alloc_page_end(alloc, index);
/* vm_insert_page does not seem to increment the refcount */
}
@@ -561,7 +564,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page with %pK or %pK\n",
alloc->pid, buffer->data,
- prev->data, next->data);
+ prev->data, next ? next->data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
buffer_start_page(buffer) + PAGE_SIZE);
}
@@ -855,6 +858,7 @@ void binder_alloc_print_pages(struct seq_file *m,
}
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+ seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
/**
@@ -986,7 +990,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
return ret;
}
-struct shrinker binder_shrinker = {
+static struct shrinker binder_shrinker = {
.count_objects = binder_shrink_count,
.scan_objects = binder_shrink_scan,
.seeks = DEFAULT_SEEKS,
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 2dd33b6df104..0b145307f1fd 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -92,6 +92,7 @@ struct binder_lru_page {
* @pages: array of binder_lru_page
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
+ * @pages_high: high watermark of offset in @pages
*
* Bookkeeping structure for per-proc address space management for binder
* buffers. It is normally initialized during binder_init() and binder_mmap()
@@ -112,6 +113,7 @@ struct binder_alloc {
size_t buffer_size;
uint32_t buffer_free;
int pid;
+ size_t pages_high;
};
#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c43c3d2baf73..0d628becf37f 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
if (mbus->hw_io_coherency)
w->mbus_attr |= ATTR_HW_COHERENCY;
w->base = base & DDR_BASE_CS_LOW_MASK;
- w->size = (size | ~DDR_SIZE_MASK) + 1;
+ w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
}
}
mvebu_mbus_dram_info.num_cs = cs;
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 986aeed169f5..072c55ca3c4e 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -202,6 +202,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
found = 1;
driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
pr_debug("diag: wake up logging process\n");
wake_up_interruptible(&driver->wait_q);
}
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index d81a39e2c637..80f004b8435e 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -26,6 +26,8 @@
#include <asm/atomic.h>
#include "diagfwd_bridge.h"
+#define THRESHOLD_CLIENT_LIMIT 50
+
/* Size of the USB buffers used for read and write*/
#define USB_MAX_OUT_BUF 4096
#define APPS_BUF_SIZE 4096
@@ -34,7 +36,7 @@
#define DIAG_MAX_REQ_SIZE (16 * 1024)
#define DIAG_MAX_RSP_SIZE (16 * 1024)
-#define APF_DIAG_PADDING 256
+#define APF_DIAG_PADDING 0
/*
* In the worst case, the HDLC buffer can be atmost twice the size of the
* original packet. Add 3 bytes for 16 bit CRC (2 bytes) and a delimiter
@@ -508,6 +510,7 @@ struct diagchar_dev {
wait_queue_head_t wait_q;
struct diag_client_map *client_map;
int *data_ready;
+ atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT];
int num_clients;
int polling_reg_flag;
int use_device_tree;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 4111e599877a..ae0182ae77db 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -139,7 +139,6 @@ module_param(poolsize_qsc_usb, uint, 0);
/* This is the max number of user-space clients supported at initialization*/
static unsigned int max_clients = 15;
-static unsigned int threshold_client_limit = 50;
module_param(max_clients, uint, 0);
/* Timer variables */
@@ -328,7 +327,7 @@ static int diagchar_open(struct inode *inode, struct file *file)
if (i < driver->num_clients) {
diag_add_client(i, file);
} else {
- if (i < threshold_client_limit) {
+ if (i < THRESHOLD_CLIENT_LIMIT) {
driver->num_clients++;
temp = krealloc(driver->client_map
, (driver->num_clients) * sizeof(struct
@@ -358,11 +357,17 @@ static int diagchar_open(struct inode *inode, struct file *file)
}
}
driver->data_ready[i] = 0x0;
+ atomic_set(&driver->data_ready_notif[i], 0);
driver->data_ready[i] |= MSG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
if (driver->ref_count == 0)
diag_mempool_init();
@@ -1866,6 +1871,7 @@ static int diag_ioctl_lsm_deinit(void)
}
driver->data_ready[i] |= DEINIT_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
mutex_unlock(&driver->diagchar_mutex);
wake_up_interruptible(&driver->wait_q);
@@ -3029,16 +3035,6 @@ static int diag_user_process_apps_data(const char __user *buf, int len,
return 0;
}
-static int check_data_ready(int index)
-{
- int data_type = 0;
-
- mutex_lock(&driver->diagchar_mutex);
- data_type = driver->data_ready[index];
- mutex_unlock(&driver->diagchar_mutex);
- return data_type;
-}
-
static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
@@ -3065,7 +3061,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
pr_err("diag: bad address from user side\n");
return -EFAULT;
}
- wait_event_interruptible(driver->wait_q, (check_data_ready(index)) > 0);
+ wait_event_interruptible(driver->wait_q,
+ atomic_read(&driver->data_ready_notif[index]) > 0);
mutex_lock(&driver->diagchar_mutex);
@@ -3076,6 +3073,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
/*Copy the type of data being passed*/
data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
/* place holder for number of data field */
ret += sizeof(int);
@@ -3089,11 +3087,13 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
/* In case, the thread wakes up and the logging mode is
not memory device any more, the condition needs to be cleared */
driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
}
if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
@@ -3110,6 +3110,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
data_type = driver->data_ready[index] & DEINIT_TYPE;
COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
driver->data_ready[index] ^= DEINIT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
mutex_unlock(&driver->diagchar_mutex);
diag_remove_client_entry(file);
return ret;
@@ -3125,6 +3126,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= MSG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3144,6 +3146,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
event_mask.mask_len);
}
driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3157,6 +3160,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3168,6 +3172,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
*(driver->apps_req_buf),
driver->apps_req_buf_len);
driver->data_ready[index] ^= PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
driver->in_busy_pktdata = 0;
goto exit;
}
@@ -3179,6 +3184,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->dci_pkt_buf),
driver->dci_pkt_length);
driver->data_ready[index] ^= DCI_PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
driver->in_busy_dcipktdata = 0;
goto exit;
}
@@ -3191,6 +3197,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
COPY_USER_SPACE_OR_EXIT(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
event_mask_composite), DCI_EVENT_MASK_SIZE);
driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3202,6 +3209,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
COPY_USER_SPACE_OR_EXIT(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
log_mask_composite), DCI_LOG_MASK_SIZE);
driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3233,6 +3241,7 @@ exit:
exit_stat = diag_copy_dci(buf, count, entry, &ret);
mutex_lock(&driver->diagchar_mutex);
driver->data_ready[index] ^= DCI_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
mutex_unlock(&driver->diagchar_mutex);
if (exit_stat == 1) {
mutex_unlock(&driver->dci_mutex);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index ef08f939c36e..40412ba87897 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -226,6 +226,7 @@ void chk_logging_wakeup(void)
* situation.
*/
driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
pr_debug("diag: Force wakeup of logging process\n");
wake_up_interruptible(&driver->wait_q);
break;
@@ -480,8 +481,10 @@ void diag_update_userspace_clients(unsigned int type)
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
- if (driver->client_map[i].pid != 0)
+ if (driver->client_map[i].pid != 0) {
driver->data_ready[i] |= type;
+ atomic_inc(&driver->data_ready_notif[i]);
+ }
wake_up_interruptible(&driver->wait_q);
mutex_unlock(&driver->diagchar_mutex);
}
@@ -498,6 +501,8 @@ void diag_update_md_clients(unsigned int type)
driver->client_map[j].pid ==
driver->md_session_map[i]->pid) {
driver->data_ready[j] |= type;
+ atomic_inc(
+ &driver->data_ready_notif[j]);
break;
}
}
@@ -513,6 +518,7 @@ void diag_update_sleeping_process(int process_id, int data_type)
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == process_id) {
driver->data_ready[i] |= data_type;
+ atomic_inc(&driver->data_ready_notif[i]);
break;
}
wake_up_interruptible(&driver->wait_q);
@@ -1703,6 +1709,8 @@ int diagfwd_init(void)
, GFP_KERNEL)) == NULL)
goto err;
kmemleak_not_leak(driver->data_ready);
+ for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++)
+ atomic_set(&driver->data_ready_notif[i], 0);
if (driver->apps_req_buf == NULL) {
driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
if (!driver->apps_req_buf)
diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c
index 0f7ec18e477a..8b2e6fd601c0 100644
--- a/drivers/clk/qcom/gpucc-sdm660.c
+++ b/drivers/clk/qcom/gpucc-sdm660.c
@@ -179,6 +179,7 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
F_GFX(160000000, 0, 2, 0, 0, 640000000),
F_GFX(266000000, 0, 2, 0, 0, 532000000),
F_GFX(370000000, 0, 2, 0, 0, 740000000),
+ F_GFX(430000000, 0, 2, 0, 0, 860000000),
F_GFX(465000000, 0, 2, 0, 0, 930000000),
F_GFX(588000000, 0, 2, 0, 0, 1176000000),
F_GFX(647000000, 0, 2, 0, 0, 1294000000),
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 9a7e37cf56b0..e1d7373e63e0 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
/* Turn off the clock (and clear the event) */
disable_timer(cs5535_event_clock);
- if (clockevent_state_shutdown(&cs5535_clockevent))
+ if (clockevent_state_detached(&cs5535_clockevent) ||
+ clockevent_state_shutdown(&cs5535_clockevent))
return IRQ_HANDLED;
/* Clear the counter */
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index e2c02f9dd141..ff0fd0e44f07 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -241,7 +241,7 @@ config ARM_PXA2xx_CPUFREQ
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI
+ depends on ACPI_PROCESSOR
select ACPI_CPPC_LIB
default n
help
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 9d8b2e59b755..d775e2bfc017 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -10,7 +10,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
+cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 58bf94b69186..273e05a3c933 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1802,6 +1802,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
return -EINVAL;
}
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
port = NULL;
req_payload.num_slots = 0;
@@ -1817,6 +1818,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
if (req_payload.num_slots) {
drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots;
+ mgr->payloads[i].vcpi = req_payload.vcpi;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index fe4b73b4ffea..de0551b22d2e 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -156,7 +156,7 @@ void sde_rm_init_hw_iter(
iter->type = type;
}
-bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
{
struct list_head *blk_list;
@@ -198,7 +198,21 @@ bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
return false;
}
-void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+ bool ret;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _sde_rm_get_hw_locked(rm, i);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
+static void *_sde_rm_get_hw_by_id_locked(
+ struct sde_rm *rm,
+ enum sde_hw_blk_type type,
+ int id)
{
struct list_head *blk_list;
struct sde_rm_hw_blk *blk;
@@ -225,6 +239,17 @@ void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
return hw;
}
+void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+{
+ void *ret = NULL;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _sde_rm_get_hw_by_id_locked(rm, type, id);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
{
switch (type) {
@@ -291,6 +316,8 @@ int sde_rm_destroy(struct sde_rm *rm)
sde_hw_mdp_destroy(rm->hw_mdp);
rm->hw_mdp = NULL;
+ mutex_destroy(&rm->rm_lock);
+
return 0;
}
@@ -387,6 +414,9 @@ int sde_rm_init(struct sde_rm *rm,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
+
+ mutex_init(&rm->rm_lock);
+
INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < SDE_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
@@ -568,7 +598,7 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
if (lm_cfg->dspp != DSPP_MAX) {
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->dspp) {
*dspp = iter.blk;
break;
@@ -589,7 +619,7 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
}
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->pingpong) {
*pp = iter.blk;
break;
@@ -639,7 +669,8 @@ static int _sde_rm_reserve_lms(
/* Find a primary mixer */
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
- while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_i)) {
+ while (lm_count != reqs->num_lm &&
+ _sde_rm_get_hw_locked(rm, &iter_i)) {
memset(&lm, 0, sizeof(lm));
memset(&dspp, 0, sizeof(dspp));
memset(&pp, 0, sizeof(pp));
@@ -657,7 +688,8 @@ static int _sde_rm_reserve_lms(
/* Valid primary mixer found, find matching peers */
sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
- while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_j)) {
+ while (lm_count != reqs->num_lm &&
+ _sde_rm_get_hw_locked(rm, &iter_j)) {
if (iter_i.blk == iter_j.blk)
continue;
@@ -693,7 +725,7 @@ static int _sde_rm_reserve_lms(
/* reserve a free PINGPONG_SLAVE block */
rc = -ENAVAIL;
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
- while (sde_rm_get_hw(rm, &iter_i)) {
+ while (_sde_rm_get_hw_locked(rm, &iter_i)) {
struct sde_pingpong_cfg *pp_cfg =
(struct sde_pingpong_cfg *)
(iter_i.blk->catalog);
@@ -724,7 +756,7 @@ static int _sde_rm_reserve_ctls(
memset(&ctls, 0, sizeof(ctls));
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
unsigned long caps;
bool has_split_display, has_ppsplit;
@@ -771,7 +803,7 @@ static int _sde_rm_reserve_cdm(
struct sde_cdm_cfg *cdm;
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
bool match = false;
if (RESERVED_BY_OTHER(iter.blk, rsvp))
@@ -816,7 +848,7 @@ static int _sde_rm_reserve_intf_or_wb(
/* Find the block entry in the rm, and note the reservation */
sde_rm_init_hw_iter(&iter, 0, type);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id != id)
continue;
@@ -1073,7 +1105,7 @@ static struct drm_connector *_sde_rm_get_connector(
* @rm: KMS handle
* @rsvp: RSVP pointer to release and release resources for
*/
-void _sde_rm_release_rsvp(
+static void _sde_rm_release_rsvp(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
struct drm_connector *conn)
@@ -1125,16 +1157,18 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
return;
}
+ mutex_lock(&rm->rm_lock);
+
rsvp = _sde_rm_get_rsvp(rm, enc);
if (!rsvp) {
SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
- return;
+ goto end;
}
conn = _sde_rm_get_connector(enc);
if (!conn) {
SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
- return;
+ goto end;
}
top_ctrl = sde_connector_get_property(conn->state,
@@ -1154,6 +1188,9 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
CONNECTOR_PROP_TOPOLOGY_NAME,
SDE_RM_TOPOLOGY_UNKNOWN);
}
+
+end:
+ mutex_unlock(&rm->rm_lock);
}
static int _sde_rm_commit_rsvp(
@@ -1232,13 +1269,15 @@ int sde_rm_reserve(
crtc_state->crtc->base.id, test_only);
SDE_EVT32(enc->base.id, conn_state->connector->base.id);
+ mutex_lock(&rm->rm_lock);
+
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
conn_state, &reqs);
if (ret) {
SDE_ERROR("failed to populate hw requirements\n");
- return ret;
+ goto end;
}
/*
@@ -1253,8 +1292,10 @@ int sde_rm_reserve(
* replace the current with the next.
*/
rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
- if (!rsvp_nxt)
- return -ENOMEM;
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
rsvp_cur = _sde_rm_get_rsvp(rm, enc);
@@ -1306,5 +1347,8 @@ int sde_rm_reserve(
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
+end:
+ mutex_unlock(&rm->rm_lock);
+
return ret;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 1cc22c5fbbf4..87e95bfebe98 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -70,6 +70,7 @@ enum sde_rm_topology_control {
* @hw_mdp: hardware object for mdp_top
* @lm_max_width: cached layer mixer maximum width
* @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
*/
struct sde_rm {
struct drm_device *dev;
@@ -78,6 +79,7 @@ struct sde_rm {
struct sde_hw_mdp *hw_mdp;
uint32_t lm_max_width;
uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
};
/**
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 3ef01071f073..103471ff4dc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -40,5 +40,5 @@ int
g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
{
return nvkm_xtensa_new_(&g84_bsp, device, index,
- true, 0x103000, pengine);
+ device->chipset != 0x92, 0x103000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index e04a2296ecd0..5bb7f7e0f11f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -240,6 +240,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
}
+ mmu->func->flush(vm);
+
nvkm_memory_del(&pgt);
}
}
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index c620c7ac1afa..9b171389d69d 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1922,6 +1922,26 @@ static int adreno_getproperty(struct kgsl_device *device,
status = 0;
}
break;
+ case KGSL_PROP_IB_TIMEOUT:
+ {
+ unsigned int ib_timeout = adreno_drawobj_timeout;
+
+ if (ib_timeout == 0)
+ return -EINVAL;
+
+ if (sizebytes != sizeof(unsigned int)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (copy_to_user(value, &ib_timeout,
+ sizeof(unsigned int))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
default:
status = -EINVAL;
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index d3ba8ca0dc00..143dac1a9170 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -1038,7 +1038,7 @@ void kgsl_get_memory_usage(char *name, size_t name_size, uint64_t memflags)
else if (type < ARRAY_SIZE(memtype_str) && memtype_str[type] != NULL)
strlcpy(name, memtype_str[type], name_size);
else
- snprintf(name, name_size, "unknown(%3d)", type);
+ snprintf(name, name_size, "VK/others(%3d)", type);
}
EXPORT_SYMBOL(kgsl_get_memory_usage);
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 10835d1f559b..dee0fc421054 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -1131,6 +1131,7 @@ static int at91_twi_suspend_noirq(struct device *dev)
static int at91_twi_resume_noirq(struct device *dev)
{
+ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
int ret;
if (!pm_runtime_status_suspended(dev)) {
@@ -1142,6 +1143,8 @@ static int at91_twi_resume_noirq(struct device *dev)
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
+ at91_init_twi_bus(twi_dev);
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 639d1a9c8793..1111cb966a44 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -338,12 +338,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
data->word = dma_buffer[0] | (dma_buffer[1] << 8);
break;
case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_I2C_BLOCK_DATA:
if (desc->rxbytes != dma_buffer[0] + 1)
return -EMSGSIZE;
memcpy(data->block, dma_buffer, desc->rxbytes);
break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+ data->block[0] = desc->rxbytes;
+ break;
}
return 0;
}
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 02e636a1c49a..475c5a74f2d1 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1208,7 +1208,7 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc->ops->setup(pdev, indio_dev, irq);
if (ret)
- goto err_free_samplerate_trigger;
+ goto err_clk_disable_unprepare;
ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
dev_name(&pdev->dev), indio_dev);
@@ -1268,6 +1268,8 @@ static int xadc_probe(struct platform_device *pdev)
err_free_irq:
free_irq(irq, indio_dev);
+err_clk_disable_unprepare:
+ clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_trigger_free(xadc->samplerate_trigger);
@@ -1277,8 +1279,6 @@ err_free_convst_trigger:
err_triggered_buffer_cleanup:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_triggered_buffer_cleanup(indio_dev);
-err_clk_disable_unprepare:
- clk_disable_unprepare(xadc->clk);
err_device_free:
kfree(indio_dev->channels);
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 75573fa431ba..63faee04a008 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
static int __init crossbar_of_init(struct device_node *node)
{
- int i, size, max = 0, reserved = 0, entry;
+ int i, size, reserved = 0;
+ u32 max = 0, entry;
const __be32 *irqsr;
int ret = -ENOMEM;
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index bf3fbd00a091..64b586458d3d 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -828,7 +828,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
isdn_net_local *lp;
struct ippp_struct *is;
int proto;
- unsigned char protobuf[4];
is = file->private_data;
@@ -842,24 +841,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
if (!lp)
printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
else {
- /*
- * Don't reset huptimer for
- * LCP packets. (Echo requests).
- */
- if (copy_from_user(protobuf, buf, 4))
- return -EFAULT;
- proto = PPP_PROTOCOL(protobuf);
- if (proto != PPP_LCP)
- lp->huptimer = 0;
+ if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
+ unsigned char protobuf[4];
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ if (copy_from_user(protobuf, buf, 4))
+ return -EFAULT;
+
+ proto = PPP_PROTOCOL(protobuf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
- if (lp->isdn_device < 0 || lp->isdn_channel < 0)
return 0;
+ }
if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
lp->dialstate == 0 &&
(lp->flags & ISDN_NET_CONNECTED)) {
unsigned short hl;
struct sk_buff *skb;
+ unsigned char *cpy_buf;
/*
* we need to reserve enough space in front of
* sk_buff. old call to dev_alloc_skb only reserved
@@ -872,11 +875,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
return count;
}
skb_reserve(skb, hl);
- if (copy_from_user(skb_put(skb, count), buf, count))
+ cpy_buf = skb_put(skb, count);
+ if (copy_from_user(cpy_buf, buf, count))
{
kfree_skb(skb);
return -EFAULT;
}
+
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ proto = PPP_PROTOCOL(cpy_buf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
+
if (is->debug & 0x40) {
printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c
index cd76941b87ca..493631774936 100644
--- a/drivers/leds/leds-qpnp-flash.c
+++ b/drivers/leds/leds-qpnp-flash.c
@@ -226,11 +226,13 @@ struct flash_led_platform_data {
};
struct qpnp_flash_led_buffer {
- struct mutex debugfs_lock; /* Prevent thread concurrency */
- size_t rpos;
- size_t wpos;
- size_t len;
- char data[0];
+ struct mutex debugfs_lock; /* Prevent thread concurrency */
+ size_t rpos;
+ size_t wpos;
+ size_t len;
+ struct qpnp_flash_led *led;
+ u32 buffer_cnt;
+ char data[0];
};
/*
@@ -249,10 +251,8 @@ struct qpnp_flash_led {
struct workqueue_struct *ordered_workq;
struct qpnp_vadc_chip *vadc_dev;
struct mutex flash_led_lock;
- struct qpnp_flash_led_buffer *log;
struct dentry *dbgfs_root;
int num_leds;
- u32 buffer_cnt;
u16 base;
u16 current_addr;
u16 current2_addr;
@@ -284,10 +284,10 @@ static int flash_led_dbgfs_file_open(struct qpnp_flash_led *led,
log->wpos = 0;
log->len = logbufsize - sizeof(*log);
mutex_init(&log->debugfs_lock);
- led->log = log;
+ log->led = led;
- led->buffer_cnt = 1;
- file->private_data = led;
+ log->buffer_cnt = 1;
+ file->private_data = log;
return 0;
}
@@ -301,12 +301,12 @@ static int flash_led_dfs_open(struct inode *inode, struct file *file)
static int flash_led_dfs_close(struct inode *inode, struct file *file)
{
- struct qpnp_flash_led *led = file->private_data;
+ struct qpnp_flash_led_buffer *log = file->private_data;
- if (led && led->log) {
+ if (log) {
file->private_data = NULL;
- mutex_destroy(&led->log->debugfs_lock);
- kfree(led->log);
+ mutex_destroy(&log->debugfs_lock);
+ kfree(log);
}
return 0;
@@ -335,15 +335,21 @@ static int print_to_log(struct qpnp_flash_led_buffer *log,
static ssize_t flash_led_dfs_latched_reg_read(struct file *fp, char __user *buf,
size_t count, loff_t *ppos) {
- struct qpnp_flash_led *led = fp->private_data;
- struct qpnp_flash_led_buffer *log = led->log;
+ struct qpnp_flash_led_buffer *log = fp->private_data;
+ struct qpnp_flash_led *led;
uint val;
int rc = 0;
size_t len;
size_t ret;
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
mutex_lock(&log->debugfs_lock);
- if ((log->rpos >= log->wpos && led->buffer_cnt == 0) ||
+ if ((log->rpos >= log->wpos && log->buffer_cnt == 0) ||
((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN))
goto unlock_mutex;
@@ -354,7 +360,7 @@ static ssize_t flash_led_dfs_latched_reg_read(struct file *fp, char __user *buf,
INT_LATCHED_STS(led->base), rc);
goto unlock_mutex;
}
- led->buffer_cnt--;
+ log->buffer_cnt--;
rc = print_to_log(log, "0x%05X ", INT_LATCHED_STS(led->base));
if (rc == 0)
@@ -389,18 +395,24 @@ unlock_mutex:
static ssize_t flash_led_dfs_fault_reg_read(struct file *fp, char __user *buf,
size_t count, loff_t *ppos) {
- struct qpnp_flash_led *led = fp->private_data;
- struct qpnp_flash_led_buffer *log = led->log;
+ struct qpnp_flash_led_buffer *log = fp->private_data;
+ struct qpnp_flash_led *led;
int rc = 0;
size_t len;
size_t ret;
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
mutex_lock(&log->debugfs_lock);
- if ((log->rpos >= log->wpos && led->buffer_cnt == 0) ||
+ if ((log->rpos >= log->wpos && log->buffer_cnt == 0) ||
((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN))
goto unlock_mutex;
- led->buffer_cnt--;
+ log->buffer_cnt--;
rc = print_to_log(log, "0x%05X ", FLASH_LED_FAULT_STATUS(led->base));
if (rc == 0)
@@ -442,10 +454,17 @@ static ssize_t flash_led_dfs_fault_reg_enable(struct file *file,
int data;
size_t ret = 0;
- struct qpnp_flash_led *led = file->private_data;
+ struct qpnp_flash_led_buffer *log = file->private_data;
+ struct qpnp_flash_led *led;
char *kbuf;
- mutex_lock(&led->log->debugfs_lock);
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
kbuf = kmalloc(count + 1, GFP_KERNEL);
if (!kbuf) {
ret = -ENOMEM;
@@ -480,7 +499,7 @@ static ssize_t flash_led_dfs_fault_reg_enable(struct file *file,
free_buf:
kfree(kbuf);
unlock_mutex:
- mutex_unlock(&led->log->debugfs_lock);
+ mutex_unlock(&log->debugfs_lock);
return ret;
}
@@ -492,10 +511,17 @@ static ssize_t flash_led_dfs_dbg_enable(struct file *file,
int cnt = 0;
int data;
size_t ret = 0;
- struct qpnp_flash_led *led = file->private_data;
+ struct qpnp_flash_led_buffer *log = file->private_data;
+ struct qpnp_flash_led *led;
char *kbuf;
- mutex_lock(&led->log->debugfs_lock);
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
kbuf = kmalloc(count + 1, GFP_KERNEL);
if (!kbuf) {
ret = -ENOMEM;
@@ -529,7 +555,7 @@ static ssize_t flash_led_dfs_dbg_enable(struct file *file,
free_buf:
kfree(kbuf);
unlock_mutex:
- mutex_unlock(&led->log->debugfs_lock);
+ mutex_unlock(&log->debugfs_lock);
return ret;
}
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index c83cedf2daaf..b0519544759e 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -63,7 +63,6 @@
#define QPNP_WLED_VLOOP_COMP_RES_MASK 0xF0
#define QPNP_WLED_VLOOP_COMP_RES_OVERWRITE 0x80
-#define QPNP_WLED_LOOP_COMP_RES_DFLT_AMOLED_KOHM 320
#define QPNP_WLED_LOOP_COMP_RES_STEP_KOHM 20
#define QPNP_WLED_LOOP_COMP_RES_MIN_KOHM 20
#define QPNP_WLED_LOOP_COMP_RES_MAX_KOHM 320
@@ -106,10 +105,8 @@
#define QPNP_WLED_BOOST_DUTY_MIN_NS 26
#define QPNP_WLED_BOOST_DUTY_MAX_NS 156
#define QPNP_WLED_DEF_BOOST_DUTY_NS 104
-#define QPNP_WLED_SWITCH_FREQ_MASK 0x70
-#define QPNP_WLED_SWITCH_FREQ_800_KHZ 800
-#define QPNP_WLED_SWITCH_FREQ_1600_KHZ 1600
-#define QPNP_WLED_SWITCH_FREQ_OVERWRITE 0x80
+#define QPNP_WLED_SWITCH_FREQ_MASK GENMASK(3, 0)
+#define QPNP_WLED_SWITCH_FREQ_OVERWRITE BIT(7)
#define QPNP_WLED_OVP_MASK GENMASK(1, 0)
#define QPNP_WLED_TEST4_EN_DEB_BYPASS_ILIM_BIT BIT(6)
#define QPNP_WLED_TEST4_EN_SH_FOR_SS_BIT BIT(5)
@@ -125,6 +122,10 @@
#define QPNP_WLED_SC_FAULT_BIT BIT(2)
#define QPNP_WLED_OVP_FLT_RT_STS_BIT BIT(1)
+/* QPNP_WLED_SOFTSTART_RAMP_DLY */
+#define SOFTSTART_OVERWRITE_BIT BIT(7)
+#define SOFTSTART_RAMP_DELAY_MASK GENMASK(2, 0)
+
/* sink registers */
#define QPNP_WLED_CURR_SINK_REG(b) (b + 0x46)
#define QPNP_WLED_SYNC_REG(b) (b + 0x47)
@@ -404,6 +405,7 @@ struct qpnp_wled {
bool ovp_irq_disabled;
bool auto_calib_enabled;
bool auto_calib_done;
+ bool module_dis_perm;
ktime_t start_ovp_fault_time;
};
@@ -600,6 +602,9 @@ static int qpnp_wled_module_en(struct qpnp_wled *wled,
{
int rc;
+ if (wled->module_dis_perm)
+ return 0;
+
rc = qpnp_wled_masked_write_reg(wled,
QPNP_WLED_MODULE_EN_REG(base_addr),
QPNP_WLED_MODULE_EN_MASK,
@@ -1217,6 +1222,7 @@ static int wled_auto_calibrate(struct qpnp_wled *wled)
if (!sink_config) {
pr_warn("No valid WLED sinks found\n");
+ wled->module_dis_perm = true;
goto failed_calib;
}
@@ -1471,20 +1477,26 @@ static int qpnp_wled_gm_config(struct qpnp_wled *wled)
u8 mask = 0, reg = 0;
/* Configure the LOOP COMP GM register */
- if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
- wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
- if (wled->loop_auto_gm_en)
- reg |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN;
-
- if (wled->loop_auto_gm_thresh >
- QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX)
- wled->loop_auto_gm_thresh =
- QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX;
-
- reg |= wled->loop_auto_gm_thresh <<
- QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_SHIFT;
- mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
- QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+ if ((wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)) {
+ if (wled->disp_type_amoled) {
+ reg = 0;
+ mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
+ QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+ } else {
+ if (wled->loop_auto_gm_en)
+ reg |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN;
+
+ if (wled->loop_auto_gm_thresh >
+ QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX)
+ wled->loop_auto_gm_thresh =
+ QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX;
+
+ reg |= wled->loop_auto_gm_thresh <<
+ QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_SHIFT;
+ mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
+ QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+ }
}
if (wled->loop_ea_gm < QPNP_WLED_LOOP_EA_GM_MIN)
@@ -1777,8 +1789,17 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
/* Configure the Soft start Ramp delay: for AMOLED - 0,for LCD - 2 */
reg = (wled->disp_type_amoled) ? 0 : 2;
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_SOFTSTART_RAMP_DLY(wled->ctrl_base), reg);
+ mask = SOFTSTART_RAMP_DELAY_MASK;
+ if ((wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+ && wled->disp_type_amoled) {
+ reg |= SOFTSTART_OVERWRITE_BIT;
+ mask |= SOFTSTART_OVERWRITE_BIT;
+ }
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_SOFTSTART_RAMP_DLY(wled->ctrl_base),
+ mask, reg);
if (rc)
return rc;
@@ -1800,21 +1821,24 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
return rc;
/* Configure the SWITCHING FREQ register */
- if (wled->switch_freq_khz == QPNP_WLED_SWITCH_FREQ_1600_KHZ)
- temp = QPNP_WLED_SWITCH_FREQ_1600_KHZ_CODE;
+ if (wled->switch_freq_khz == 1600)
+ reg = QPNP_WLED_SWITCH_FREQ_1600_KHZ_CODE;
else
- temp = QPNP_WLED_SWITCH_FREQ_800_KHZ_CODE;
+ reg = QPNP_WLED_SWITCH_FREQ_800_KHZ_CODE;
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base), &reg);
+ /*
+ * Do not set the overwrite bit when switching frequency is selected
+ * for AMOLED. This register is in logic reset block which can cause
+ * the value to be overwritten during module enable/disable.
+ */
+ mask = QPNP_WLED_SWITCH_FREQ_MASK | QPNP_WLED_SWITCH_FREQ_OVERWRITE;
+ if (!wled->disp_type_amoled)
+ reg |= QPNP_WLED_SWITCH_FREQ_OVERWRITE;
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base), mask, reg);
if (rc < 0)
return rc;
- reg &= QPNP_WLED_SWITCH_FREQ_MASK;
- reg |= (temp | QPNP_WLED_SWITCH_FREQ_OVERWRITE);
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base), reg);
- if (rc)
- return rc;
rc = qpnp_wled_ovp_config(wled);
if (rc < 0) {
@@ -2123,8 +2147,11 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
return rc;
}
- wled->loop_comp_res_kohm =
- QPNP_WLED_LOOP_COMP_RES_DFLT_AMOLED_KOHM;
+ wled->loop_comp_res_kohm = 320;
+ if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+ wled->loop_comp_res_kohm = 300;
+
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,loop-comp-res-kohm", &temp_val);
if (!rc) {
@@ -2252,7 +2279,7 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
return rc;
}
- wled->switch_freq_khz = QPNP_WLED_SWITCH_FREQ_800_KHZ;
+ wled->switch_freq_khz = wled->disp_type_amoled ? 1600 : 800;
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,switch-freq-khz", &temp_val);
if (!rc) {
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 79223dceb1c2..b19205ea1a10 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -223,7 +223,8 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
* oldconf until no one uses it anymore.
*/
mddev_suspend(mddev);
- oldconf = rcu_dereference(mddev->private);
+ oldconf = rcu_dereference_protected(mddev->private,
+ lockdep_is_held(&mddev->reconfig_mutex));
mddev->raid_disks++;
WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
"copied raid_disks doesn't match mddev->raid_disks");
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index 61adc28ec8ec..ef80af1f7e5a 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -305,8 +305,8 @@ int cec_register_adapter(struct cec_adapter *adap,
adap->devnode.dev.parent = parent;
#if IS_REACHABLE(CONFIG_RC_CORE)
- adap->rc->dev.parent = parent;
if (adap->capabilities & CEC_CAP_RC) {
+ adap->rc->dev.parent = parent;
res = rc_register_device(adap->rc);
if (res) {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 92b1f2ea871b..fe0b2efeb0b8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -402,6 +402,8 @@ static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
return -EINVAL;
}
vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+ 0, 1);
msm_isp_reset_framedrop(vfe_dev, stream_info);
rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index b1bea12c2cc3..6c01d8f47389 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -573,6 +573,11 @@ static int32_t msm_actuator_move_focus(
CDBG("called, dir %d, num_steps %d\n", dir, num_steps);
+ if (a_ctrl->step_position_table == NULL) {
+ pr_err("Step Position Table is NULL\n");
+ return -EINVAL;
+ }
+
if ((dest_step_pos == a_ctrl->curr_step_pos) ||
((dest_step_pos <= a_ctrl->total_steps) &&
(a_ctrl->step_position_table[dest_step_pos] ==
diff --git a/drivers/media/platform/msm/sde/cec/sde_hdmi_cec.c b/drivers/media/platform/msm/sde/cec/sde_hdmi_cec.c
index b3798e8a9d24..d61bfdce1cef 100644
--- a/drivers/media/platform/msm/sde/cec/sde_hdmi_cec.c
+++ b/drivers/media/platform/msm/sde/cec/sde_hdmi_cec.c
@@ -138,6 +138,7 @@ static int sde_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
struct sde_hdmi_cec *cec = adap->priv;
struct cec_hw_resource *hw = &cec->hw_res;
u32 frame_type;
+ u8 retransmits;
int i;
u32 line_check_retry = 10;
@@ -152,22 +153,13 @@ static int sde_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
/* make sure state is cleared */
wmb();
- CEC_REG_WRITE(hw, HDMI_CEC_RETRANSMIT,
- ((attempts & 0xF) << 4) | BIT(0));
+ retransmits = attempts ? (attempts - 1) : 0;
- frame_type = cec_msg_is_broadcast(msg) ? BIT(0) : 0;
-
- /* header block */
- CEC_REG_WRITE(hw, HDMI_CEC_WR_DATA,
- (((cec_msg_initiator(msg) << 4) |
- cec_msg_destination(msg)) << 8) | frame_type);
+ CEC_REG_WRITE(hw, HDMI_CEC_RETRANSMIT, (retransmits << 4) | BIT(0));
- /* data block 0 : opcode */
- CEC_REG_WRITE(hw, HDMI_CEC_WR_DATA,
- ((msg->len < 2 ? 0 : cec_msg_opcode(msg)) << 8) | frame_type);
+ frame_type = cec_msg_is_broadcast(msg) ? BIT(0) : 0;
- /* data block 1-14 : operand 0-13 */
- for (i = 2; i < msg->len; i++)
+ for (i = 0; i < msg->len; i++)
CEC_REG_WRITE(hw, HDMI_CEC_WR_DATA,
(msg->msg[i] << 8) | frame_type);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 372f1fbbde4c..cfd56d9fa2ca 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -464,10 +464,11 @@ out:
}
EXPORT_SYMBOL(mmc_clk_update_freq);
-void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+int mmc_recovery_fallback_lower_speed(struct mmc_host *host)
{
+ int err = 0;
if (!host->card)
- return;
+ return -EINVAL;
if (host->sdr104_wa && mmc_card_sd(host->card) &&
(host->ios.timing == MMC_TIMING_UHS_SDR104) &&
@@ -475,9 +476,14 @@ void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
mmc_hostname(host), __func__);
mmc_host_clear_sdr104(host);
- mmc_hw_reset(host);
+ err = mmc_hw_reset(host);
host->card->sdr104_blocked = true;
}
+ if (err)
+ pr_err("%s: %s: Fallback to lower speed mode failed with err=%d\n",
+ mmc_hostname(host), __func__, err);
+
+ return err;
}
static int mmc_devfreq_set_target(struct device *dev,
@@ -545,7 +551,7 @@ static int mmc_devfreq_set_target(struct device *dev,
if (err && err != -EAGAIN) {
pr_err("%s: clock scale to %lu failed with error %d\n",
mmc_hostname(host), *freq, err);
- mmc_recovery_fallback_lower_speed(host);
+ err = mmc_recovery_fallback_lower_speed(host);
} else {
pr_debug("%s: clock change to %lu finished successfully (%s)\n",
mmc_hostname(host), *freq, current->comm);
@@ -4121,8 +4127,7 @@ int _mmc_detect_card_removed(struct mmc_host *host)
if (ret) {
if (host->ops->get_cd && host->ops->get_cd(host)) {
- mmc_recovery_fallback_lower_speed(host);
- ret = 0;
+ ret = mmc_recovery_fallback_lower_speed(host);
} else {
mmc_card_set_removed(host->card);
if (host->card->sdr104_blocked) {
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 113e64fcd73b..4c6707ecc619 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
}
cf->can_id = id & ESD_IDMASK;
- cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+ cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
if (id & ESD_EXTID)
cf->can_id |= CAN_EFF_FLAG;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index ae5709354546..27e2352fcc42 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -356,6 +356,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
gs_free_tx_context(txc);
+ atomic_dec(&dev->active_tx_urbs);
+
netif_wake_queue(netdev);
}
@@ -444,14 +446,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
urb->transfer_buffer_length,
urb->transfer_buffer,
urb->transfer_dma);
-
- atomic_dec(&dev->active_tx_urbs);
-
- if (!netif_device_present(netdev))
- return;
-
- if (netif_queue_stopped(netdev))
- netif_wake_queue(netdev);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index fdb5cdb3cd15..81abe46c9e0d 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
unsigned long flags;
MAL_DBG2(mal, "poll(%d)" NL, budget);
- again:
+
/* Process TX skbs */
list_for_each(l, &mal->poll_list) {
struct mal_commac *mc =
@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&mal->lock, flags);
mal_disable_eob_irq(mal);
spin_unlock_irqrestore(&mal->lock, flags);
- goto again;
}
mc->ops->poll_tx(mc->dev);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 0e67145bc418..4f34e1b79705 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -4415,13 +4415,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- mvpp2_txq_inc_get(txq_pcpu);
-
dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
tx_buf->size, DMA_TO_DEVICE);
- if (!tx_buf->skb)
- continue;
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->skb)
+ dev_kfree_skb_any(tx_buf->skb);
+
+ mvpp2_txq_inc_get(txq_pcpu);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 1494997c4f7e..4dccf7287f0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -88,10 +88,17 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
}
}
+#define MLX4_EN_WRAP_AROUND_SEC 10UL
+/* By scheduling the overflow check every 5 seconds, we have a reasonably
+ * good chance we won't miss a wrap-around.
+ * TODO: Use a timer instead of a work queue to increase the guarantee.
+ */
+#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
+
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
- mdev->overflow_period);
+ MLX4_EN_OVERFLOW_PERIOD);
unsigned long flags;
if (timeout) {
@@ -236,7 +243,6 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.enable = mlx4_en_phc_enable,
};
-#define MLX4_EN_WRAP_AROUND_SEC 10ULL
/* This function calculates the max shift that enables the user range
* of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
@@ -258,7 +264,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
unsigned long flags;
- u64 ns, zero = 0;
/* mlx4_en_init_timestamp is called for each netdev.
* mdev->ptp_clock is common for all ports, skip initialization if
@@ -282,13 +287,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
ktime_to_ns(ktime_get_real()));
write_unlock_irqrestore(&mdev->clock_lock, flags);
- /* Calculate period in seconds to call the overflow watchdog - to make
- * sure counter is checked at least once every wrap around.
- */
- ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
- mdev->overflow_period = ns;
-
/* Configure the PHC */
mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 31c491e02e69..99361352ed0d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -791,8 +791,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENOSYS;
}
- mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
-
dev->caps.hca_core_clock = hca_param.hca_core_clock;
memset(&dev_cap, 0, sizeof(dev_cap));
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c41f15102ae0..10aa6544cf4d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -409,7 +409,6 @@ struct mlx4_en_dev {
struct cyclecounter cycles;
struct timecounter clock;
unsigned long last_overflow_check;
- unsigned long overflow_period;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct notifier_block nb;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a444294fb555..89ad2b750531 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1196,11 +1196,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
if (tun->flags & IFF_NO_PI) {
- switch (skb->data[0] & 0xf0) {
- case 0x40:
+ u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
+
+ switch (ip_version) {
+ case 4:
pi.proto = htons(ETH_P_IP);
break;
- case 0x60:
+ case 6:
pi.proto = htons(ETH_P_IPV6);
break;
default:
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 096818610d40..213569d384e7 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -631,6 +631,8 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
struct regulatory_request *request))
{
const struct ieee80211_regdomain *regd;
+ u32 chan_num;
+ struct ieee80211_channel *chan;
wiphy->reg_notifier = reg_notifier;
wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
@@ -653,6 +655,20 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
}
wiphy_apply_custom_regulatory(wiphy, regd);
+
+ /* For regulatory rules similar to the following:
+ * REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), channels 12/13 are enabled
+ * due to support of 5/10 MHz.
+ * Therefore, disable 2.4 GHz channels that don't have 20 MHz bandwidth.
+ */
+ for (chan_num = 0;
+ chan_num < wiphy->bands[IEEE80211_BAND_2GHZ]->n_channels;
+ chan_num++) {
+ chan = &wiphy->bands[IEEE80211_BAND_2GHZ]->channels[chan_num];
+ if (chan->flags & IEEE80211_CHAN_NO_20MHZ)
+ chan->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
ath_reg_apply_radar_flags(wiphy);
ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
return 0;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 35dfa410c90c..797c157fa0ba 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -977,7 +977,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
u64 *cookie)
{
const u8 *buf = params->buf;
- size_t len = params->len;
+ size_t len = params->len, total;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
bool tx_status = false;
@@ -1002,7 +1002,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (len < sizeof(struct ieee80211_hdr_3addr))
return -EINVAL;
- cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ total = sizeof(*cmd) + len;
+ if (total < len)
+ return -EINVAL;
+
+ cmd = kmalloc(total, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
@@ -1012,7 +1016,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
cmd->len = cpu_to_le16(len);
memcpy(cmd->payload, buf, len);
- rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+ rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (rc == 0)
tx_status = !evt.evt.status;
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 7a33792913a3..77d1902947e3 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -26,14 +26,17 @@
prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#define FW_ADDR_CHECK(ioaddr, val, msg) do { \
- ioaddr = wmi_buffer(wil, val); \
- if (!ioaddr) { \
- wil_err_fw(wil, "bad " msg ": 0x%08x\n", \
- le32_to_cpu(val)); \
- return -EINVAL; \
- } \
- } while (0)
+static bool wil_fw_addr_check(struct wil6210_priv *wil,
+ void __iomem **ioaddr, __le32 val,
+ u32 size, const char *msg)
+{
+ *ioaddr = wmi_buffer_block(wil, val, size);
+ if (!(*ioaddr)) {
+ wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val));
+ return false;
+ }
+ return true;
+}
/**
* wil_fw_verify - verify firmware file validity
@@ -160,7 +163,8 @@ static int fw_handle_data(struct wil6210_priv *wil, const void *data,
return -EINVAL;
}
- FW_ADDR_CHECK(dst, d->addr, "address");
+ if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+ return -EINVAL;
wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr),
s);
wil_memcpy_toio_32(dst, d->data, s);
@@ -192,7 +196,8 @@ static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
return -EINVAL;
}
- FW_ADDR_CHECK(dst, d->addr, "address");
+ if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+ return -EINVAL;
v = le32_to_cpu(d->value);
wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
@@ -248,7 +253,8 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
u32 v = le32_to_cpu(block[i].value);
u32 x, y;
- FW_ADDR_CHECK(dst, block[i].addr, "address");
+ if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
+ return -EINVAL;
x = readl(dst);
y = (x & m) | (v & ~m);
@@ -314,10 +320,15 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
n, gw_cmd);
- FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
- FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr");
- FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
- FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+ "gateway_addr_addr") ||
+ !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0,
+ "gateway_value_addr") ||
+ !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+ "gateway_cmd_addr") ||
+ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+ "gateway_ctrl_address"))
+ return -EINVAL;
wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
" cmd 0x%08x ctl 0x%08x\n",
@@ -373,12 +384,19 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
n, gw_cmd);
- FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
+ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+ "gateway_addr_addr"))
+ return -EINVAL;
for (k = 0; k < ARRAY_SIZE(block->value); k++)
- FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k],
- "gateway_value_addr");
- FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
- FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+ if (!wil_fw_addr_check(wil, &gwa_val[k],
+ d->gateway_value_addr[k],
+ 0, "gateway_value_addr"))
+ return -EINVAL;
+ if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+ "gateway_cmd_addr") ||
+ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+ "gateway_ctrl_address"))
+ return -EINVAL;
wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
le32_to_cpu(d->gateway_addr_addr),
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 59def4f3fcf3..5cf341702dc1 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -358,6 +358,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil)
wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
}
+static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+{
+ size_t min_size = sizeof(struct wil6210_mbox_hdr) +
+ sizeof(struct wmi_cmd_hdr);
+
+ if (wil->mbox_ctl.rx.entry_size < min_size) {
+ wil_err(wil, "rx mbox entry too small (%d)\n",
+ wil->mbox_ctl.rx.entry_size);
+ return false;
+ }
+ if (wil->mbox_ctl.tx.entry_size < min_size) {
+ wil_err(wil, "tx mbox entry too small (%d)\n",
+ wil->mbox_ctl.tx.entry_size);
+ return false;
+ }
+
+ return true;
+}
+
static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -393,7 +412,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
if (isr & ISR_MISC_FW_READY) {
wil_dbg_irq(wil, "IRQ: FW ready\n");
wil_cache_mbox_regs(wil);
- set_bit(wil_status_mbox_ready, wil->status);
+ if (wil_validate_mbox_regs(wil))
+ set_bit(wil_status_mbox_ready, wil->status);
/**
* Actual FW ready indicated by the
* WMI_FW_READY_EVENTID
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index d11e1d31fc77..2375704ae860 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1106,6 +1106,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->tt_data_set)
wmi_set_tt_cfg(wil, &wil->tt_data);
+ if (wil->snr_thresh.enabled)
+ wmi_set_snr_thresh(wil, wil->snr_thresh.omni,
+ wil->snr_thresh.direct);
+
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
index b91bf51be767..7c9a7900c5ed 100644
--- a/drivers/net/wireless/ath/wil6210/sysfs.c
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -268,10 +268,49 @@ static DEVICE_ATTR(fst_link_loss, 0644,
wil_fst_link_loss_sysfs_show,
wil_fst_link_loss_sysfs_store);
+static ssize_t
+wil_snr_thresh_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ ssize_t len = 0;
+
+ if (wil->snr_thresh.enabled)
+ len = snprintf(buf, PAGE_SIZE, "omni=%d, direct=%d\n",
+ wil->snr_thresh.omni, wil->snr_thresh.direct);
+
+ return len;
+}
+
+static ssize_t
+wil_snr_thresh_sysfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ int rc;
+ short omni, direct;
+
+ /* to disable snr threshold, set both omni and direct to 0 */
+ if (sscanf(buf, "%hd %hd", &omni, &direct) != 2)
+ return -EINVAL;
+
+ rc = wmi_set_snr_thresh(wil, omni, direct);
+ if (!rc)
+ rc = count;
+
+ return rc;
+}
+
+static DEVICE_ATTR(snr_thresh, 0644,
+ wil_snr_thresh_sysfs_show,
+ wil_snr_thresh_sysfs_store);
+
static struct attribute *wil6210_sysfs_entries[] = {
&dev_attr_ftm_txrx_offset.attr,
&dev_attr_thermal_throttling.attr,
&dev_attr_fst_link_loss.attr,
+ &dev_attr_snr_thresh.attr,
NULL
};
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index ef10abc07da6..0df216689d20 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -740,6 +740,11 @@ struct wil6210_priv {
struct wil_ftm_priv ftm;
bool tt_data_set;
struct wmi_tt_data tt_data;
+ struct {
+ bool enabled;
+ short omni;
+ short direct;
+ } snr_thresh;
int fw_calib_result;
@@ -878,6 +883,7 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
void wil_set_ethtoolops(struct net_device *ndev);
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size);
void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
@@ -1053,4 +1059,5 @@ int wmi_link_maintain_cfg_write(struct wil6210_priv *wil,
const u8 *addr,
bool fst_link_loss);
+int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct);
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index e421fdad81e2..27b2f6219ea0 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -141,13 +141,15 @@ static u32 wmi_addr_remap(u32 x)
/**
* Check address validity for WMI buffer; remap if needed
* @ptr - internal (linker) fw/ucode address
+ * @size - if non zero, validate the block does not
+ * exceed the device memory (bar)
*
* Valid buffer should be DWORD aligned
*
* return address for accessing buffer from the host;
* if buffer is not valid, return NULL.
*/
-void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size)
{
u32 off;
u32 ptr = le32_to_cpu(ptr_);
@@ -162,10 +164,17 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
off = HOSTADDR(ptr);
if (off > wil->bar_size - 4)
return NULL;
+ if (size && ((off + size > wil->bar_size) || (off + size < off)))
+ return NULL;
return wil->csr + off;
}
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+ return wmi_buffer_block(wil, ptr_, 0);
+}
+
/**
* Check address validity
*/
@@ -223,7 +232,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
uint retry;
int rc = 0;
- if (sizeof(cmd) + len > r->entry_size) {
+ if (len > r->entry_size - sizeof(cmd)) {
wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
(int)(sizeof(cmd) + len), r->entry_size);
return -ERANGE;
@@ -369,7 +378,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
s32 signal;
__le16 fc;
u32 d_len;
- u16 d_status;
+ s16 snr;
if (flen < 0) {
wil_err(wil, "MGMT Rx: short event, len %d\n", len);
@@ -391,13 +400,13 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
signal = 100 * data->info.rssi;
else
signal = data->info.sqi;
- d_status = le16_to_cpu(data->info.status);
+ snr = le16_to_cpu(data->info.snr); /* 1/4 dB units */
fc = rx_mgmt_frame->frame_control;
wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d RSSI %d SQI %d%%\n",
data->info.channel, data->info.mcs, data->info.rssi,
data->info.sqi);
- wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
+ wil_dbg_wmi(wil, "snr %ddB len %d fc 0x%04x\n", snr / 4, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
data->info.qid, data->info.mid, data->info.cid);
@@ -425,6 +434,11 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+ if (wil->snr_thresh.enabled && snr < wil->snr_thresh.omni) {
+ wil_dbg_wmi(wil, "snr below threshold. dropping\n");
+ return;
+ }
+
bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
d_len, signal, GFP_KERNEL);
if (bss) {
@@ -1411,8 +1425,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
};
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
- struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ struct wmi_set_appie_cmd *cmd;
+ if (len < ie_len) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ cmd = kzalloc(len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
@@ -2149,3 +2169,32 @@ out:
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
return rc;
}
+
+int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct)
+{
+ int rc;
+ struct wmi_set_connect_snr_thr_cmd cmd = {
+ .enable = true,
+ .omni_snr_thr = cpu_to_le16(omni),
+ .direct_snr_thr = cpu_to_le16(direct),
+ };
+
+ if (!test_bit(WMI_FW_CAPABILITY_CONNECT_SNR_THR, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ if (omni == 0 && direct == 0)
+ cmd.enable = false;
+
+ wil_dbg_wmi(wil, "%s snr thresh omni=%d, direct=%d (1/4 dB units)\n",
+ cmd.enable ? "enable" : "disable", omni, direct);
+
+ rc = wmi_send(wil, WMI_SET_CONNECT_SNR_THR_CMDID, &cmd, sizeof(cmd));
+ if (rc)
+ return rc;
+
+ wil->snr_thresh.enabled = cmd.enable;
+ wil->snr_thresh.omni = omni;
+ wil->snr_thresh.direct = direct;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 5263ee717a4f..35752885320e 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -71,6 +71,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_RSSI_REPORTING = 12,
WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13,
WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14,
+ WMI_FW_CAPABILITY_CONNECT_SNR_THR = 16,
WMI_FW_CAPABILITY_MAX,
};
@@ -1821,7 +1822,7 @@ struct wmi_rx_mgmt_info {
u8 range;
u8 sqi;
__le16 stype;
- __le16 status;
+ __le16 snr;
__le32 len;
/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
u8 qid;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 99dac9b8a082..c75bfd3f8cb3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
}
static void
-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
- u8 len)
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
+ const u8 *dlys, u8 len)
{
u32 t1_offset, t2_offset;
u8 ctr;
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
{
u16 currband;
- s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
- s8 *lna1_gain_db = NULL;
- s8 *lna1_gain_db_2 = NULL;
- s8 *lna2_gain_db = NULL;
- s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
- s8 *tia_gain_db;
- s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
- s8 *tia_gainbits;
- u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
- u16 *rfseq_init_gain;
+ static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+ const s8 *lna1_gain_db = NULL;
+ const s8 *lna1_gain_db_2 = NULL;
+ const s8 *lna2_gain_db = NULL;
+ static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+ const s8 *tia_gain_db;
+ static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+ const s8 *tia_gainbits;
+ static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+ const u16 *rfseq_init_gain;
u16 init_gaincode;
u16 clip1hi_gaincode;
u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
if ((freq <= 5080) || (freq == 5825)) {
- s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
- s8 lna1A_gain_db_2_rev7[] = {
- 11, 17, 22, 25};
- s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
+ static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
crsminu_th = 0x3e;
lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
lna2_gain_db = lna2A_gain_db_rev7;
} else if ((freq >= 5500) && (freq <= 5700)) {
- s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
- s8 lna1A_gain_db_2_rev7[] = {
- 12, 18, 22, 26};
- s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+ static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+ static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
crsminu_th = 0x45;
clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
lna2_gain_db = lna2A_gain_db_rev7;
} else {
- s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
- s8 lna1A_gain_db_2_rev7[] = {
- 12, 18, 22, 26};
- s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+ static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
crsminu_th = 0x41;
lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
NPHY_RFSEQ_CMD_SET_HPF_BW
};
- u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
- s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
- s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
- s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
- s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
- s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
- s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
- s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
- s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
- s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
- s8 *lna1_gain_db = NULL;
- s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
- s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
- s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
- s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
- s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
- s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
- s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
- s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
- s8 *lna2_gain_db = NULL;
- s8 tiaG_gain_db[] = {
+ static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+ static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+ static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+ static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+ static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+ static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+ static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+ static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+ static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+ static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+ const s8 *lna1_gain_db = NULL;
+ static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+ static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+ static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+ static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+ static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+ static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+ static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+ static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+ const s8 *lna2_gain_db = NULL;
+ static const s8 tiaG_gain_db[] = {
0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
- s8 tiaA_gain_db[] = {
+ static const s8 tiaA_gain_db[] = {
0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
- s8 tiaA_gain_db_rev4[] = {
+ static const s8 tiaA_gain_db_rev4[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 tiaA_gain_db_rev5[] = {
+ static const s8 tiaA_gain_db_rev5[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 tiaA_gain_db_rev6[] = {
+ static const s8 tiaA_gain_db_rev6[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 *tia_gain_db;
- s8 tiaG_gainbits[] = {
+ const s8 *tia_gain_db;
+ static const s8 tiaG_gainbits[] = {
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
- s8 tiaA_gainbits[] = {
+ static const s8 tiaA_gainbits[] = {
0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
- s8 tiaA_gainbits_rev4[] = {
+ static const s8 tiaA_gainbits_rev4[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 tiaA_gainbits_rev5[] = {
+ static const s8 tiaA_gainbits_rev5[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 tiaA_gainbits_rev6[] = {
+ static const s8 tiaA_gainbits_rev6[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 *tia_gainbits;
- s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
- s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
- u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
- u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
- u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
- u16 rfseqG_init_gain_rev5_elna[] = {
+ const s8 *tia_gainbits;
+ static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+ static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+ static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+ static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+ static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+ static const u16 rfseqG_init_gain_rev5_elna[] = {
0x013f, 0x013f, 0x013f, 0x013f };
- u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
- u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
- u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
- u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
- u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
- u16 rfseqA_init_gain_rev4_elna[] = {
+ static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+ static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+ static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+ static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+ static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+ static const u16 rfseqA_init_gain_rev4_elna[] = {
0x314f, 0x314f, 0x314f, 0x314f };
- u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
- u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
- u16 *rfseq_init_gain;
+ static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+ static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+ const u16 *rfseq_init_gain;
u16 initG_gaincode = 0x627e;
u16 initG_gaincode_rev4 = 0x527e;
u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
u16 clip1mdA_gaincode_rev6 = 0x2084;
u16 clip1md_gaincode = 0;
u16 clip1loG_gaincode = 0x0074;
- u16 clip1loG_gaincode_rev5[] = {
+ static const u16 clip1loG_gaincode_rev5[] = {
0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
};
- u16 clip1loG_gaincode_rev6[] = {
+ static const u16 clip1loG_gaincode_rev6[] = {
0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
};
u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
{
- u8 rfseq_rx2tx_events[] = {
+ static const u8 rfseq_rx2tx_events[] = {
NPHY_RFSEQ_CMD_NOP,
NPHY_RFSEQ_CMD_RXG_FBW,
NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_EXT_PA
};
u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
- u8 rfseq_tx2rx_events[] = {
+ static const u8 rfseq_tx2rx_events[] = {
NPHY_RFSEQ_CMD_NOP,
NPHY_RFSEQ_CMD_EXT_PA,
NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_RXG_FBW,
NPHY_RFSEQ_CMD_CLR_HIQ_DIS
};
- u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
- u8 rfseq_tx2rx_events_rev3[] = {
+ static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+ static const u8 rfseq_tx2rx_events_rev3[] = {
NPHY_REV3_RFSEQ_CMD_EXT_PA,
NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
NPHY_REV3_RFSEQ_CMD_END
};
- u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
u8 rfseq_rx2tx_events_rev3[] = {
NPHY_REV3_RFSEQ_CMD_NOP,
NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
};
u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
- u8 rfseq_rx2tx_events_rev3_ipa[] = {
+ static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
NPHY_REV3_RFSEQ_CMD_NOP,
NPHY_REV3_RFSEQ_CMD_RXG_FBW,
NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
NPHY_REV3_RFSEQ_CMD_END
};
- u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
- u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+ static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+ static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
s16 alpha0, alpha1, alpha2;
s16 beta0, beta1, beta2;
u32 leg_data_weights, ht_data_weights, nss1_data_weights,
stbc_data_weights;
u8 chan_freq_range = 0;
- u16 dac_control = 0x0002;
+ static const u16 dac_control = 0x0002;
u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
u16 *aux_adc_gain;
- u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
- u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+ static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+ static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
s32 min_nvar_val = 0x18d;
s32 min_nvar_offset_6mbps = 20;
u8 pdetrange;
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
- u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
- u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
- u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+ static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+ static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+ static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
u16 ipalvlshift_3p3_war_en = 0;
u16 rccal_bcap_val, rccal_scap_val;
u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
u16 bbmult;
u16 tblentry;
- struct nphy_txiqcal_ladder ladder_lo[] = {
+ static const struct nphy_txiqcal_ladder ladder_lo[] = {
{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
{25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
{25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
};
- struct nphy_txiqcal_ladder ladder_iq[] = {
+ static const struct nphy_txiqcal_ladder ladder_iq[] = {
{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
{25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
{100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
u16 cal_gain[2];
struct nphy_iqcal_params cal_params[2];
u32 tbl_len;
- void *tbl_ptr;
+ const void *tbl_ptr;
bool ladder_updated[2];
u8 mphase_cal_lastphase = 0;
int bcmerror = 0;
bool phyhang_avoid_state = false;
- u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+ static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
0x1902,
0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
0x6407
};
- u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+ static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
0x3200,
0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
0x6407
};
- u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+ static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
0x1202,
0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
0x4707
};
- u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+ static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
0x2300,
0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
0x4707
};
- u16 tbl_tx_iqlo_cal_startcoefs[] = {
+ static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000
};
- u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
0x9123, 0x9264, 0x9086, 0x9245, 0x9056
};
- u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
0x9101, 0x9253, 0x9053, 0x9234, 0x9034
};
- u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000
};
- u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
};
- u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
};
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 916820ee4f5d..5e2d44ce1c55 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -168,11 +168,11 @@ static ssize_t cnss_dev_boot_debug_write(struct file *fp,
set_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
ret = cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_POWER_UP,
- true, NULL);
+ CNSS_EVENT_SYNC, NULL);
} else if (sysfs_streq(cmd, "shutdown")) {
ret = cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_POWER_DOWN,
- true, NULL);
+ CNSS_EVENT_SYNC, NULL);
clear_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
} else {
cnss_pr_err("Device boot debugfs command is invalid\n");
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index d3afb516b119..fc90e30a53ca 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -189,19 +189,20 @@ static void cnss_pm_relax(struct cnss_plat_data *plat_priv)
pm_relax(&plat_priv->plat_dev->dev);
}
-void cnss_lock_pm_sem(void)
+void cnss_lock_pm_sem(struct device *dev)
{
down_read(&cnss_pm_sem);
}
EXPORT_SYMBOL(cnss_lock_pm_sem);
-void cnss_release_pm_sem(void)
+void cnss_release_pm_sem(struct device *dev)
{
up_read(&cnss_pm_sem);
}
EXPORT_SYMBOL(cnss_release_pm_sem);
-int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
+int cnss_get_fw_files_for_target(struct device *dev,
+ struct cnss_fw_files *pfw_files,
u32 target_type, u32 target_version)
{
if (!pfw_files)
@@ -223,10 +224,10 @@ int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
}
EXPORT_SYMBOL(cnss_get_fw_files_for_target);
-int cnss_request_bus_bandwidth(int bandwidth)
+int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
{
int ret = 0;
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
struct cnss_bus_bw_info *bus_bw_info;
if (!plat_priv)
@@ -258,9 +259,9 @@ int cnss_request_bus_bandwidth(int bandwidth)
}
EXPORT_SYMBOL(cnss_request_bus_bandwidth);
-int cnss_get_platform_cap(struct cnss_platform_cap *cap)
+int cnss_get_platform_cap(struct device *dev, struct cnss_platform_cap *cap)
{
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
if (!plat_priv)
return -ENODEV;
@@ -289,20 +290,9 @@ int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
}
EXPORT_SYMBOL(cnss_get_soc_info);
-void cnss_set_driver_status(enum cnss_driver_status driver_status)
+void cnss_request_pm_qos(struct device *dev, u32 qos_val)
{
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
-
- if (!plat_priv)
- return;
-
- plat_priv->driver_status = driver_status;
-}
-EXPORT_SYMBOL(cnss_set_driver_status);
-
-void cnss_request_pm_qos(u32 qos_val)
-{
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
if (!plat_priv)
return;
@@ -312,9 +302,9 @@ void cnss_request_pm_qos(u32 qos_val)
}
EXPORT_SYMBOL(cnss_request_pm_qos);
-void cnss_remove_pm_qos(void)
+void cnss_remove_pm_qos(struct device *dev)
{
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
if (!plat_priv)
return;
@@ -712,19 +702,19 @@ static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
enum cnss_driver_event_type type,
- bool sync, void *data)
+ u32 flags, void *data)
{
struct cnss_driver_event *event;
- unsigned long flags;
+ unsigned long irq_flags;
int gfp = GFP_KERNEL;
int ret = 0;
if (!plat_priv)
return -ENODEV;
- cnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx\n",
+ cnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx flags: 0x%0x\n",
cnss_driver_event_to_str(type), type,
- sync ? "-sync" : "", plat_priv->driver_state);
+ flags ? "-sync" : "", plat_priv->driver_state, flags);
if (type >= CNSS_DRIVER_EVENT_MAX) {
cnss_pr_err("Invalid Event type: %d, can't post", type);
@@ -744,31 +734,33 @@ int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
event->data = data;
init_completion(&event->complete);
event->ret = CNSS_EVENT_PENDING;
- event->sync = sync;
+ event->sync = !!(flags & CNSS_EVENT_SYNC);
- spin_lock_irqsave(&plat_priv->event_lock, flags);
+ spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
list_add_tail(&event->list, &plat_priv->event_list);
- spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+ spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
queue_work(plat_priv->event_wq, &plat_priv->event_work);
- if (!sync)
+ if (!(flags & CNSS_EVENT_SYNC))
goto out;
- ret = wait_for_completion_interruptible(&event->complete);
+ if (flags & CNSS_EVENT_UNINTERRUPTIBLE)
+ wait_for_completion(&event->complete);
+ else
+ ret = wait_for_completion_interruptible(&event->complete);
cnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
cnss_driver_event_to_str(type), type,
plat_priv->driver_state, ret, event->ret);
-
- spin_lock_irqsave(&plat_priv->event_lock, flags);
+ spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
if (ret == -ERESTARTSYS && event->ret == CNSS_EVENT_PENDING) {
event->sync = false;
- spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+ spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
ret = -EINTR;
goto out;
}
- spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+ spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
ret = event->ret;
kfree(event);
@@ -793,7 +785,7 @@ int cnss_power_up(struct device *dev)
ret = cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_POWER_UP,
- true, NULL);
+ CNSS_EVENT_SYNC, NULL);
if (ret)
goto out;
@@ -831,7 +823,7 @@ int cnss_power_down(struct device *dev)
return cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_POWER_DOWN,
- true, NULL);
+ CNSS_EVENT_SYNC, NULL);
}
EXPORT_SYMBOL(cnss_power_down);
@@ -852,7 +844,8 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
ret = cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_REGISTER_DRIVER,
- true, driver_ops);
+ CNSS_EVENT_SYNC_UNINTERRUPTIBLE,
+ driver_ops);
return ret;
}
EXPORT_SYMBOL(cnss_wlan_register_driver);
@@ -868,7 +861,7 @@ void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
- true, NULL);
+ CNSS_EVENT_SYNC, NULL);
}
EXPORT_SYMBOL(cnss_wlan_unregister_driver);
@@ -1048,7 +1041,8 @@ static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
cnss_driver_call_remove(plat_priv);
- cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
+ cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
+ CNSS_BUS_WIDTH_NONE);
cnss_pci_set_monitor_wake_intr(pci_priv, false);
cnss_pci_set_auto_suspended(pci_priv, 0);
@@ -1154,7 +1148,8 @@ static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
cnss_driver_call_remove(plat_priv);
- cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
+ cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
+ CNSS_BUS_WIDTH_NONE);
cnss_pci_set_monitor_wake_intr(pci_priv, false);
cnss_pci_set_auto_suspended(pci_priv, 0);
@@ -1195,26 +1190,9 @@ static void cnss_qca6290_crash_shutdown(struct cnss_plat_data *plat_priv)
cnss_pci_collect_dump_info(pci_priv);
}
-static int cnss_powerup(const struct subsys_desc *subsys_desc)
+static int cnss_powerup(struct cnss_plat_data *plat_priv)
{
- int ret = 0;
- struct cnss_plat_data *plat_priv;
-
- if (!subsys_desc->dev) {
- cnss_pr_err("dev from subsys_desc is NULL\n");
- return -ENODEV;
- }
-
- plat_priv = dev_get_drvdata(subsys_desc->dev);
- if (!plat_priv) {
- cnss_pr_err("plat_priv is NULL!\n");
- return -ENODEV;
- }
-
- if (!plat_priv->driver_state) {
- cnss_pr_dbg("Powerup is ignored.\n");
- return 0;
- }
+ int ret;
switch (plat_priv->device_id) {
case QCA6174_DEVICE_ID:
@@ -1233,21 +1211,9 @@ static int cnss_powerup(const struct subsys_desc *subsys_desc)
return ret;
}
-static int cnss_shutdown(const struct subsys_desc *subsys_desc, bool force_stop)
+static int cnss_shutdown(struct cnss_plat_data *plat_priv)
{
- int ret = 0;
- struct cnss_plat_data *plat_priv;
-
- if (!subsys_desc->dev) {
- cnss_pr_err("dev from subsys_desc is NULL\n");
- return -ENODEV;
- }
-
- plat_priv = dev_get_drvdata(subsys_desc->dev);
- if (!plat_priv) {
- cnss_pr_err("plat_priv is NULL!\n");
- return -ENODEV;
- }
+ int ret;
switch (plat_priv->device_id) {
case QCA6174_DEVICE_ID:
@@ -1266,6 +1232,53 @@ static int cnss_shutdown(const struct subsys_desc *subsys_desc, bool force_stop)
return ret;
}
+static int cnss_subsys_powerup(const struct subsys_desc *subsys_desc)
+{
+ struct cnss_plat_data *plat_priv;
+
+ if (!subsys_desc->dev) {
+ cnss_pr_err("dev from subsys_desc is NULL\n");
+ return -ENODEV;
+ }
+
+ plat_priv = dev_get_drvdata(subsys_desc->dev);
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (!plat_priv->driver_state) {
+ cnss_pr_dbg("Powerup is ignored\n");
+ return 0;
+ }
+
+ return cnss_powerup(plat_priv);
+}
+
+static int cnss_subsys_shutdown(const struct subsys_desc *subsys_desc,
+ bool force_stop)
+{
+ struct cnss_plat_data *plat_priv;
+
+ if (!subsys_desc->dev) {
+ cnss_pr_err("dev from subsys_desc is NULL\n");
+ return -ENODEV;
+ }
+
+ plat_priv = dev_get_drvdata(subsys_desc->dev);
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (!plat_priv->driver_state) {
+ cnss_pr_dbg("shutdown is ignored\n");
+ return 0;
+ }
+
+ return cnss_shutdown(plat_priv);
+}
+
static int cnss_qca6290_ramdump(struct cnss_plat_data *plat_priv)
{
struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
@@ -1321,7 +1334,8 @@ static int cnss_qca6174_ramdump(struct cnss_plat_data *plat_priv)
return ret;
}
-static int cnss_ramdump(int enable, const struct subsys_desc *subsys_desc)
+static int cnss_subsys_ramdump(int enable,
+ const struct subsys_desc *subsys_desc)
{
int ret = 0;
struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
@@ -1351,9 +1365,9 @@ static int cnss_ramdump(int enable, const struct subsys_desc *subsys_desc)
return ret;
}
-void *cnss_get_virt_ramdump_mem(unsigned long *size)
+void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
{
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
struct cnss_ramdump_info *ramdump_info;
if (!plat_priv)
@@ -1366,9 +1380,9 @@ void *cnss_get_virt_ramdump_mem(unsigned long *size)
}
EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
-void cnss_device_crashed(void)
+void cnss_device_crashed(struct device *dev)
{
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
struct cnss_subsys_info *subsys_info;
if (!plat_priv)
@@ -1383,7 +1397,7 @@ void cnss_device_crashed(void)
}
EXPORT_SYMBOL(cnss_device_crashed);
-static void cnss_crash_shutdown(const struct subsys_desc *subsys_desc)
+static void cnss_subsys_crash_shutdown(const struct subsys_desc *subsys_desc)
{
struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
@@ -1472,8 +1486,8 @@ static int cnss_do_recovery(struct cnss_plat_data *plat_priv,
return 0;
self_recovery:
- cnss_shutdown(&subsys_info->subsys_desc, false);
- cnss_powerup(&subsys_info->subsys_desc);
+ cnss_shutdown(plat_priv);
+ cnss_powerup(plat_priv);
return 0;
}
@@ -1554,7 +1568,7 @@ void cnss_schedule_recovery(struct device *dev,
data->reason = reason;
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_RECOVERY,
- false, data);
+ 0, data);
}
EXPORT_SYMBOL(cnss_schedule_recovery);
@@ -1601,7 +1615,7 @@ int cnss_force_fw_assert(struct device *dev)
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
- false, NULL);
+ 0, NULL);
return 0;
}
@@ -1622,12 +1636,11 @@ static int cnss_register_driver_hdlr(struct cnss_plat_data *plat_priv,
void *data)
{
int ret = 0;
- struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
plat_priv->driver_ops = data;
- ret = cnss_powerup(&subsys_info->subsys_desc);
+ ret = cnss_powerup(plat_priv);
if (ret) {
clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
plat_priv->driver_ops = NULL;
@@ -1638,10 +1651,8 @@ static int cnss_register_driver_hdlr(struct cnss_plat_data *plat_priv,
static int cnss_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
{
- struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
-
set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
- cnss_shutdown(&subsys_info->subsys_desc, false);
+ cnss_shutdown(plat_priv);
plat_priv->driver_ops = NULL;
return 0;
@@ -1650,10 +1661,9 @@ static int cnss_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
{
int ret = 0;
- struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
set_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
- ret = cnss_powerup(&subsys_info->subsys_desc);
+ ret = cnss_powerup(plat_priv);
if (ret)
clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
@@ -1662,10 +1672,8 @@ static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
{
- struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
-
cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01);
- cnss_shutdown(&subsys_info->subsys_desc, false);
+ cnss_shutdown(plat_priv);
clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
return 0;
@@ -1673,16 +1681,12 @@ static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
static int cnss_power_up_hdlr(struct cnss_plat_data *plat_priv)
{
- struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
-
- return cnss_powerup(&subsys_info->subsys_desc);
+ return cnss_powerup(plat_priv);
}
static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
{
- struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
-
- cnss_shutdown(&subsys_info->subsys_desc, false);
+ cnss_shutdown(plat_priv);
return 0;
}
@@ -1807,10 +1811,10 @@ int cnss_register_subsys(struct cnss_plat_data *plat_priv)
}
subsys_info->subsys_desc.owner = THIS_MODULE;
- subsys_info->subsys_desc.powerup = cnss_powerup;
- subsys_info->subsys_desc.shutdown = cnss_shutdown;
- subsys_info->subsys_desc.ramdump = cnss_ramdump;
- subsys_info->subsys_desc.crash_shutdown = cnss_crash_shutdown;
+ subsys_info->subsys_desc.powerup = cnss_subsys_powerup;
+ subsys_info->subsys_desc.shutdown = cnss_subsys_shutdown;
+ subsys_info->subsys_desc.ramdump = cnss_subsys_ramdump;
+ subsys_info->subsys_desc.crash_shutdown = cnss_subsys_crash_shutdown;
subsys_info->subsys_desc.dev = &plat_priv->plat_dev->dev;
subsys_info->subsys_device = subsys_register(&subsys_info->subsys_desc);
@@ -2113,7 +2117,7 @@ static ssize_t cnss_fs_ready_store(struct device *dev,
if (fs_ready == FILE_SYSTEM_READY) {
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
- true, NULL);
+ CNSS_EVENT_SYNC, NULL);
}
return count;
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index 4bf1c27d99de..81b5de8bc66f 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -25,6 +25,11 @@
#define MAX_NO_OF_MAC_ADDR 4
+#define CNSS_EVENT_SYNC BIT(0)
+#define CNSS_EVENT_UNINTERRUPTIBLE BIT(1)
+#define CNSS_EVENT_SYNC_UNINTERRUPTIBLE (CNSS_EVENT_SYNC | \
+ CNSS_EVENT_UNINTERRUPTIBLE)
+
enum cnss_dev_bus_type {
CNSS_BUS_NONE = -1,
CNSS_BUS_PCI,
@@ -205,7 +210,7 @@ void *cnss_bus_dev_to_bus_priv(struct device *dev);
struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
enum cnss_driver_event_type type,
- bool sync, void *data);
+ u32 flags, void *data);
int cnss_get_vreg(struct cnss_plat_data *plat_priv);
int cnss_get_pinctrl(struct cnss_plat_data *plat_priv);
int cnss_power_on_device(struct cnss_plat_data *plat_priv);
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 2efc3aa63a75..8d34d74477eb 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -568,9 +568,9 @@ static int cnss_pci_runtime_idle(struct device *dev)
return -EBUSY;
}
-int cnss_wlan_pm_control(bool vote)
+int cnss_wlan_pm_control(struct device *dev, bool vote)
{
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
struct cnss_pci_data *pci_priv;
struct pci_dev *pci_dev;
@@ -590,10 +590,10 @@ int cnss_wlan_pm_control(bool vote)
}
EXPORT_SYMBOL(cnss_wlan_pm_control);
-int cnss_auto_suspend(void)
+int cnss_auto_suspend(struct device *dev)
{
int ret = 0;
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
struct pci_dev *pci_dev;
struct cnss_pci_data *pci_priv;
struct cnss_bus_bw_info *bus_bw_info;
@@ -648,10 +648,10 @@ out:
}
EXPORT_SYMBOL(cnss_auto_suspend);
-int cnss_auto_resume(void)
+int cnss_auto_resume(struct device *dev)
{
int ret = 0;
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
struct pci_dev *pci_dev;
struct cnss_pci_data *pci_priv;
struct cnss_bus_bw_info *bus_bw_info;
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index e010e2c39f02..f4344aee54ee 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -130,13 +130,13 @@ static int cnss_wlfw_clnt_svc_event_notifier(struct notifier_block *nb,
case QMI_SERVER_ARRIVE:
ret = cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_SERVER_ARRIVE,
- false, NULL);
+ 0, NULL);
break;
case QMI_SERVER_EXIT:
ret = cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_SERVER_EXIT,
- false, NULL);
+ 0, NULL);
break;
default:
cnss_pr_dbg("Invalid QMI service event: %ld\n", code);
@@ -278,7 +278,7 @@ static int cnss_wlfw_request_mem_ind_hdlr(struct cnss_plat_data *plat_priv,
fw_mem->size = ind_msg.size;
cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
- false, NULL);
+ 0, NULL);
return 0;
}
@@ -906,17 +906,17 @@ static void cnss_wlfw_clnt_ind(struct qmi_handle *handle,
case QMI_WLFW_FW_MEM_READY_IND_V01:
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_FW_MEM_READY,
- false, NULL);
+ 0, NULL);
break;
case QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01:
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
- false, NULL);
+ 0, NULL);
break;
case QMI_WLFW_FW_READY_IND_V01:
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_FW_READY,
- false, NULL);
+ 0, NULL);
break;
case QMI_WLFW_PIN_CONNECT_RESULT_IND_V01:
cnss_qmi_pin_result_ind_hdlr(plat_priv, msg, msg_len);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 019d7165a045..2a996a68fc2b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2884,6 +2884,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
+ const char *hwname = NULL;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -2897,8 +2898,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
- if (info->attrs[HWSIM_ATTR_RADIO_NAME])
- param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
+ if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+ hwname = kasprintf(GFP_KERNEL, "%.*s",
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ if (!hwname)
+ return -ENOMEM;
+ param.hwname = hwname;
+ }
if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
param.use_chanctx = true;
@@ -2926,11 +2933,15 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
s64 idx = -1;
const char *hwname = NULL;
- if (info->attrs[HWSIM_ATTR_RADIO_ID])
+ if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
- else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
- hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
- else
+ } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+ hwname = kasprintf(GFP_KERNEL, "%.*s",
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ if (!hwname)
+ return -ENOMEM;
+ } else
return -EINVAL;
spin_lock_bh(&hwsim_radio_lock);
@@ -2939,7 +2950,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (data->idx != idx)
continue;
} else {
- if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
+ if (!hwname ||
+ strcmp(hwname, wiphy_name(data->hw->wiphy)))
continue;
}
@@ -2947,10 +2959,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
spin_unlock_bh(&hwsim_radio_lock);
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
info);
+ kfree(hwname);
return 0;
}
spin_unlock_bh(&hwsim_radio_lock);
+ kfree(hwname);
return -ENODEV;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index c2103e7a8132..bbb789f8990b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1127,7 +1127,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
}
if (0 == tmp) {
read_addr = REG_DBI_RDATA + addr % 4;
- ret = rtl_read_byte(rtlpriv, read_addr);
+ ret = rtl_read_word(rtlpriv, read_addr);
}
return ret;
}
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 538d8f91ce1b..aa5e02ece208 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -466,39 +466,43 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
dev_dbg(&nqx_dev->client->dev,
"gpio_set_value disable: %s: info: %p\n",
__func__, nqx_dev);
- if (gpio_is_valid(nqx_dev->firm_gpio))
+ if (gpio_is_valid(nqx_dev->firm_gpio)) {
gpio_set_value(nqx_dev->firm_gpio, 0);
+ usleep_range(10000, 10100);
+ }
if (gpio_is_valid(nqx_dev->ese_gpio)) {
if (!gpio_get_value(nqx_dev->ese_gpio)) {
dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
gpio_set_value(nqx_dev->en_gpio, 0);
+ usleep_range(10000, 10100);
} else {
dev_dbg(&nqx_dev->client->dev, "keeping en_gpio high\n");
}
} else {
dev_dbg(&nqx_dev->client->dev, "ese_gpio invalid, set en_gpio to low\n");
gpio_set_value(nqx_dev->en_gpio, 0);
+ usleep_range(10000, 10100);
}
r = nqx_clock_deselect(nqx_dev);
if (r < 0)
dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
nqx_dev->nfc_ven_enabled = false;
- /* hardware dependent delay */
- msleep(100);
} else if (arg == 1) {
nqx_enable_irq(nqx_dev);
dev_dbg(&nqx_dev->client->dev,
"gpio_set_value enable: %s: info: %p\n",
__func__, nqx_dev);
- if (gpio_is_valid(nqx_dev->firm_gpio))
+ if (gpio_is_valid(nqx_dev->firm_gpio)) {
gpio_set_value(nqx_dev->firm_gpio, 0);
+ usleep_range(10000, 10100);
+ }
gpio_set_value(nqx_dev->en_gpio, 1);
+ usleep_range(10000, 10100);
r = nqx_clock_select(nqx_dev);
if (r < 0)
dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
nqx_dev->nfc_ven_enabled = true;
- msleep(20);
} else if (arg == 2) {
/*
* We are switching to Dowload Mode, toggle the enable pin
@@ -511,14 +515,15 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
}
}
gpio_set_value(nqx_dev->en_gpio, 1);
- msleep(20);
- if (gpio_is_valid(nqx_dev->firm_gpio))
+ usleep_range(10000, 10100);
+ if (gpio_is_valid(nqx_dev->firm_gpio)) {
gpio_set_value(nqx_dev->firm_gpio, 1);
- msleep(20);
+ usleep_range(10000, 10100);
+ }
gpio_set_value(nqx_dev->en_gpio, 0);
- msleep(100);
+ usleep_range(10000, 10100);
gpio_set_value(nqx_dev->en_gpio, 1);
- msleep(20);
+ usleep_range(10000, 10100);
} else {
r = -ENOIOCTLCMD;
}
@@ -643,13 +648,14 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
unsigned char nci_reset_rsp[6];
unsigned char init_rsp_len = 0;
unsigned int enable_gpio = nqx_dev->en_gpio;
+
/* making sure that the NFCC starts in a clean state. */
gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
/* hardware dependent delay */
- msleep(20);
+ usleep_range(10000, 10100);
gpio_set_value(enable_gpio, 1);/* HPD : Enable*/
/* hardware dependent delay */
- msleep(20);
+ usleep_range(10000, 10100);
/* send NCI CORE RESET CMD with Keep Config parameters */
ret = i2c_master_send(client, raw_nci_reset_cmd,
@@ -665,21 +671,17 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
/* Read Response of RESET command */
ret = i2c_master_recv(client, nci_reset_rsp,
sizeof(nci_reset_rsp));
- dev_err(&client->dev,
- "%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
- __func__, nci_reset_rsp[0],
- nci_reset_rsp[1], nci_reset_rsp[2]);
if (ret < 0) {
dev_err(&client->dev,
"%s: - i2c_master_recv Error\n", __func__);
goto err_nfcc_hw_check;
}
- ret = i2c_master_send(client, raw_nci_init_cmd,
- sizeof(raw_nci_init_cmd));
+ ret = nqx_standby_write(nqx_dev, raw_nci_init_cmd,
+ sizeof(raw_nci_init_cmd));
if (ret < 0) {
dev_err(&client->dev,
"%s: - i2c_master_send Error\n", __func__);
- goto err_nfcc_hw_check;
+ goto err_nfcc_core_init_fail;
}
/* hardware dependent delay */
msleep(30);
@@ -689,7 +691,7 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
if (ret < 0) {
dev_err(&client->dev,
"%s: - i2c_master_recv Error\n", __func__);
- goto err_nfcc_hw_check;
+ goto err_nfcc_core_init_fail;
}
init_rsp_len = 2 + nci_init_rsp[2]; /*payload + len*/
if (init_rsp_len > PAYLOAD_HEADER_LENGTH) {
@@ -702,6 +704,11 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
nqx_dev->nqx_info.info.fw_minor =
nci_init_rsp[init_rsp_len];
}
+ dev_dbg(&client->dev,
+ "%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+ __func__, nci_reset_rsp[0],
+ nci_reset_rsp[1], nci_reset_rsp[2]);
+
dev_dbg(&nqx_dev->client->dev, "NQ NFCC chip_type = %x\n",
nqx_dev->nqx_info.info.chip_type);
dev_dbg(&nqx_dev->client->dev, "NQ fw version = %x.%x.%x\n",
@@ -741,6 +748,12 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
ret = 0;
goto done;
+err_nfcc_core_init_fail:
+ dev_err(&client->dev,
+ "%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+ __func__, nci_reset_rsp[0],
+ nci_reset_rsp[1], nci_reset_rsp[2]);
+
err_nfcc_hw_check:
ret = -ENXIO;
dev_err(&client->dev,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 9e19fa625daa..b510bbd7d6c7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -2001,6 +2001,7 @@ static int ipa_q6_set_ex_path_dis_agg(void)
int index;
struct ipa_register_write *reg_write;
int retval;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
desc = kcalloc(ipa_ctx->ipa_num_pipes, sizeof(struct ipa_desc),
GFP_KERNEL);
@@ -2018,7 +2019,7 @@ static int ipa_q6_set_ex_path_dis_agg(void)
if (ipa_ctx->ep[ep_idx].valid &&
ipa_ctx->ep[ep_idx].skip_ep_cfg) {
BUG_ON(num_descs >= ipa_ctx->ipa_num_pipes);
- reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);
+ reg_write = kzalloc(sizeof(*reg_write), flag);
if (!reg_write) {
IPAERR("failed to allocate memory\n");
@@ -2051,7 +2052,7 @@ static int ipa_q6_set_ex_path_dis_agg(void)
continue;
if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx)) {
- reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);
+ reg_write = kzalloc(sizeof(*reg_write), flag);
if (!reg_write) {
IPAERR("failed to allocate memory\n");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index a9bd0e11b330..7767c9b40537 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -4538,7 +4538,7 @@ int ipa_tag_process(struct ipa_desc desc[],
}
/* IP_PACKET_INIT IC for tag status to be sent to apps */
- pkt_init = kzalloc(sizeof(*pkt_init), GFP_KERNEL);
+ pkt_init = kzalloc(sizeof(*pkt_init), flag);
if (!pkt_init) {
IPAERR("failed to allocate memory\n");
res = -ENOMEM;
@@ -4557,7 +4557,7 @@ int ipa_tag_process(struct ipa_desc desc[],
desc_idx++;
/* NO-OP IC for ensuring that IPA pipeline is empty */
- reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+ reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
if (!reg_write_nop) {
IPAERR("no mem\n");
res = -ENOMEM;
@@ -4576,7 +4576,7 @@ int ipa_tag_process(struct ipa_desc desc[],
desc_idx++;
/* status IC */
- status = kzalloc(sizeof(*status), GFP_KERNEL);
+ status = kzalloc(sizeof(*status), flag);
if (!status) {
IPAERR("no mem\n");
res = -ENOMEM;
@@ -4612,7 +4612,7 @@ int ipa_tag_process(struct ipa_desc desc[],
atomic_set(&comp->cnt, 2);
/* dummy packet to send to IPA. packet payload is a completion object */
- dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL);
+ dummy_skb = alloc_skb(sizeof(comp), flag);
if (!dummy_skb) {
IPAERR("failed to allocate memory\n");
res = -ENOMEM;
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index b75d7db57c3e..1935704fcf09 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -404,6 +404,7 @@ struct fg_chip {
struct mutex sram_rw_lock;
struct mutex charge_full_lock;
struct mutex qnovo_esr_ctrl_lock;
+ spinlock_t suspend_lock;
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
@@ -438,6 +439,7 @@ struct fg_chip {
bool slope_limit_en;
bool use_ima_single_mode;
bool qnovo_enable;
+ bool suspended;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 256d9ed8ada5..491dda6ff7e8 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -762,7 +762,19 @@ static int fg_get_msoc(struct fg_chip *chip, int *msoc)
if (rc < 0)
return rc;
- *msoc = DIV_ROUND_CLOSEST(*msoc * FULL_CAPACITY, FULL_SOC_RAW);
+ /*
+ * To have better endpoints for 0 and 100, it is good to tune the
+ * calculation discarding values 0 and 255 while rounding off. Rest
+ * of the values 1-254 will be scaled to 1-99. DIV_ROUND_UP will not
+ * be suitable here as it rounds up any value higher than 252 to 100.
+ */
+ if (*msoc == FULL_SOC_RAW)
+ *msoc = 100;
+ else if (*msoc == 0)
+ *msoc = 0;
+ else
+ *msoc = DIV_ROUND_CLOSEST((*msoc - 1) * (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
return 0;
}
@@ -3776,6 +3788,14 @@ static int fg_notifier_cb(struct notifier_block *nb,
struct power_supply *psy = data;
struct fg_chip *chip = container_of(nb, struct fg_chip, nb);
+ spin_lock(&chip->suspend_lock);
+ if (chip->suspended) {
+ /* Return if we are still suspended */
+ spin_unlock(&chip->suspend_lock);
+ return NOTIFY_OK;
+ }
+ spin_unlock(&chip->suspend_lock);
+
if (event != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
@@ -5089,6 +5109,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->ttf.lock);
mutex_init(&chip->charge_full_lock);
mutex_init(&chip->qnovo_esr_ctrl_lock);
+ spin_lock_init(&chip->suspend_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
@@ -5186,6 +5207,10 @@ static int fg_gen3_suspend(struct device *dev)
struct fg_chip *chip = dev_get_drvdata(dev);
int rc;
+ spin_lock(&chip->suspend_lock);
+ chip->suspended = true;
+ spin_unlock(&chip->suspend_lock);
+
rc = fg_esr_timer_config(chip, true);
if (rc < 0)
pr_err("Error in configuring ESR timer, rc=%d\n", rc);
@@ -5209,6 +5234,16 @@ static int fg_gen3_resume(struct device *dev)
if (fg_sram_dump)
schedule_delayed_work(&chip->sram_dump_work,
msecs_to_jiffies(fg_sram_dump_period_ms));
+
+ if (!work_pending(&chip->status_change_work)) {
+ pm_stay_awake(chip->dev);
+ schedule_work(&chip->status_change_work);
+ }
+
+ spin_lock(&chip->suspend_lock);
+ chip->suspended = false;
+ spin_unlock(&chip->suspend_lock);
+
return 0;
}
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
index 08e991fa7db3..4098cb91f8fb 100644
--- a/drivers/regulator/qpnp-regulator.c
+++ b/drivers/regulator/qpnp-regulator.c
@@ -1771,7 +1771,7 @@ static int qpnp_regulator_check_constraints(struct qpnp_regulator *vreg,
if (vreg->logical_type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS) {
max_uV = pdata->init_data.constraints.max_uV;
/* Find the range which max_uV is inside of. */
- for (i = vreg->set_points->count - 1; i > 0; i--) {
+ for (i = vreg->set_points->count - 1; i >= 0; i--) {
range = &vreg->set_points->range[i];
if (range->set_point_max_uV > 0
&& max_uV >= range->set_point_min_uV
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e6fb97cb12f4..7c28dc1cb0dd 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -456,7 +456,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
static int clariion_std_inquiry(struct scsi_device *sdev,
struct clariion_dh_data *csdev)
{
- int err;
+ int err = SCSI_DH_OK;
char *sp_model;
err = send_inquiry_cmd(sdev, 0, csdev);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index a2136c6863d3..1e8f50c4ebad 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -959,4 +959,12 @@ config MSM_CACHE_M4M_ERP64_PANIC_ON_UE
Say 'Y' here to cause kernel panic when uncorrectable cache/M4M errors
are detected.
+config QCOM_QDSS_BRIDGE
+ bool "Configure bridge driver for QTI/Qualcomm Technologies, Inc. MDM"
+ depends on MSM_MHI
+ help
+ The driver will help route diag traffic from modem side over the QDSS
+ sub-system to USB on APSS side. The driver acts as a bridge between the
+ MHI and USB interface. If unsure, say N.
+
source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 229b13a04819..ba2ff8326cac 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -108,3 +108,4 @@ obj-$(CONFIG_QCOM_EARLY_RANDOM) += early_random.o
obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o
obj-$(CONFIG_MSM_CACHE_M4M_ERP64) += cache_m4m_erp64.o
obj-$(CONFIG_MSM_HAB) += hab/
+obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
diff --git a/drivers/soc/qcom/glink_private.h b/drivers/soc/qcom/glink_private.h
index bb794f45cff7..e91ad30f73ab 100644
--- a/drivers/soc/qcom/glink_private.h
+++ b/drivers/soc/qcom/glink_private.h
@@ -692,7 +692,6 @@ enum ssr_command {
* received.
* edge: The G-Link edge name for the channel associated with
* this callback data
- * do_cleanup_data: Structure containing the G-Link SSR do_cleanup message.
* cb_kref: Kref object to maintain cb_data reference.
*/
struct ssr_notify_data {
@@ -700,7 +699,6 @@ struct ssr_notify_data {
unsigned event;
bool responded;
const char *edge;
- struct do_cleanup_msg *do_cleanup_data;
struct kref cb_kref;
};
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 042108d4035b..f2c273b0f4e0 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -225,6 +225,7 @@ struct edge_info {
spinlock_t rt_vote_lock;
uint32_t rt_votes;
uint32_t num_pw_states;
+ uint32_t readback;
unsigned long *ramp_time_us;
struct mailbox_config_info *mailbox;
};
@@ -269,6 +270,7 @@ static void send_irq(struct edge_info *einfo)
* Any data associated with this event must be visable to the remote
* before the interrupt is triggered
*/
+ einfo->readback = einfo->tx_ch_desc->write_index;
wmb();
writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
einfo->tx_irq_count++;
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index c28eeab92fed..fb003bd5d35b 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -253,6 +253,8 @@ static void glink_ssr_link_state_cb(struct glink_link_state_cb_info *cb_info,
void glink_ssr_notify_rx(void *handle, const void *priv, const void *pkt_priv,
const void *ptr, size_t size)
{
+ struct do_cleanup_msg *do_cleanup_data =
+ (struct do_cleanup_msg *)pkt_priv;
struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
struct cleanup_done_msg *resp = (struct cleanup_done_msg *)ptr;
struct rx_done_ch_work *rx_done_work;
@@ -263,15 +265,15 @@ void glink_ssr_notify_rx(void *handle, const void *priv, const void *pkt_priv,
__func__);
return;
}
+ if (unlikely(!do_cleanup_data))
+ goto missing_do_cleanup_data;
if (unlikely(!cb_data))
goto missing_cb_data;
- if (unlikely(!cb_data->do_cleanup_data))
- goto missing_do_cleanup_data;
if (unlikely(!resp))
goto missing_response;
- if (unlikely(resp->version != cb_data->do_cleanup_data->version))
+ if (unlikely(resp->version != do_cleanup_data->version))
goto version_mismatch;
- if (unlikely(resp->seq_num != cb_data->do_cleanup_data->seq_num))
+ if (unlikely(resp->seq_num != do_cleanup_data->seq_num))
goto invalid_seq_number;
if (unlikely(resp->response != GLINK_SSR_CLEANUP_DONE))
goto wrong_response;
@@ -283,10 +285,9 @@ void glink_ssr_notify_rx(void *handle, const void *priv, const void *pkt_priv,
"<SSR> %s: Response from %s resp[%d] version[%d] seq_num[%d] restarted[%s]\n",
__func__, cb_data->edge, resp->response,
resp->version, resp->seq_num,
- cb_data->do_cleanup_data->name);
+ do_cleanup_data->name);
- kfree(cb_data->do_cleanup_data);
- cb_data->do_cleanup_data = NULL;
+ kfree(do_cleanup_data);
rx_done_work->ptr = ptr;
rx_done_work->handle = handle;
INIT_WORK(&rx_done_work->work, rx_done_cb_worker);
@@ -305,13 +306,13 @@ missing_response:
return;
version_mismatch:
GLINK_SSR_ERR("<SSR> %s: Version mismatch. %s[%d], %s[%d]\n", __func__,
- "do_cleanup version", cb_data->do_cleanup_data->version,
+ "do_cleanup version", do_cleanup_data->version,
"cleanup_done version", resp->version);
return;
invalid_seq_number:
GLINK_SSR_ERR("<SSR> %s: Invalid seq. number. %s[%d], %s[%d]\n",
__func__, "do_cleanup seq num",
- cb_data->do_cleanup_data->seq_num,
+ do_cleanup_data->seq_num,
"cleanup_done seq_num", resp->seq_num);
return;
wrong_response:
@@ -593,10 +594,8 @@ int notify_for_subsystem(struct subsys_info *ss_info)
do_cleanup_data->name_len = strlen(ss_info->edge);
strlcpy(do_cleanup_data->name, ss_info->edge,
do_cleanup_data->name_len + 1);
- ss_leaf_entry->cb_data->do_cleanup_data = do_cleanup_data;
- ret = glink_queue_rx_intent(handle,
- (void *)ss_leaf_entry->cb_data,
+ ret = glink_queue_rx_intent(handle, do_cleanup_data,
sizeof(struct cleanup_done_msg));
if (ret) {
GLINK_SSR_ERR(
@@ -605,7 +604,6 @@ int notify_for_subsystem(struct subsys_info *ss_info)
"queue_rx_intent failed", ret,
atomic_read(&responses_remaining));
kfree(do_cleanup_data);
- ss_leaf_entry->cb_data->do_cleanup_data = NULL;
if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
panic("%s: Could not queue intent for RPM!\n",
@@ -617,12 +615,12 @@ int notify_for_subsystem(struct subsys_info *ss_info)
}
if (strcmp(ss_leaf_entry->ssr_name, "rpm"))
- ret = glink_tx(handle, ss_leaf_entry->cb_data,
+ ret = glink_tx(handle, do_cleanup_data,
do_cleanup_data,
sizeof(*do_cleanup_data),
GLINK_TX_REQ_INTENT);
else
- ret = glink_tx(handle, ss_leaf_entry->cb_data,
+ ret = glink_tx(handle, do_cleanup_data,
do_cleanup_data,
sizeof(*do_cleanup_data),
GLINK_TX_SINGLE_THREADED);
@@ -632,7 +630,6 @@ int notify_for_subsystem(struct subsys_info *ss_info)
__func__, ret, "resp. remaining",
atomic_read(&responses_remaining));
kfree(do_cleanup_data);
- ss_leaf_entry->cb_data->do_cleanup_data = NULL;
if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
panic("%s: glink_tx() to RPM failed!\n",
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index e9a097151141..83efbbe25e6b 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2347,7 +2347,7 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
goto out;
- if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
+ if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
event_data->crashed, priv->state);
ICNSS_ASSERT(0);
@@ -2651,7 +2651,9 @@ event_post:
clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
fw_down_data.crashed = event_data->crashed;
- icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
+ if (test_bit(ICNSS_FW_READY, &priv->state))
+ icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
+ &fw_down_data);
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
ICNSS_EVENT_SYNC, event_data);
done:
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 45712457de73..1177cac25ffa 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -41,6 +41,7 @@
#define PROXY_TIMEOUT_MS 10000
#define MAX_SSR_REASON_LEN 130U
#define STOP_ACK_TIMEOUT_MS 1000
+#define QDSP6SS_NMI_STATUS 0x44
#define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
@@ -77,12 +78,17 @@ static void restart_modem(struct modem_data *drv)
static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
{
struct modem_data *drv = subsys_to_drv(dev_id);
+ u32 nmi_status = readl_relaxed(drv->q6->reg_base + QDSP6SS_NMI_STATUS);
/* Ignore if we're the one that set the force stop GPIO */
if (drv->crash_shutdown)
return IRQ_HANDLED;
- pr_err("Fatal error on the modem.\n");
+ if (nmi_status & 0x04)
+ pr_err("%s: Fatal error on the modem due to TZ NMI\n",
+ __func__);
+ else
+ pr_err("%s: Fatal error on the modem\n", __func__);
subsys_set_crash_status(drv->subsys, CRASH_STATUS_ERR_FATAL);
restart_modem(drv);
return IRQ_HANDLED;
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
new file mode 100644
index 000000000000..443e9e384ea2
--- /dev/null
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -0,0 +1,463 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define KMSG_COMPONENT "QDSS diag bridge"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include <linux/msm_mhi.h>
+#include <linux/usb/usb_qdss.h>
+#include "qdss_bridge.h"
+
+#define MODULE_NAME "qdss_bridge"
+
+#define QDSS_BUF_SIZE (16*1024)
+#define MHI_CLIENT_QDSS_IN 9
+
+/* Max number of objects needed */
+static int poolsize = 32;
+module_param(poolsize, int, 0644);
+
+/* Size of single buffer */
+static int itemsize = QDSS_BUF_SIZE;
+module_param(itemsize, int, 0644);
+
+static int qdss_destroy_buf_tbl(struct qdss_bridge_drvdata *drvdata)
+{
+ struct list_head *start, *temp;
+ struct qdss_buf_tbl_lst *entry = NULL;
+
+ list_for_each_safe(start, temp, &drvdata->buf_tbl) {
+ entry = list_entry(start, struct qdss_buf_tbl_lst, link);
+ list_del(&entry->link);
+ kfree(entry->buf);
+ kfree(entry->usb_req);
+ kfree(entry);
+ }
+
+ return 0;
+}
+
+static int qdss_create_buf_tbl(struct qdss_bridge_drvdata *drvdata)
+{
+ struct qdss_buf_tbl_lst *entry;
+ void *buf;
+ struct qdss_request *usb_req;
+ int i;
+
+ for (i = 0; i < poolsize; i++) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto err;
+
+ buf = kzalloc(QDSS_BUF_SIZE, GFP_KERNEL);
+ usb_req = kzalloc(sizeof(*usb_req), GFP_KERNEL);
+
+ entry->buf = buf;
+ entry->usb_req = usb_req;
+ atomic_set(&entry->available, 1);
+ list_add_tail(&entry->link, &drvdata->buf_tbl);
+
+ if (!buf || !usb_req)
+ goto err;
+ }
+
+ return 0;
+err:
+ qdss_destroy_buf_tbl(drvdata);
+ return -ENOMEM;
+}
+
+struct qdss_buf_tbl_lst *qdss_get_buf_tbl_entry(
+ struct qdss_bridge_drvdata *drvdata,
+ void *buf)
+{
+ struct qdss_buf_tbl_lst *entry;
+
+ list_for_each_entry(entry, &drvdata->buf_tbl, link) {
+ if (atomic_read(&entry->available))
+ continue;
+ if (entry->buf == buf)
+ return entry;
+ }
+
+ return NULL;
+}
+
+struct qdss_buf_tbl_lst *qdss_get_entry(struct qdss_bridge_drvdata *drvdata)
+{
+ struct qdss_buf_tbl_lst *item;
+
+ list_for_each_entry(item, &drvdata->buf_tbl, link)
+ if (atomic_cmpxchg(&item->available, 1, 0) == 1)
+ return item;
+
+ return NULL;
+}
+
+static void qdss_buf_tbl_remove(struct qdss_bridge_drvdata *drvdata,
+ void *buf)
+{
+ struct qdss_buf_tbl_lst *entry = NULL;
+
+ list_for_each_entry(entry, &drvdata->buf_tbl, link) {
+ if (entry->buf != buf)
+ continue;
+ atomic_set(&entry->available, 1);
+ return;
+ }
+
+ pr_err_ratelimited("Failed to find buffer for removal\n");
+}
+
+static void mhi_ch_close(struct qdss_bridge_drvdata *drvdata)
+{
+ flush_workqueue(drvdata->mhi_wq);
+ qdss_destroy_buf_tbl(drvdata);
+ mhi_close_channel(drvdata->hdl);
+}
+
+static void mhi_close_work_fn(struct work_struct *work)
+{
+ struct qdss_bridge_drvdata *drvdata =
+ container_of(work,
+ struct qdss_bridge_drvdata,
+ close_work);
+
+ usb_qdss_close(drvdata->usb_ch);
+ mhi_ch_close(drvdata);
+}
+
+static void mhi_read_work_fn(struct work_struct *work)
+{
+ int err = 0;
+ enum MHI_FLAGS mhi_flags = MHI_EOT;
+ struct qdss_buf_tbl_lst *entry;
+
+ struct qdss_bridge_drvdata *drvdata =
+ container_of(work,
+ struct qdss_bridge_drvdata,
+ read_work);
+
+ do {
+ if (!drvdata->opened)
+ break;
+ entry = qdss_get_entry(drvdata);
+ if (!entry)
+ break;
+
+ err = mhi_queue_xfer(drvdata->hdl, entry->buf, QDSS_BUF_SIZE,
+ mhi_flags);
+ if (err) {
+ pr_err_ratelimited("Unable to read from MHI buffer err:%d",
+ err);
+ goto fail;
+ }
+ } while (entry);
+
+ return;
+fail:
+ qdss_buf_tbl_remove(drvdata, entry->buf);
+ queue_work(drvdata->mhi_wq, &drvdata->read_work);
+}
+
+static int mhi_queue_read(struct qdss_bridge_drvdata *drvdata)
+{
+ queue_work(drvdata->mhi_wq, &(drvdata->read_work));
+ return 0;
+}
+
+static int usb_write(struct qdss_bridge_drvdata *drvdata,
+ struct mhi_result *result)
+{
+ int ret = 0;
+ struct qdss_buf_tbl_lst *entry;
+
+ entry = qdss_get_buf_tbl_entry(drvdata, result->buf_addr);
+ if (!entry)
+ return -EINVAL;
+
+ entry->usb_req->buf = result->buf_addr;
+ entry->usb_req->length = result->bytes_xferd;
+ ret = usb_qdss_data_write(drvdata->usb_ch, entry->usb_req);
+
+ return ret;
+}
+
+static void mhi_read_done_work_fn(struct work_struct *work)
+{
+ unsigned char *buf = NULL;
+ struct mhi_result result;
+ int err = 0;
+ struct qdss_bridge_drvdata *drvdata =
+ container_of(work,
+ struct qdss_bridge_drvdata,
+ read_done_work);
+
+ do {
+ err = mhi_poll_inbound(drvdata->hdl, &result);
+ if (err) {
+ pr_debug("MHI poll failed err:%d\n", err);
+ break;
+ }
+ buf = result.buf_addr;
+ if (!buf)
+ break;
+ err = usb_write(drvdata, &result);
+ if (err)
+ qdss_buf_tbl_remove(drvdata, buf);
+ } while (1);
+}
+
+static void usb_write_done(struct qdss_bridge_drvdata *drvdata,
+ struct qdss_request *d_req)
+{
+ if (d_req->status) {
+ pr_err_ratelimited("USB write failed err:%d\n", d_req->status);
+ mhi_queue_read(drvdata);
+ return;
+ }
+ qdss_buf_tbl_remove(drvdata, d_req->buf);
+ mhi_queue_read(drvdata);
+}
+
+static void usb_notifier(void *priv, unsigned int event,
+ struct qdss_request *d_req, struct usb_qdss_ch *ch)
+{
+ struct qdss_bridge_drvdata *drvdata = priv;
+
+ if (!drvdata)
+ return;
+
+ switch (event) {
+ case USB_QDSS_CONNECT:
+ usb_qdss_alloc_req(drvdata->usb_ch, poolsize, 0);
+ mhi_queue_read(drvdata);
+ break;
+
+ case USB_QDSS_DISCONNECT:
+		/* Leave MHI/USB channels open; only close on MHI disconnect */
+ break;
+
+ case USB_QDSS_DATA_WRITE_DONE:
+ usb_write_done(drvdata, d_req);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int mhi_ch_open(struct qdss_bridge_drvdata *drvdata)
+{
+ int ret;
+
+ if (drvdata->opened)
+ return 0;
+
+ ret = mhi_open_channel(drvdata->hdl);
+ if (ret) {
+ pr_err("Unable to open MHI channel\n");
+ return ret;
+ }
+
+ ret = mhi_get_free_desc(drvdata->hdl);
+ if (ret <= 0)
+ return -EIO;
+
+ drvdata->opened = 1;
+ return 0;
+}
+
+static void qdss_bridge_open_work_fn(struct work_struct *work)
+{
+ struct qdss_bridge_drvdata *drvdata =
+ container_of(work,
+ struct qdss_bridge_drvdata,
+ open_work);
+ int ret;
+
+ ret = mhi_ch_open(drvdata);
+ if (ret)
+ goto err_open;
+
+ ret = qdss_create_buf_tbl(drvdata);
+ if (ret)
+ goto err;
+
+ drvdata->usb_ch = usb_qdss_open("qdss_mdm", drvdata, usb_notifier);
+ if (IS_ERR_OR_NULL(drvdata->usb_ch)) {
+ ret = PTR_ERR(drvdata->usb_ch);
+ goto err;
+ }
+
+ return;
+err:
+ mhi_ch_close(drvdata);
+err_open:
+ pr_err("Open work failed with err:%d\n", ret);
+}
+
+static void mhi_notifier(struct mhi_cb_info *cb_info)
+{
+ struct mhi_result *result;
+ struct qdss_bridge_drvdata *drvdata;
+
+ if (!cb_info)
+ return;
+
+ result = cb_info->result;
+ if (!result) {
+ pr_err_ratelimited("Failed to obtain MHI result\n");
+ return;
+ }
+
+ drvdata = (struct qdss_bridge_drvdata *)cb_info->result->user_data;
+ if (!drvdata) {
+ pr_err_ratelimited("MHI returned invalid drvdata\n");
+ return;
+ }
+
+ switch (cb_info->cb_reason) {
+ case MHI_CB_MHI_ENABLED:
+ queue_work(drvdata->mhi_wq, &drvdata->open_work);
+ break;
+
+ case MHI_CB_XFER:
+ if (!drvdata->opened)
+ break;
+
+ queue_work(drvdata->mhi_wq, &drvdata->read_done_work);
+ break;
+
+ case MHI_CB_MHI_DISABLED:
+ if (!drvdata->opened)
+ break;
+
+ drvdata->opened = 0;
+ queue_work(drvdata->mhi_wq, &drvdata->close_work);
+ break;
+
+ default:
+ pr_err_ratelimited("MHI returned invalid cb reason 0x%x\n",
+ cb_info->cb_reason);
+ break;
+ }
+}
+
+static int qdss_mhi_register_ch(struct qdss_bridge_drvdata *drvdata)
+{
+ struct mhi_client_info_t *client_info;
+ int ret;
+ struct mhi_client_info_t *mhi_info;
+
+ client_info = devm_kzalloc(drvdata->dev, sizeof(*client_info),
+ GFP_KERNEL);
+ if (!client_info)
+ return -ENOMEM;
+
+ client_info->mhi_client_cb = mhi_notifier;
+ drvdata->client_info = client_info;
+
+ mhi_info = client_info;
+ mhi_info->chan = MHI_CLIENT_QDSS_IN;
+ mhi_info->dev = drvdata->dev;
+ mhi_info->node_name = "qcom,mhi";
+ mhi_info->user_data = drvdata;
+
+ ret = mhi_register_channel(&drvdata->hdl, mhi_info);
+ return ret;
+}
+
+int qdss_mhi_init(struct qdss_bridge_drvdata *drvdata)
+{
+ int ret;
+
+ drvdata->mhi_wq = create_singlethread_workqueue(MODULE_NAME);
+ if (!drvdata->mhi_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&(drvdata->read_work), mhi_read_work_fn);
+ INIT_WORK(&(drvdata->read_done_work), mhi_read_done_work_fn);
+ INIT_WORK(&(drvdata->open_work), qdss_bridge_open_work_fn);
+ INIT_WORK(&(drvdata->close_work), mhi_close_work_fn);
+ INIT_LIST_HEAD(&drvdata->buf_tbl);
+ drvdata->opened = 0;
+
+ ret = qdss_mhi_register_ch(drvdata);
+ if (ret) {
+ destroy_workqueue(drvdata->mhi_wq);
+ pr_err("Unable to register MHI read channel err:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qdss_mhi_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+ struct qdss_bridge_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ ret = qdss_mhi_init(drvdata);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ pr_err("Device probe failed err:%d\n", ret);
+ return ret;
+}
+
+static const struct of_device_id qdss_mhi_table[] = {
+ {.compatible = "qcom,qdss-mhi"},
+ {},
+};
+
+static struct platform_driver qdss_mhi_driver = {
+ .probe = qdss_mhi_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qdss_mhi_table,
+ },
+};
+
+static int __init qdss_bridge_init(void)
+{
+ return platform_driver_register(&qdss_mhi_driver);
+}
+
+static void __exit qdss_bridge_exit(void)
+{
+ platform_driver_unregister(&qdss_mhi_driver);
+}
+
+module_init(qdss_bridge_init);
+module_exit(qdss_bridge_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QDSS Bridge driver");
diff --git a/drivers/soc/qcom/qdss_bridge.h b/drivers/soc/qcom/qdss_bridge.h
new file mode 100644
index 000000000000..97b9c4099141
--- /dev/null
+++ b/drivers/soc/qcom/qdss_bridge.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QDSS_BRIDGE_H
+#define _QDSS_BRIDGE_H
+
+struct qdss_buf_tbl_lst {
+ struct list_head link;
+ unsigned char *buf;
+ struct qdss_request *usb_req;
+ atomic_t available;
+};
+
+struct qdss_bridge_drvdata {
+ struct device *dev;
+ bool opened;
+ struct work_struct read_work;
+ struct work_struct read_done_work;
+ struct work_struct open_work;
+ struct work_struct close_work;
+ struct workqueue_struct *mhi_wq;
+ struct mhi_client_handle *hdl;
+ struct mhi_client_info_t *client_info;
+ struct list_head buf_tbl;
+ struct usb_qdss_ch *usb_ch;
+};
+
+#endif
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index 43e2e4d17648..c6bdcee8131e 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -185,9 +185,8 @@ static int scm_remap_error(int err)
case SCM_ENOMEM:
return -ENOMEM;
case SCM_EBUSY:
- return SCM_EBUSY;
case SCM_V2_EBUSY:
- return SCM_V2_EBUSY;
+ return -EBUSY;
}
return -EINVAL;
}
@@ -338,13 +337,13 @@ static int _scm_call_retry(u32 svc_id, u32 cmd_id, const void *cmd_buf,
do {
ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
resp_buf, resp_len, cmd, len);
- if (ret == SCM_EBUSY)
+ if (ret == -EBUSY)
msleep(SCM_EBUSY_WAIT_MS);
if (retry_count == 33)
pr_warn("scm: secure world has been busy for 1 second!\n");
- } while (ret == SCM_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
+ } while (ret == -EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
- if (ret == SCM_EBUSY)
+ if (ret == -EBUSY)
pr_err("scm: secure world busy (rc = SCM_EBUSY)\n");
return ret;
@@ -799,7 +798,7 @@ int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
resp_len, cmd, len);
- if (unlikely(ret == SCM_EBUSY))
+ if (unlikely(ret == -EBUSY))
ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len,
resp_buf, resp_len, cmd, PAGE_ALIGN(len));
kfree(cmd);
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 8bf5f8eb64ad..b448f5297f95 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -43,6 +43,7 @@
#define ERR_READY 0
#define PBL_DONE 1
+#define NMI_STATUS_REGISTER 0x44
#define desc_to_data(d) container_of(d, struct pil_tz_data, desc)
#define subsys_to_data(d) container_of(d, struct pil_tz_data, subsys_desc)
@@ -110,6 +111,7 @@ struct pil_tz_data {
void __iomem *irq_mask;
void __iomem *err_status;
void __iomem *err_status_spare;
+ void __iomem *reg_base;
u32 bits_arr[2];
};
@@ -874,8 +876,19 @@ static void subsys_crash_shutdown(const struct subsys_desc *subsys)
static irqreturn_t subsys_err_fatal_intr_handler (int irq, void *dev_id)
{
struct pil_tz_data *d = subsys_to_data(dev_id);
+ u32 nmi_status = 0;
+
+ if (d->reg_base)
+ nmi_status = readl_relaxed(d->reg_base +
+ NMI_STATUS_REGISTER);
+
+ if (nmi_status & 0x04)
+ pr_err("%s: Fatal error on the %s due to TZ NMI\n",
+ __func__, d->subsys_desc.name);
+ else
+ pr_err("%s Fatal error on the %s\n",
+ __func__, d->subsys_desc.name);
- pr_err("Fatal error on %s!\n", d->subsys_desc.name);
if (subsys_get_crash_status(d->subsys)) {
pr_err("%s: Ignoring error fatal, restart in progress\n",
d->subsys_desc.name);
@@ -1011,6 +1024,13 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
d->keep_proxy_regs_on = of_property_read_bool(pdev->dev.of_node,
"qcom,keep-proxy-regs-on");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base_reg");
+ d->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->reg_base)) {
+ dev_err(&pdev->dev, "Failed to iomap base register\n");
+ d->reg_base = NULL;
+ }
+
rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
&d->desc.name);
if (rc)
@@ -1076,23 +1096,55 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"sp2soc_irq_status");
d->irq_status = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->irq_status)) {
+ dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_status\n");
+ rc = PTR_ERR(d->irq_status);
+ goto err_ramdump;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"sp2soc_irq_clr");
d->irq_clear = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->irq_clear)) {
+ dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_clr\n");
+ rc = PTR_ERR(d->irq_clear);
+ goto err_ramdump;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"sp2soc_irq_mask");
d->irq_mask = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->irq_mask)) {
+ dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_mask\n");
+ rc = PTR_ERR(d->irq_mask);
+ goto err_ramdump;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"rmb_err");
d->err_status = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->err_status)) {
+ dev_err(&pdev->dev, "Invalid resource for rmb_err\n");
+ rc = PTR_ERR(d->err_status);
+ goto err_ramdump;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"rmb_err_spare2");
d->err_status_spare = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->err_status_spare)) {
+ dev_err(&pdev->dev, "Invalid resource for rmb_err_spare2\n");
+ rc = PTR_ERR(d->err_status_spare);
+ goto err_ramdump;
+ }
+
rc = of_property_read_u32_array(pdev->dev.of_node,
"qcom,spss-scsr-bits", d->bits_arr, sizeof(d->bits_arr)/
sizeof(d->bits_arr[0]));
- if (rc)
+ if (rc) {
dev_err(&pdev->dev, "Failed to read qcom,spss-scsr-bits");
+ goto err_ramdump;
+ }
mask_scsr_irqs(d);
} else {
@@ -1120,6 +1172,7 @@ err_subsys:
destroy_ramdump_device(d->ramdump_dev);
err_ramdump:
pil_desc_release(&d->desc);
+ platform_set_drvdata(pdev, NULL);
return rc;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8b9c2a38d1cc..1f4a1f02a2cd 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -704,6 +704,18 @@ config SPI_TLE62X0
endif # SPI_MASTER
-# (slave support would go here)
+#
+# SLAVE side ... listening to other SPI masters
+#
+
+config SPI_SLAVE
+ bool "SPI slave protocol handlers"
+ help
+ If your system has a slave-capable SPI controller, you can enable
+ slave protocol handlers.
+
+if SPI_SLAVE
+
+endif # SPI_SLAVE
endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index a7dade652e1b..9e7f8f97c908 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -95,3 +95,5 @@ obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
obj-$(CONFIG_SPI_QSD) += spi_qsd.o
obj-$(CONFIG_SPI_QUP) += spi_qsd.o
+
+# SPI slave protocol handlers
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index dee1cb87d24f..f273de948a78 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1421,39 +1421,11 @@ err_init_queue:
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
-static struct spi_device *
-of_register_spi_device(struct spi_master *master, struct device_node *nc)
+static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
+ struct device_node *nc)
{
- struct spi_device *spi;
- int rc;
u32 value;
-
- /* Alloc an spi_device */
- spi = spi_alloc_device(master);
- if (!spi) {
- dev_err(&master->dev, "spi_device alloc error for %s\n",
- nc->full_name);
- rc = -ENOMEM;
- goto err_out;
- }
-
- /* Select device driver */
- rc = of_modalias_node(nc, spi->modalias,
- sizeof(spi->modalias));
- if (rc < 0) {
- dev_err(&master->dev, "cannot find modalias for %s\n",
- nc->full_name);
- goto err_out;
- }
-
- /* Device address */
- rc = of_property_read_u32(nc, "reg", &value);
- if (rc) {
- dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
- nc->full_name, rc);
- goto err_out;
- }
- spi->chip_select = value;
+ int rc;
/* Mode (clock phase/polarity/etc.) */
if (of_find_property(nc, "spi-cpha", NULL))
@@ -1504,15 +1476,64 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
}
}
+ if (spi_controller_is_slave(master)) {
+ if (strcmp(nc->name, "slave")) {
+ dev_err(&master->dev, "%s is not called 'slave'\n",
+ nc->full_name);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* Device address */
+ rc = of_property_read_u32(nc, "reg", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
+ nc->full_name, rc);
+ return rc;
+ }
+ spi->chip_select = value;
+
/* Device speed */
rc = of_property_read_u32(nc, "spi-max-frequency", &value);
if (rc) {
dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
nc->full_name, rc);
- goto err_out;
+ return rc;
}
spi->max_speed_hz = value;
+ return 0;
+}
+
+static struct spi_device *
+of_register_spi_device(struct spi_master *master, struct device_node *nc)
+{
+ struct spi_device *spi;
+ int rc;
+
+ /* Alloc an spi_device */
+ spi = spi_alloc_device(master);
+ if (!spi) {
+ dev_err(&master->dev, "spi_device alloc error for %s\n",
+ nc->full_name);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ /* Select device driver */
+ rc = of_modalias_node(nc, spi->modalias,
+ sizeof(spi->modalias));
+ if (rc < 0) {
+ dev_err(&master->dev, "cannot find modalias for %s\n",
+ nc->full_name);
+ goto err_out;
+ }
+
+ rc = of_spi_parse_dt(master, spi, nc);
+ if (rc)
+ goto err_out;
+
/* Store a pointer to the node in the device structure */
of_node_get(nc);
spi->dev.of_node = nc;
@@ -1536,8 +1557,8 @@ err_out:
* of_register_spi_devices() - Register child devices onto the SPI bus
* @master: Pointer to spi_master device
*
- * Registers an spi_device for each child node of master node which has a 'reg'
- * property.
+ * Registers an spi_device for each child node of the controller node
+ * which represents a valid SPI slave.
*/
static void of_register_spi_devices(struct spi_master *master)
{
@@ -1669,28 +1690,129 @@ static struct class spi_master_class = {
.dev_groups = spi_master_groups,
};
+#ifdef CONFIG_SPI_SLAVE
+/**
+ * spi_slave_abort - abort the ongoing transfer request on an SPI slave
+ * controller
+ * @spi: device used for the current transfer
+ */
+int spi_slave_abort(struct spi_device *spi)
+{
+ struct spi_master *master = spi->master;
+
+ if (spi_controller_is_slave(master) && master->slave_abort)
+ return master->slave_abort(master);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(spi_slave_abort);
+
+static int match_true(struct device *dev, void *data)
+{
+ return 1;
+}
+
+static ssize_t spi_slave_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_master *ctlr = container_of(dev, struct spi_master, dev);
+ struct device *child;
+
+ child = device_find_child(&ctlr->dev, NULL, match_true);
+ return sprintf(buf, "%s\n",
+ child ? to_spi_device(child)->modalias : NULL);
+}
+
+static ssize_t spi_slave_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct spi_master *ctlr = container_of(dev, struct spi_master, dev);
+ struct spi_device *spi;
+ struct device *child;
+ char name[32];
+ int rc;
+
+ rc = sscanf(buf, "%31s", name);
+ if (rc != 1 || !name[0])
+ return -EINVAL;
+
+ child = device_find_child(&ctlr->dev, NULL, match_true);
+ if (child) {
+ /* Remove registered slave */
+ device_unregister(child);
+ put_device(child);
+ }
+
+ if (strcmp(name, "(null)")) {
+ /* Register new slave */
+ spi = spi_alloc_device(ctlr);
+ if (!spi)
+ return -ENOMEM;
+
+ strlcpy(spi->modalias, name, sizeof(spi->modalias));
+
+ rc = spi_add_device(spi);
+ if (rc) {
+ spi_dev_put(spi);
+ return rc;
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
+
+static struct attribute *spi_slave_attrs[] = {
+ &dev_attr_slave.attr,
+ NULL,
+};
+
+static const struct attribute_group spi_slave_group = {
+ .attrs = spi_slave_attrs,
+};
+
+static const struct attribute_group *spi_slave_groups[] = {
+ &spi_master_statistics_group,
+ &spi_slave_group,
+ NULL,
+};
+
+static struct class spi_slave_class = {
+ .name = "spi_slave",
+ .owner = THIS_MODULE,
+ .dev_release = spi_master_release,
+ .dev_groups = spi_slave_groups,
+};
+#else
+extern struct class spi_slave_class; /* dummy */
+#endif
/**
- * spi_alloc_master - allocate SPI master controller
+ * __spi_alloc_controller - allocate an SPI master or slave controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
* memory is in the driver_data field of the returned device,
* accessible with spi_master_get_devdata().
+ * @slave: flag indicating whether to allocate an SPI master (false) or SPI
+ * slave (true) controller
* Context: can sleep
*
- * This call is used only by SPI master controller drivers, which are the
+ * This call is used only by SPI controller drivers, which are the
* only ones directly touching chip registers. It's how they allocate
* an spi_master structure, prior to calling spi_register_master().
*
* This must be called from context that can sleep.
*
- * The caller is responsible for assigning the bus number and initializing
- * the master's methods before calling spi_register_master(); and (after errors
+ * The caller is responsible for assigning the bus number and initializing the
+ * controller's methods before calling spi_register_master(); and (after errors
* adding the device) calling spi_master_put() to prevent a memory leak.
*
- * Return: the SPI master structure on success, else NULL.
+ * Return: the SPI controller structure on success, else NULL.
*/
-struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+struct spi_master *__spi_alloc_controller(struct device *dev,
+ unsigned int size, bool slave)
{
struct spi_master *master;
@@ -1704,13 +1826,17 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
device_initialize(&master->dev);
master->bus_num = -1;
master->num_chipselect = 1;
- master->dev.class = &spi_master_class;
+ master->slave = slave;
+ if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
+ master->dev.class = &spi_slave_class;
+ else
+ master->dev.class = &spi_master_class;
master->dev.parent = dev;
spi_master_set_devdata(master, &master[1]);
return master;
}
-EXPORT_SYMBOL_GPL(spi_alloc_master);
+EXPORT_SYMBOL_GPL(__spi_alloc_controller);
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
@@ -1786,9 +1912,11 @@ int spi_register_master(struct spi_master *master)
if (!dev)
return -ENODEV;
- status = of_spi_register_master(master);
- if (status)
- return status;
+ if (!spi_controller_is_slave(master)) {
+ status = of_spi_register_master(master);
+ if (status)
+ return status;
+ }
/* even if it's just one always-selected device, there must
* be at least one chipselect
@@ -1824,8 +1952,9 @@ int spi_register_master(struct spi_master *master)
status = device_add(&master->dev);
if (status < 0)
goto done;
- dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
- dynamic ? " (dynamic)" : "");
+ dev_dbg(dev, "registered %s %s%s\n",
+ spi_controller_is_slave(master) ? "slave" : "master",
+ dev_name(&master->dev), dynamic ? " (dynamic)" : "");
/* If we're using a queued driver, start the queue */
if (master->transfer)
@@ -2613,6 +2742,9 @@ static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
dev = class_find_device(&spi_master_class, NULL, node,
__spi_of_master_match);
+ if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
+ dev = class_find_device(&spi_slave_class, NULL, node,
+ __spi_of_master_match);
if (!dev)
return NULL;
@@ -2685,11 +2817,19 @@ static int __init spi_init(void)
if (status < 0)
goto err2;
+ if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
+ status = class_register(&spi_slave_class);
+ if (status < 0)
+ goto err3;
+ }
+
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
return 0;
+err3:
+ class_unregister(&spi_master_class);
err2:
bus_unregister(&spi_bus_type);
err1:
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 6c88fb021444..4eeb82cf79e4 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -44,10 +44,8 @@ void iscsit_set_dataout_sequence_values(
*/
if (cmd->unsolicited_data) {
cmd->seq_start_offset = cmd->write_data_done;
- cmd->seq_end_offset = (cmd->write_data_done +
- ((cmd->se_cmd.data_length >
- conn->sess->sess_ops->FirstBurstLength) ?
- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
+ cmd->seq_end_offset = min(cmd->se_cmd.data_length,
+ conn->sess->sess_ops->FirstBurstLength);
return;
}
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index b2f830d6a654..1e332855b933 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -300,7 +300,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
return 0;
err_tty_register_device_failed:
- free_irq(irq, qtty);
+ free_irq(irq, pdev);
err_request_irq_failed:
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 416006a3384c..c2c9b9361d64 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -218,7 +218,7 @@ struct msm_hs_wakeup {
};
struct msm_hs_port {
- bool startup_locked;
+ atomic_t startup_locked;
struct uart_port uport;
unsigned long imr_reg; /* shadow value of UARTDM_IMR */
struct clk *clk;
@@ -649,7 +649,6 @@ static int msm_serial_loopback_enable_set(void *data, u64 val)
unsigned long flags;
int ret = 0;
- msm_uport->startup_locked = true;
msm_hs_resource_vote(msm_uport);
if (val) {
@@ -669,7 +668,6 @@ static int msm_serial_loopback_enable_set(void *data, u64 val)
}
/* Calling CLOCK API. Hence mb() requires here. */
mb();
- msm_uport->startup_locked = false;
msm_hs_resource_unvote(msm_uport);
return 0;
}
@@ -681,13 +679,11 @@ static int msm_serial_loopback_enable_get(void *data, u64 *val)
unsigned long flags;
int ret = 0;
- msm_uport->startup_locked = true;
msm_hs_resource_vote(msm_uport);
spin_lock_irqsave(&uport->lock, flags);
ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
spin_unlock_irqrestore(&uport->lock, flags);
- msm_uport->startup_locked = false;
msm_hs_resource_unvote(msm_uport);
@@ -1372,12 +1368,9 @@ static void msm_hs_stop_rx_locked(struct uart_port *uport)
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
- MSM_HS_WARN("%s(): Clocks are off\n", __func__);
- /* Make sure resource_on doesn't get called */
- if (msm_hs_clk_bus_vote(msm_uport))
- MSM_HS_ERR("%s:Failed clock vote\n", __func__);
- msm_hs_disable_rx(uport);
- msm_hs_clk_bus_unvote(msm_uport);
+ MSM_HS_WARN("%s(): Clocks are off, Rx still active\n",
+ __func__);
+ return;
} else
msm_hs_disable_rx(uport);
@@ -1421,7 +1414,7 @@ void tx_timeout_handler(unsigned long arg)
if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr);
dump_uart_hs_registers(msm_uport);
- /* Stop further loging */
+ /* Stop further logging */
MSM_HS_ERR("%s(): Stop IPC logging\n", __func__);
}
@@ -1868,12 +1861,6 @@ static void msm_hs_start_tx_locked(struct uart_port *uport)
struct msm_hs_tx *tx = &msm_uport->tx;
unsigned int isr;
- if (msm_uport->startup_locked) {
- MSM_HS_DBG("%s(): No Tx Request, startup_locked=%d\n",
- __func__, msm_uport->startup_locked);
- return;
- }
-
/* Bail if transfer in progress */
if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
@@ -1881,9 +1868,12 @@ static void msm_hs_start_tx_locked(struct uart_port *uport)
if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
isr = msm_hs_read(uport, UART_DM_ISR);
- if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
- MSM_HS_DBG("%s():CTS 1: Peer is Busy, ISR 0x%x",
- __func__, isr);
+ if (UARTDM_ISR_CURRENT_CTS_BMSK & isr) {
+ MSM_HS_DBG("%s():CTS 1: Peer is Busy\n",
+ __func__);
+ MSM_HS_DBG("%s():ISR 0x%x\n",
+ __func__, isr);
+ }
} else
MSM_HS_WARN("%s(): Clocks are off\n", __func__);
@@ -2364,11 +2354,11 @@ void msm_hs_resource_on(struct msm_hs_port *msm_uport)
unsigned int data;
unsigned long flags;
- if (msm_uport->startup_locked) {
- MSM_HS_WARN("%s(): startup_locked=%d\n",
- __func__, msm_uport->startup_locked);
+ if (atomic_read(&msm_uport->startup_locked)) {
+ MSM_HS_DBG("%s(): Port open in progress\n", __func__);
return;
}
+ msm_hs_disable_flow_control(uport, false);
if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
msm_uport->rx.flush == FLUSH_STOP) {
@@ -2387,6 +2377,8 @@ void msm_hs_resource_on(struct msm_hs_port *msm_uport)
spin_unlock_irqrestore(&uport->lock, flags);
}
msm_hs_spsconnect_tx(msm_uport);
+
+ msm_hs_enable_flow_control(uport, false);
}
/* Request to turn off uart clock once pending TX is flushed */
@@ -2679,7 +2671,7 @@ static int msm_hs_startup(struct uart_port *uport)
struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
- msm_uport->startup_locked = true;
+ atomic_set(&msm_uport->startup_locked, 1);
rfr_level = uport->fifosize;
if (rfr_level > 16)
rfr_level -= 16;
@@ -2809,7 +2801,7 @@ static int msm_hs_startup(struct uart_port *uport)
atomic_set(&msm_uport->client_req_state, 0);
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
"%s: Client_Count 0\n", __func__);
- msm_uport->startup_locked = false;
+ atomic_set(&msm_uport->startup_locked, 0);
msm_hs_start_rx_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
@@ -2826,6 +2818,7 @@ unconfig_uart_gpios:
free_uart_irq:
free_irq(uport->irq, msm_uport);
unvote_exit:
+ atomic_set(&msm_uport->startup_locked, 0);
msm_hs_resource_unvote(msm_uport);
MSM_HS_ERR("%s(): Error return\n", __func__);
return ret;
@@ -3238,8 +3231,6 @@ static void msm_hs_pm_suspend(struct device *dev)
msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
msm_hs_resource_off(msm_uport);
obs_manage_irq(msm_uport, false);
- if (!atomic_read(&msm_uport->client_req_state))
- enable_wakeup_interrupt(msm_uport);
msm_hs_clk_bus_unvote(msm_uport);
/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
@@ -3251,6 +3242,8 @@ static void msm_hs_pm_suspend(struct device *dev)
__func__);
}
+ if (!atomic_read(&msm_uport->client_req_state))
+ enable_wakeup_interrupt(msm_uport);
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
"%s: PM State Suspended client_count %d\n", __func__,
client_count);
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 4e603d060e80..59828d819145 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -398,6 +398,12 @@ static struct uart_driver sunhv_reg = {
static struct uart_port *sunhv_port;
+void sunhv_migrate_hvcons_irq(int cpu)
+{
+ /* Migrate hvcons irq to param cpu */
+ irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
+}
+
/* Copy 's' into the con_write_page, decoding "\n" into
* "\r\n" along the way. We have to return two lengths
* because the caller needs to know how much to advance
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index df96f5f88c15..3f6bb3fff890 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1762,6 +1762,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
+ .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index dd5c038c71fd..e8846c91ca71 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -926,10 +926,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
for (i = 0; i < num; i++) {
buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
- length = cap->bLength;
- if (total_len < length)
+ if (total_len < sizeof(*cap) || total_len < cap->bLength) {
+ dev->bos->desc->bNumDeviceCaps = i;
break;
+ }
+ length = cap->bLength;
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index bd9419213d06..873ba02d59e6 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1417,11 +1417,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
totlen += isopkt[u].length;
}
u *= sizeof(struct usb_iso_packet_descriptor);
- if (totlen <= uurb->buffer_length)
- uurb->buffer_length = totlen;
- else
- WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
- totlen, uurb->buffer_length);
+ uurb->buffer_length = totlen;
break;
default:
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1f685ea17d7f..c08a524f3dab 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2667,13 +2667,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (!(portstatus & USB_PORT_STAT_CONNECTION))
return -ENOTCONN;
- /* bomb out completely if the connection bounced. A USB 3.0
- * connection may bounce if multiple warm resets were issued,
+ /* Retry if connect change is set but status is still connected.
+ * A USB 3.0 connection may bounce if multiple warm resets were issued,
* but the device may have successfully re-connected. Ignore it.
*/
if (!hub_is_superspeed(hub->hdev) &&
- (portchange & USB_PORT_STAT_C_CONNECTION))
- return -ENOTCONN;
+ (portchange & USB_PORT_STAT_C_CONNECTION)) {
+ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_CONNECTION);
+ return -EAGAIN;
+ }
if (!(portstatus & USB_PORT_STAT_ENABLE))
return -EBUSY;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 82806e311202..a6aaf2f193a4 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* MIDI keyboard WORLDE MINI */
+ { USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index d92a33097461..26422a659bfc 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -224,6 +224,7 @@ struct dwc3_msm {
struct notifier_block id_nb;
struct notifier_block host_nb;
+ bool host_only_mode;
int pwr_event_irq;
atomic_t in_p3;
@@ -3224,6 +3225,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
if (host_mode ||
(dwc->is_drd && !of_property_read_bool(node, "extcon"))) {
dev_dbg(&pdev->dev, "DWC3 in default host mode\n");
+ mdwc->host_only_mode = true;
mdwc->id_state = DWC3_ID_GROUND;
dwc3_ext_event_notify(mdwc);
}
@@ -3598,7 +3600,9 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
mdwc->in_host_mode = false;
/* re-init core and OTG registers as block reset clears these */
- dwc3_post_host_reset_core_init(dwc);
+ if (!mdwc->host_only_mode)
+ dwc3_post_host_reset_core_init(dwc);
+
pm_runtime_mark_last_busy(mdwc->dev);
pm_runtime_put_sync_autosuspend(mdwc->dev);
dbg_event(0xFF, "StopHost psync",
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 255a11f595c4..d9fb5d411d1d 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -87,6 +87,7 @@ struct gadget_info {
struct usb_composite_driver composite;
struct usb_composite_dev cdev;
bool use_os_desc;
+ bool unbinding;
char b_vendor_code;
char qw_sign[OS_STRING_QW_SIGN_LEN];
#ifdef CONFIG_USB_CONFIGFS_UEVENT
@@ -283,9 +284,11 @@ static int unregister_gadget(struct gadget_info *gi)
if (!gi->udc_name)
return -ENODEV;
+ gi->unbinding = true;
ret = usb_gadget_unregister_driver(&gi->composite.gadget_driver);
if (ret)
return ret;
+ gi->unbinding = false;
kfree(gi->udc_name);
gi->udc_name = NULL;
return 0;
@@ -1561,7 +1564,8 @@ static void android_disconnect(struct usb_gadget *gadget)
acc_disconnect();
#endif
gi->connected = 0;
- schedule_work(&gi->work);
+ if (!gi->unbinding)
+ schedule_work(&gi->work);
composite_disconnect(gadget);
}
#endif
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 00eed5d66fda..06d83825923a 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -877,7 +877,7 @@ b_host:
*/
if (int_usb & MUSB_INTR_RESET) {
handled = IRQ_HANDLED;
- if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_active(musb)) {
/*
* When BABBLE happens what we can depends on which
* platform MUSB is running, because some platforms
@@ -887,9 +887,7 @@ b_host:
* drop the session.
*/
dev_err(musb->controller, "Babble\n");
-
- if (is_host_active(musb))
- musb_recover_from_babble(musb);
+ musb_recover_from_babble(musb);
} else {
dev_dbg(musb->controller, "BUS RESET as %s\n",
usb_otg_state_string(musb->xceiv->otg->state));
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index d9b0dc461439..2d3be66fb563 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -320,6 +320,8 @@ static int sunxi_musb_exit(struct musb *musb)
if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
sunxi_sram_release(musb->controller->parent);
+ devm_usb_put_phy(glue->dev, glue->xceiv);
+
return 0;
}
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 39e683096e94..45182c65fa1f 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -45,6 +45,7 @@ struct metrousb_private {
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) }, /* MS7820 */
{ }, /* Terminating entry. */
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index fca1d37b40bb..88f6b9040651 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -1488,11 +1488,15 @@ static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
{
int ret = 0;
u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0;
- struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+ struct mdss_panel_info *pinfo;
- if (ctrl->panel_mode == DSI_CMD_MODE)
+ /* for dsi 2.1 and above dma scheduling is used */
+ if ((!ctrl) || (ctrl->panel_mode == DSI_CMD_MODE) ||
+ (ctrl->shared_data->hw_rev > MDSS_DSI_HW_REV_200))
return ret;
+ pinfo = &ctrl->panel_data.panel_info;
+
if (ctrl->ctrl_state & CTRL_STATE_MDP_ACTIVE) {
mdss_dsi_wait4video_done(ctrl);
v_total = mdss_panel_get_vtotal(pinfo);
@@ -1512,12 +1516,39 @@ static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
return ret;
}
+static void mdss_dsi_schedule_dma_cmd(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ u32 v_blank, val = 0x0;
+ struct mdss_panel_info *pinfo;
+
+ /* for dsi 2.0 and below dma scheduling is not supported */
+ if ((!ctrl) || (ctrl->panel_mode == DSI_CMD_MODE) ||
+ (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_201))
+ return;
+
+ pinfo = &ctrl->panel_data.panel_info;
+ v_blank = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+
+ /* DMA_SCHEDULE_CTRL */
+ val = MIPI_INP(ctrl->ctrl_io.base + 0x100);
+ val = val | (1 << 28); /* DMA_SCHEDULE_EN */
+ MIPI_OUTP(ctrl->ctrl_io.base + 0x100, val);
+ val |= (pinfo->yres + v_blank);
+ MIPI_OUTP(ctrl->ctrl_io.base + 0x100, val); /* DMA_SCHEDULE_LINE */
+ wmb();
+
+ pr_debug("%s schedule at line %x", __func__, val);
+ MDSS_XLOG(ctrl->ndx, val);
+}
+
static void mdss_dsi_wait4active_region(struct mdss_dsi_ctrl_pdata *ctrl)
{
int in_blanking = 0;
int retry_count = 0;
- if (ctrl->panel_mode != DSI_VIDEO_MODE)
+ /* for dsi 2.1 and above dma scheduling is used */
+ if ((!ctrl) || (ctrl->panel_mode != DSI_VIDEO_MODE) ||
+ (ctrl->shared_data->hw_rev > MDSS_DSI_HW_REV_200))
return;
while (retry_count != MAX_BTA_WAIT_RETRY) {
@@ -2204,6 +2235,10 @@ static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
wmb();
+ /* schedule dma cmds at start of blanking region */
+ mdss_dsi_schedule_dma_cmd(ctrl);
+
+ /* DSI_CMD_MODE_DMA_SW_TRIGGER */
MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);
wmb();
MDSS_XLOG(ctrl->dma_addr, len);
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 5bf931ce1353..978098f71761 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -140,12 +140,19 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
unsigned int timeout)
{
struct kempld_device_data *pld = wdt_data->pld;
- u32 prescaler = kempld_prescaler[PRESCALER_21];
+ u32 prescaler;
u64 stage_timeout64;
u32 stage_timeout;
u32 remainder;
u8 stage_cfg;
+#if GCC_VERSION < 40400
+ /* work around a bug compiling do_div() */
+ prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]);
+#else
+ prescaler = kempld_prescaler[PRESCALER_21];
+#endif
+
if (!stage)
return -EINVAL;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index d3c296d4eb25..43b80ca84d9c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -558,6 +558,8 @@ static void bdev_evict_inode(struct inode *inode)
}
list_del_init(&bdev->bd_list);
spin_unlock(&bdev_lock);
+ /* Detach inode from wb early as bdi_put() may free bdi->wb */
+ inode_detach_wb(inode);
if (bdev->bd_bdi != &noop_backing_dev_info) {
bdi_put(bdev->bd_bdi);
bdev->bd_bdi = &noop_backing_dev_info;
@@ -1221,8 +1223,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
- if (bdev->bd_bdi == &noop_backing_dev_info)
- bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0;
if (!partno) {
@@ -1294,6 +1294,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
(bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
bdev->bd_inode->i_flags &= ~S_DAX;
}
+
+ if (bdev->bd_bdi == &noop_backing_dev_info)
+ bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
} else {
if (bdev->bd_contains == bdev) {
ret = 0;
@@ -1325,8 +1328,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = NULL;
bdev->bd_part = NULL;
bdev->bd_queue = NULL;
- bdi_put(bdev->bd_bdi);
- bdev->bd_bdi = &noop_backing_dev_info;
if (bdev != bdev->bd_contains)
__blkdev_put(bdev->bd_contains, mode, 1);
bdev->bd_contains = NULL;
@@ -1548,12 +1549,6 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
kill_bdev(bdev);
bdev_write_inode(bdev);
- /*
- * Detaching bdev inode from its wb in __destroy_inode()
- * is too late: the queue which embeds its bdi (along with
- * root wb) can be gone as soon as we put_disk() below.
- */
- inode_detach_wb(bdev->bd_inode);
}
if (bdev->bd_contains == bdev) {
if (disk->fops->release)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 63a6152be04b..c5bbb5300658 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1648,6 +1648,9 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
int ret;
+ if (ino == BTRFS_FIRST_FREE_OBJECTID)
+ return 1;
+
ret = get_cur_inode_state(sctx, ino, gen);
if (ret < 0)
goto out;
@@ -1833,7 +1836,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
* not delted and then re-created, if it was then we have no overwrite
* and we can just unlink this entry.
*/
- if (sctx->parent_root) {
+ if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
NULL, NULL, NULL);
if (ret < 0 && ret != -ENOENT)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f54f77037d22..ead89489ae71 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1845,13 +1845,18 @@ static int build_dentry_path(struct dentry *dentry,
int *pfreepath)
{
char *path;
+ struct inode *dir;
- if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
- *pino = ceph_ino(d_inode(dentry->d_parent));
+ rcu_read_lock();
+ dir = d_inode_rcu(dentry->d_parent);
+ if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
+ *pino = ceph_ino(dir);
+ rcu_read_unlock();
*ppath = dentry->d_name.name;
*ppathlen = dentry->d_name.len;
return 0;
}
+ rcu_read_unlock();
path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
if (IS_ERR(path))
return PTR_ERR(path);
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index 363e4c6bf37f..15ebac242288 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -313,6 +313,12 @@ retry:
}
down_read(&keyring_key->sem);
ukp = user_key_payload(keyring_key);
+ if (!ukp) {
+ /* key was revoked before we acquired its semaphore */
+ res = -EKEYREVOKED;
+ up_read(&keyring_key->sem);
+ goto out;
+ }
if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
res = -EINVAL;
up_read(&keyring_key->sem);
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index 6b028b7c4250..926580a85153 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -330,6 +330,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
rcu_read_lock();
confkey = user_key_payload(key);
+ if (!confkey) {
+ /* key was revoked */
+ rcu_read_unlock();
+ key_put(key);
+ goto no_config;
+ }
+
buf = confkey->data;
for (len = confkey->datalen - 1; len >= 0; len--) {
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 15bdc2d48cfe..24ace275160c 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -696,6 +696,14 @@ int set_callback_cred(void)
return 0;
}
+void cleanup_callback_cred(void)
+{
+ if (callback_cred) {
+ put_rpccred(callback_cred);
+ callback_cred = NULL;
+ }
+}
+
static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
{
if (clp->cl_minorversion == 0) {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9e5a6842346e..ca9ebc3242d3 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -6792,23 +6792,24 @@ nfs4_state_start(void)
ret = set_callback_cred();
if (ret)
- return -ENOMEM;
+ return ret;
+
laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
if (laundry_wq == NULL) {
ret = -ENOMEM;
- goto out_recovery;
+ goto out_cleanup_cred;
}
ret = nfsd4_create_callback_queue();
if (ret)
goto out_free_laundry;
set_max_delegations();
-
return 0;
out_free_laundry:
destroy_workqueue(laundry_wq);
-out_recovery:
+out_cleanup_cred:
+ cleanup_callback_cred();
return ret;
}
@@ -6847,6 +6848,7 @@ nfs4_state_shutdown(void)
{
destroy_workqueue(laundry_wq);
nfsd4_destroy_callback_queue();
+ cleanup_callback_cred();
}
static void
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 5134eedcb16c..86af697c21d3 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -595,6 +595,7 @@ extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir,
extern __be32 nfs4_check_open_reclaim(clientid_t *clid,
struct nfsd4_compound_state *cstate, struct nfsd_net *nn);
extern int set_callback_cred(void);
+extern void cleanup_callback_cred(void);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 60a5f1548cd9..555b57a16499 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -531,6 +531,7 @@ void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
init_waitqueue_head(&res->l_event);
INIT_LIST_HEAD(&res->l_blocked_list);
INIT_LIST_HEAD(&res->l_mask_waiters);
+ INIT_LIST_HEAD(&res->l_holders);
}
void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
@@ -748,6 +749,50 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
res->l_flags = 0UL;
}
+/*
+ * Keep a list of processes who have interest in a lockres.
+ * Note: this is now only used for checking recursive cluster locking.
+ */
+static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
+ struct ocfs2_lock_holder *oh)
+{
+ INIT_LIST_HEAD(&oh->oh_list);
+ oh->oh_owner_pid = get_pid(task_pid(current));
+
+ spin_lock(&lockres->l_lock);
+ list_add_tail(&oh->oh_list, &lockres->l_holders);
+ spin_unlock(&lockres->l_lock);
+}
+
+static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
+ struct ocfs2_lock_holder *oh)
+{
+ spin_lock(&lockres->l_lock);
+ list_del(&oh->oh_list);
+ spin_unlock(&lockres->l_lock);
+
+ put_pid(oh->oh_owner_pid);
+}
+
+static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_lock_holder *oh;
+ struct pid *pid;
+
+ /* look in the list of holders for one with the current task as owner */
+ spin_lock(&lockres->l_lock);
+ pid = task_pid(current);
+ list_for_each_entry(oh, &lockres->l_holders, oh_list) {
+ if (oh->oh_owner_pid == pid) {
+ spin_unlock(&lockres->l_lock);
+ return 1;
+ }
+ }
+ spin_unlock(&lockres->l_lock);
+
+ return 0;
+}
+
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
int level)
{
@@ -2343,8 +2388,9 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
goto getbh;
}
- if (ocfs2_mount_local(osb))
- goto local;
+ if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
+ ocfs2_mount_local(osb))
+ goto update;
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
ocfs2_wait_for_recovery(osb);
@@ -2373,7 +2419,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
ocfs2_wait_for_recovery(osb);
-local:
+update:
/*
* We only see this flag if we're being called from
* ocfs2_read_locked_inode(). It means we're locking an inode
@@ -2515,6 +2561,59 @@ void ocfs2_inode_unlock(struct inode *inode,
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
}
+/*
+ * These _tracker variants are introduced to deal with the recursive cluster
+ * locking issue. The idea is to keep track of a lock holder on the stack of
+ * the current process. If there's a lock holder on the stack, we know the
+ * task context is already protected by cluster locking. Currently, they're
+ * used in some VFS entry routines.
+ *
+ * return < 0 on error, return == 0 if there's no lock holder on the stack
+ * before this call, return == 1 if this call would be a recursive locking.
+ */
+int ocfs2_inode_lock_tracker(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct ocfs2_lock_holder *oh)
+{
+ int status;
+ int arg_flags = 0, has_locked;
+ struct ocfs2_lock_res *lockres;
+
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
+ has_locked = ocfs2_is_locked_by_me(lockres);
+ /* Just get buffer head if the cluster lock has been taken */
+ if (has_locked)
+ arg_flags = OCFS2_META_LOCK_GETBH;
+
+ if (likely(!has_locked || ret_bh)) {
+ status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ return status;
+ }
+ }
+ if (!has_locked)
+ ocfs2_add_holder(lockres, oh);
+
+ return has_locked;
+}
+
+void ocfs2_inode_unlock_tracker(struct inode *inode,
+ int ex,
+ struct ocfs2_lock_holder *oh,
+ int had_lock)
+{
+ struct ocfs2_lock_res *lockres;
+
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
+ if (!had_lock) {
+ ocfs2_remove_holder(lockres, oh);
+ ocfs2_inode_unlock(inode, ex);
+ }
+}
+
int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
{
struct ocfs2_lock_res *lockres;
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index d293a22c32c5..a7fc18ba0dc1 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -70,6 +70,11 @@ struct ocfs2_orphan_scan_lvb {
__be32 lvb_os_seqno;
};
+struct ocfs2_lock_holder {
+ struct list_head oh_list;
+ struct pid *oh_owner_pid;
+};
+
/* ocfs2_inode_lock_full() 'arg_flags' flags */
/* don't wait on recovery. */
#define OCFS2_META_LOCK_RECOVERY (0x01)
@@ -77,6 +82,8 @@ struct ocfs2_orphan_scan_lvb {
#define OCFS2_META_LOCK_NOQUEUE (0x02)
/* don't block waiting for the downconvert thread, instead return -EAGAIN */
#define OCFS2_LOCK_NONBLOCK (0x04)
+/* just get back disk inode bh if we've got cluster lock. */
+#define OCFS2_META_LOCK_GETBH (0x08)
/* Locking subclasses of inode cluster lock */
enum {
@@ -170,4 +177,15 @@ void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
/* To set the locking protocol on module initialization */
void ocfs2_set_locking_protocol(void);
+
+/* The _tracker pair is used to avoid cluster recursive locking */
+int ocfs2_inode_lock_tracker(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct ocfs2_lock_holder *oh);
+void ocfs2_inode_unlock_tracker(struct inode *inode,
+ int ex,
+ struct ocfs2_lock_holder *oh,
+ int had_lock);
+
#endif /* DLMGLUE_H */
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 7a0126267847..2495066a9ca3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -172,6 +172,7 @@ struct ocfs2_lock_res {
struct list_head l_blocked_list;
struct list_head l_mask_waiters;
+ struct list_head l_holders;
unsigned long l_flags;
char l_name[OCFS2_LOCK_ID_MAX_LEN];
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 4d9f233c4ba8..7d58ffdacd62 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -105,15 +105,35 @@ do { \
(__ret); \
})
-#define this_cpu_generic_read(pcp) \
+#define __this_cpu_generic_read_nopreempt(pcp) \
({ \
typeof(pcp) __ret; \
preempt_disable(); \
- __ret = *this_cpu_ptr(&(pcp)); \
+ __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
preempt_enable(); \
__ret; \
})
+#define __this_cpu_generic_read_noirq(pcp) \
+({ \
+ typeof(pcp) __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = *raw_cpu_ptr(&(pcp)); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#define this_cpu_generic_read(pcp) \
+({ \
+ typeof(pcp) __ret; \
+ if (__native_word(pcp)) \
+ __ret = __this_cpu_generic_read_nopreempt(pcp); \
+ else \
+ __ret = __this_cpu_generic_read_noirq(pcp); \
+ __ret; \
+})
+
#define this_cpu_generic_to_op(pcp, val, op) \
do { \
unsigned long __flags; \
diff --git a/include/linux/key.h b/include/linux/key.h
index dcc115e8dd03..af071ca73079 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -126,6 +126,11 @@ static inline bool is_key_possessed(const key_ref_t key_ref)
return (unsigned long) key_ref & 1UL;
}
+enum key_state {
+ KEY_IS_UNINSTANTIATED,
+ KEY_IS_POSITIVE, /* Positively instantiated */
+};
+
/*****************************************************************************/
/*
* authentication token / access credential / keyring
@@ -157,6 +162,7 @@ struct key {
* - may not match RCU dereferenced payload
* - payload should contain own length
*/
+ short state; /* Key state (+) or rejection error (-) */
#ifdef KEY_DEBUGGING
unsigned magic;
@@ -165,19 +171,17 @@ struct key {
#endif
unsigned long flags; /* status flags (change with bitops) */
-#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */
-#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */
-#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */
-#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */
-#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */
-#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
-#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
-#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
-#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */
-#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
-#define KEY_FLAG_BUILTIN 10 /* set if key is builtin */
-#define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */
-#define KEY_FLAG_UID_KEYRING 12 /* set if key is a user or user session keyring */
+#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */
+#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */
+#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */
+#define KEY_FLAG_TRUSTED 6 /* set if key is trusted */
+#define KEY_FLAG_TRUSTED_ONLY 7 /* set if keyring only accepts links to trusted keys */
+#define KEY_FLAG_BUILTIN 8 /* set if key is builtin */
+#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */
+#define KEY_FLAG_UID_KEYRING 10 /* set if key is a user or user session keyring */
/* the key type and key description string
* - the desc is used to match a key against search criteria
@@ -203,7 +207,6 @@ struct key {
struct list_head name_link;
struct assoc_array keys;
};
- int reject_error;
};
};
@@ -319,17 +322,27 @@ extern void key_set_timeout(struct key *, unsigned);
#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
#define KEY_NEED_ALL 0x3f /* All the above permissions */
+static inline short key_read_state(const struct key *key)
+{
+ /* Barrier versus mark_key_instantiated(). */
+ return smp_load_acquire(&key->state);
+}
+
/**
- * key_is_instantiated - Determine if a key has been positively instantiated
+ * key_is_positive - Determine if a key has been positively instantiated
* @key: The key to check.
*
* Return true if the specified key has been positively instantiated, false
* otherwise.
*/
-static inline bool key_is_instantiated(const struct key *key)
+static inline bool key_is_positive(const struct key *key)
+{
+ return key_read_state(key) == KEY_IS_POSITIVE;
+}
+
+static inline bool key_is_negative(const struct key *key)
{
- return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
- !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ return key_read_state(key) < 0;
}
#define rcu_dereference_key(KEY) \
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 1f7bc630d225..71a5a56b0bba 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -29,8 +29,8 @@ struct mbus_dram_target_info
struct mbus_dram_window {
u8 cs_index;
u8 mbus_attr;
- u32 base;
- u32 size;
+ u64 base;
+ u64 size;
} cs[4];
};
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index d4b56351027b..2d79ec1496e5 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -330,7 +330,7 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit);
int memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
-int memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
+bool memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 08b3b8348fd7..862d8d1bae8f 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -224,7 +224,7 @@ extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
bool lock_needed, bool is_cmdq_dcmd);
-extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host);
+extern int mmc_recovery_fallback_lower_speed(struct mmc_host *host);
/**
* mmc_claim_host - exclusively claim a host
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7299540fe1ec..c95eb69e7326 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -227,9 +227,10 @@ extern void proc_sched_set_task(struct task_struct *p);
#define TASK_WAKING 256
#define TASK_PARKED 512
#define TASK_NOLOAD 1024
-#define TASK_STATE_MAX 2048
+#define TASK_NEW 2048
+#define TASK_STATE_MAX 4096
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1061,12 +1062,13 @@ struct wake_q_node {
struct wake_q_head {
struct wake_q_node *first;
struct wake_q_node **lastp;
+ int count;
};
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
#define WAKE_Q(name) \
- struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+ struct wake_q_head name = { WAKE_Q_TAIL, &name.first, 0 }
extern void wake_q_add(struct wake_q_head *head,
struct task_struct *task);
@@ -1656,6 +1658,7 @@ struct task_struct {
struct related_thread_group *grp;
struct list_head grp_list;
u64 cpu_cycles;
+ u64 last_sleep_ts;
#endif
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index cce80e6dc7d1..01cf8b6ac61a 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -27,8 +27,8 @@ struct spi_master;
struct spi_transfer;
/*
- * INTERFACES between SPI master-side drivers and SPI infrastructure.
- * (There's no SPI slave support for Linux yet...)
+ * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
+ * and SPI infrastructure.
*/
extern struct bus_type spi_bus_type;
@@ -303,6 +303,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @min_speed_hz: Lowest supported transfer speed
* @max_speed_hz: Highest supported transfer speed
* @flags: other constraints relevant to this driver
+ * @slave: indicates that this is an SPI slave controller
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for SPI bus locking
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -361,6 +362,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
* @unprepare_message: undo any work done by prepare_message().
+ * @slave_abort: abort the ongoing transfer request on an SPI slave controller
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
@@ -425,6 +427,9 @@ struct spi_master {
#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */
#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */
+ /* flag indicating this is an SPI slave controller */
+ bool slave;
+
/* lock and mutex for SPI bus locking */
spinlock_t bus_lock_spinlock;
struct mutex bus_lock_mutex;
@@ -507,6 +512,7 @@ struct spi_master {
struct spi_message *message);
int (*unprepare_message)(struct spi_master *master,
struct spi_message *message);
+ int (*slave_abort)(struct spi_master *spi);
/*
* These hooks are for drivers that use a generic implementation
@@ -556,6 +562,11 @@ static inline void spi_master_put(struct spi_master *master)
put_device(&master->dev);
}
+static inline bool spi_controller_is_slave(struct spi_master *ctlr)
+{
+ return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
+}
+
/* PM calls that need to be issued by the driver */
extern int spi_master_suspend(struct spi_master *master);
extern int spi_master_resume(struct spi_master *master);
@@ -566,8 +577,23 @@ extern void spi_finalize_current_message(struct spi_master *master);
extern void spi_finalize_current_transfer(struct spi_master *master);
/* the spi driver core manages memory for the spi_master classdev */
-extern struct spi_master *
-spi_alloc_master(struct device *host, unsigned size);
+extern struct spi_master *__spi_alloc_controller(struct device *host,
+ unsigned int size, bool slave);
+
+static inline struct spi_master *spi_alloc_master(struct device *host,
+ unsigned int size)
+{
+ return __spi_alloc_controller(host, size, false);
+}
+
+static inline struct spi_master *spi_alloc_slave(struct device *host,
+ unsigned int size)
+{
+ if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+ return NULL;
+
+ return __spi_alloc_controller(host, size, true);
+}
extern int spi_register_master(struct spi_master *master);
extern int devm_spi_register_master(struct device *dev,
@@ -831,6 +857,7 @@ extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
extern int spi_async_locked(struct spi_device *spi,
struct spi_message *message);
+extern int spi_slave_abort(struct spi_device *spi);
/*---------------------------------------------------------------------------*/
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 6ff6ab8534dd..2181ae5db42e 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -303,6 +303,7 @@ struct trace_event_call {
int perf_refcount;
struct hlist_head __percpu *perf_events;
struct bpf_prog *prog;
+ struct perf_event *bpf_prog_owner;
int (*perf_perm)(struct trace_event_call *,
struct perf_event *);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index d0b5ca5d4e08..6c1cbbedc79c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -224,6 +224,7 @@ static inline void inode_attach_wb(struct inode *inode, struct page *page)
static inline void inode_detach_wb(struct inode *inode)
{
if (inode->i_wb) {
+ WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
wb_put(inode->i_wb);
inode->i_wb = NULL;
}
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index f1d321299492..ca2de6013a36 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -18,6 +18,12 @@
#define CNSS_MAX_FILE_NAME 20
#define CNSS_MAX_TIMESTAMP_LEN 32
+/*
+ * Temporary change for compilation, will be removed
+ * after WLAN host driver switched to use new APIs
+ */
+#define CNSS_API_WITH_DEV
+
enum cnss_bus_width_type {
CNSS_BUS_WIDTH_NONE,
CNSS_BUS_WIDTH_LOW,
@@ -139,29 +145,30 @@ enum cnss_recovery_reason {
extern int cnss_wlan_register_driver(struct cnss_wlan_driver *driver);
extern void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver);
-extern void cnss_device_crashed(void);
+extern void cnss_device_crashed(struct device *dev);
extern int cnss_pci_link_down(struct device *dev);
extern void cnss_schedule_recovery(struct device *dev,
enum cnss_recovery_reason reason);
extern int cnss_self_recovery(struct device *dev,
enum cnss_recovery_reason reason);
extern int cnss_force_fw_assert(struct device *dev);
-extern void *cnss_get_virt_ramdump_mem(unsigned long *size);
-extern int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
+extern void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size);
+extern int cnss_get_fw_files_for_target(struct device *dev,
+ struct cnss_fw_files *pfw_files,
u32 target_type, u32 target_version);
-extern int cnss_get_platform_cap(struct cnss_platform_cap *cap);
+extern int cnss_get_platform_cap(struct device *dev,
+ struct cnss_platform_cap *cap);
extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info);
-extern void cnss_set_driver_status(enum cnss_driver_status driver_status);
-extern int cnss_request_bus_bandwidth(int bandwidth);
+extern int cnss_request_bus_bandwidth(struct device *dev, int bandwidth);
extern int cnss_power_up(struct device *dev);
extern int cnss_power_down(struct device *dev);
-extern void cnss_request_pm_qos(u32 qos_val);
-extern void cnss_remove_pm_qos(void);
-extern void cnss_lock_pm_sem(void);
-extern void cnss_release_pm_sem(void);
-extern int cnss_wlan_pm_control(bool vote);
-extern int cnss_auto_suspend(void);
-extern int cnss_auto_resume(void);
+extern void cnss_request_pm_qos(struct device *dev, u32 qos_val);
+extern void cnss_remove_pm_qos(struct device *dev);
+extern void cnss_lock_pm_sem(struct device *dev);
+extern void cnss_release_pm_sem(struct device *dev);
+extern int cnss_wlan_pm_control(struct device *dev, bool vote);
+extern int cnss_auto_suspend(struct device *dev);
+extern int cnss_auto_resume(struct device *dev);
extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
int *num_vectors,
uint32_t *user_base_data,
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index cccdcfd14973..f348c736e6e0 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -141,8 +141,12 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
struct sctp_event_subscribe *mask)
{
+ int offset = sn_type - SCTP_SN_TYPE_BASE;
char *amask = (char *) mask;
- return amask[sn_type - SCTP_SN_TYPE_BASE];
+
+ if (offset >= sizeof(struct sctp_event_subscribe))
+ return 0;
+ return amask[offset];
}
/* Given an event subscription, is this event enabled? */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index feffec7b489a..739bcb89f602 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1462,7 +1462,7 @@ TRACE_EVENT(sched_contrib_scale_f,
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int walt_ravg_window;
-extern unsigned int walt_disabled;
+extern bool walt_disabled;
#endif
/*
@@ -1544,9 +1544,9 @@ TRACE_EVENT(sched_load_avg_cpu,
__entry->util_avg_pelt = cfs_rq->avg.util_avg;
__entry->util_avg_walt = 0;
#ifdef CONFIG_SCHED_WALT
- __entry->util_avg_walt =
- cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
- do_div(__entry->util_avg_walt, walt_ravg_window);
+ __entry->util_avg_walt =
+ div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
+ walt_ravg_window >> SCHED_LOAD_SHIFT);
if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
__entry->util_avg = __entry->util_avg_walt;
#endif
@@ -1851,6 +1851,153 @@ TRACE_EVENT(sched_overutilized,
TP_printk("overutilized=%d",
__entry->overutilized ? 1 : 0)
);
+#ifdef CONFIG_SCHED_WALT
+struct rq;
+
+TRACE_EVENT(walt_update_task_ravg,
+
+ TP_PROTO(struct task_struct *p, struct rq *rq, int evt,
+ u64 wallclock, u64 irqtime),
+
+ TP_ARGS(p, rq, evt, wallclock, irqtime),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( pid_t, cur_pid )
+ __field( u64, wallclock )
+ __field( u64, mark_start )
+ __field( u64, delta_m )
+ __field( u64, win_start )
+ __field( u64, delta )
+ __field( u64, irqtime )
+ __field( int, evt )
+ __field(unsigned int, demand )
+ __field(unsigned int, sum )
+ __field( int, cpu )
+ __field( u64, cs )
+ __field( u64, ps )
+ __field(unsigned long, util )
+ __field( u32, curr_window )
+ __field( u32, prev_window )
+ __field( u64, nt_cs )
+ __field( u64, nt_ps )
+ __field( u32, active_windows )
+ ),
+
+ TP_fast_assign(
+ __entry->wallclock = wallclock;
+ __entry->win_start = rq->window_start;
+ __entry->delta = (wallclock - rq->window_start);
+ __entry->evt = evt;
+ __entry->cpu = rq->cpu;
+ __entry->cur_pid = rq->curr->pid;
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->mark_start = p->ravg.mark_start;
+ __entry->delta_m = (wallclock - p->ravg.mark_start);
+ __entry->demand = p->ravg.demand;
+ __entry->sum = p->ravg.sum;
+ __entry->irqtime = irqtime;
+ __entry->cs = rq->curr_runnable_sum;
+ __entry->ps = rq->prev_runnable_sum;
+ __entry->util = rq->prev_runnable_sum << SCHED_LOAD_SHIFT;
+ do_div(__entry->util, walt_ravg_window);
+ __entry->curr_window = p->ravg.curr_window;
+ __entry->prev_window = p->ravg.prev_window;
+ __entry->nt_cs = rq->nt_curr_runnable_sum;
+ __entry->nt_ps = rq->nt_prev_runnable_sum;
+ __entry->active_windows = p->ravg.active_windows;
+ ),
+
+ TP_printk("wc %llu ws %llu delta %llu event %d cpu %d cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
+ " cs %llu ps %llu util %lu cur_window %u prev_window %u active_wins %u"
+ , __entry->wallclock, __entry->win_start, __entry->delta,
+ __entry->evt, __entry->cpu, __entry->cur_pid,
+ __entry->pid, __entry->comm, __entry->mark_start,
+ __entry->delta_m, __entry->demand,
+ __entry->sum, __entry->irqtime,
+ __entry->cs, __entry->ps, __entry->util,
+ __entry->curr_window, __entry->prev_window,
+ __entry->active_windows
+ )
+);
+
+TRACE_EVENT(walt_update_history,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
+ int evt),
+
+ TP_ARGS(rq, p, runtime, samples, evt),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field(unsigned int, runtime )
+ __field( int, samples )
+ __field( int, evt )
+ __field( u64, demand )
+ __field( u64, walt_avg )
+ __field(unsigned int, pelt_avg )
+ __array( u32, hist, RAVG_HIST_SIZE_MAX)
+ __field( int, cpu )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->runtime = runtime;
+ __entry->samples = samples;
+ __entry->evt = evt;
+ __entry->demand = p->ravg.demand;
+ __entry->walt_avg = (__entry->demand << 10) / walt_ravg_window,
+ __entry->pelt_avg = p->se.avg.util_avg;
+ memcpy(__entry->hist, p->ravg.sum_history,
+ RAVG_HIST_SIZE_MAX * sizeof(u32));
+ __entry->cpu = rq->cpu;
+ ),
+
+ TP_printk("%d (%s): runtime %u samples %d event %d demand %llu"
+ " walt %llu pelt %u (hist: %u %u %u %u %u) cpu %d",
+ __entry->pid, __entry->comm,
+ __entry->runtime, __entry->samples, __entry->evt,
+ __entry->demand,
+ __entry->walt_avg,
+ __entry->pelt_avg,
+ __entry->hist[0], __entry->hist[1],
+ __entry->hist[2], __entry->hist[3],
+ __entry->hist[4], __entry->cpu)
+);
+
+TRACE_EVENT(walt_migration_update_sum,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p),
+
+ TP_ARGS(rq, p),
+
+ TP_STRUCT__entry(
+ __field(int, cpu )
+ __field(int, pid )
+ __field( u64, cs )
+ __field( u64, ps )
+ __field( s64, nt_cs )
+ __field( s64, nt_ps )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu_of(rq);
+ __entry->cs = rq->curr_runnable_sum;
+ __entry->ps = rq->prev_runnable_sum;
+ __entry->nt_cs = (s64)rq->nt_curr_runnable_sum;
+ __entry->nt_ps = (s64)rq->nt_prev_runnable_sum;
+ __entry->pid = p->pid;
+ ),
+
+ TP_printk("cpu %d: cs %llu ps %llu nt_cs %lld nt_ps %lld pid %d",
+ __entry->cpu, __entry->cs, __entry->ps,
+ __entry->nt_cs, __entry->nt_ps, __entry->pid)
+);
+#endif /* CONFIG_SCHED_WALT */
#endif /* CONFIG_SMP */
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index ce91215cf7e6..e0b566dc72ef 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/sockios.h>
+#include <linux/in6.h> /* For struct sockaddr_in6. */
/*
* Based on the MROUTING 3.5 defines primarily to keep
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index 5fd58dfb153e..13bb8b79359a 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -321,6 +321,7 @@ enum kgsl_timestamp_type {
#define KGSL_PROP_DEVICE_BITNESS 0x18
#define KGSL_PROP_DEVICE_QDSS_STM 0x19
#define KGSL_PROP_DEVICE_QTIMER 0x20
+#define KGSL_PROP_IB_TIMEOUT 0x21
struct kgsl_shadowprop {
unsigned long gpuaddr;
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 0f9265cb2a96..7af20a136429 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -35,6 +35,7 @@
#define _LINUX_RDS_H
#include <linux/types.h>
+#include <linux/socket.h> /* For __kernel_sockaddr_storage. */
#define RDS_IB_ABI_VERSION 0x301
@@ -223,7 +224,7 @@ struct rds_get_mr_args {
};
struct rds_get_mr_for_dest_args {
- struct sockaddr_storage dest_addr;
+ struct __kernel_sockaddr_storage dest_addr;
struct rds_iovec vec;
uint64_t cookie_addr;
uint64_t flags;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c97bce6a0e0e..eb759f5008b8 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1044,7 +1044,8 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
}
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
- (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
+ (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
+ BPF_CLASS(insn->code) == BPF_ALU64) {
verbose("BPF_END uses reserved fields\n");
return -EINVAL;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 98928fb7fecc..322f63370038 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7302,6 +7302,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
}
event->tp_event->prog = prog;
+ event->tp_event->bpf_prog_owner = event;
return 0;
}
@@ -7314,7 +7315,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
return;
prog = event->tp_event->prog;
- if (prog) {
+ if (prog && event->tp_event->bpf_prog_owner == event) {
event->tp_event->prog = NULL;
bpf_prog_put_rcu(prog);
}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 60ace56618f6..0e2c4911ba61 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3128,10 +3128,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (depth) {
hlock = curr->held_locks + depth - 1;
if (hlock->class_idx == class_idx && nest_lock) {
- if (hlock->references)
+ if (hlock->references) {
+ /*
+ * Check: unsigned int references:12, overflow.
+ */
+ if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
+ return 0;
+
hlock->references++;
- else
+ } else {
hlock->references = 2;
+ }
return 1;
}
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 750ed601ddf7..8620fd01b3d0 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -111,14 +111,11 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
if (tg != &root_task_group)
return false;
-
/*
- * We can only assume the task group can't go away on us if
- * autogroup_move_group() can see us on ->thread_group list.
+ * If we race with autogroup_move_group() the caller can use the old
+ * value of signal->autogroup but in this case sched_move_task() will
+ * be called again before autogroup_kref_put().
*/
- if (p->flags & PF_EXITING)
- return false;
-
return true;
}
@@ -138,13 +135,17 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
}
p->signal->autogroup = autogroup_kref_get(ag);
-
- if (!READ_ONCE(sysctl_sched_autogroup_enabled))
- goto out;
-
+ /*
+ * We can't avoid sched_move_task() after we changed signal->autogroup,
+ * this process can already run with task_group() == prev->tg or we can
+ * race with cgroup code which can read autogroup = prev under rq->lock.
+ * In the latter case for_each_thread() can not miss a migrating thread,
+ * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
+ * can't be removed from thread list, we hold ->siglock.
+ */
for_each_thread(p, t)
sched_move_task(t);
-out:
+
unlock_task_sighand(p, &flags);
autogroup_kref_put(prev);
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4581a6c9b16f..eacfd2ac56a1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -99,6 +99,10 @@
ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);
+#ifdef CONFIG_SMP
+static bool have_sched_energy_data(void);
+#endif
+
DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -201,6 +205,11 @@ static int sched_feat_set(char *cmp)
sysctl_sched_features &= ~(1UL << i);
sched_feat_disable(i);
} else {
+#ifdef CONFIG_SMP
+ if (i == __SCHED_FEAT_ENERGY_AWARE)
+ WARN(!have_sched_energy_data(),
+ "Missing sched energy data\n");
+#endif
sysctl_sched_features |= (1UL << i);
sched_feat_enable(i);
}
@@ -554,6 +563,8 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
return;
+ head->count++;
+
get_task_struct(task);
/*
@@ -563,6 +574,10 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
head->lastp = &node->next;
}
+static int
+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
+ int sibling_count_hint);
+
void wake_up_q(struct wake_q_head *head)
{
struct wake_q_node *node = head->first;
@@ -577,10 +592,10 @@ void wake_up_q(struct wake_q_head *head)
task->wake_q.next = NULL;
/*
- * wake_up_process() implies a wmb() to pair with the queueing
+ * try_to_wake_up() implies a wmb() to pair with the queueing
* in wake_q_add() so as not to miss wakeups.
*/
- wake_up_process(task);
+ try_to_wake_up(task, TASK_NORMAL, 0, head->count);
put_task_struct(task);
}
}
@@ -1702,14 +1717,16 @@ out:
* The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
*/
static inline
-int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags,
+ int sibling_count_hint)
{
bool allow_isolated = (p->flags & PF_KTHREAD);
lockdep_assert_held(&p->pi_lock);
if (p->nr_cpus_allowed > 1)
- cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+ cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags,
+ sibling_count_hint);
/*
* In order not to call set_task_cpu() on a blocking task we need
@@ -2007,6 +2024,8 @@ static void ttwu_queue(struct task_struct *p, int cpu)
* @p: the thread to be awakened
* @state: the mask of task states that can be woken
* @wake_flags: wake modifier flags (WF_*)
+ * @sibling_count_hint: A hint at the number of threads that are being woken up
+ * in this event.
*
* Put it on the run-queue if it's not already there. The "current"
* thread is always on the run-queue (except when the actual
@@ -2018,7 +2037,8 @@ static void ttwu_queue(struct task_struct *p, int cpu)
* or @state didn't match @p's state.
*/
static int
-try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
+ int sibling_count_hint)
{
unsigned long flags;
int cpu, src_cpu, success = 0;
@@ -2134,7 +2154,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
if (p->sched_class->task_waking)
p->sched_class->task_waking(p);
- cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
+ cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags,
+ sibling_count_hint);
/* Refresh src_cpu as it could have changed since we last read it */
src_cpu = task_cpu(p);
@@ -2236,7 +2257,7 @@ out:
*/
int wake_up_process(struct task_struct *p)
{
- return try_to_wake_up(p, TASK_NORMAL, 0);
+ return try_to_wake_up(p, TASK_NORMAL, 0, 1);
}
EXPORT_SYMBOL(wake_up_process);
@@ -2256,13 +2277,13 @@ EXPORT_SYMBOL(wake_up_process);
int wake_up_process_no_notif(struct task_struct *p)
{
WARN_ON(task_is_stopped_or_traced(p));
- return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER);
+ return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER, 1);
}
EXPORT_SYMBOL(wake_up_process_no_notif);
int wake_up_state(struct task_struct *p, unsigned int state)
{
- return try_to_wake_up(p, state, 0);
+ return try_to_wake_up(p, state, 0, 1);
}
/*
@@ -2337,9 +2358,16 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+#ifdef CONFIG_SCHED_WALT
+ p->last_sleep_ts = 0;
+#endif
INIT_LIST_HEAD(&p->se.group_node);
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ p->se.cfs_rq = NULL;
+#endif
+
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
@@ -2429,11 +2457,11 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
__sched_fork(clone_flags, p);
/*
- * We mark the process as running here. This guarantees that
+ * We mark the process as NEW here. This guarantees that
* nobody will actually run it, and a signal or other external
* event cannot wake it up and insert it on the runqueue either.
*/
- p->state = TASK_RUNNING;
+ p->state = TASK_NEW;
/*
* Make sure we do not leak PI boosting priority to the child.
@@ -2470,8 +2498,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->sched_class = &fair_sched_class;
}
- if (p->sched_class->task_fork)
- p->sched_class->task_fork(p);
+ init_entity_runnable_average(&p->se);
/*
* The child is not yet in the pid-hash so no cgroup attach races,
@@ -2481,7 +2508,13 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
* Silence PROVE_RCU.
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
- set_task_cpu(p, cpu);
+ /*
+ * We're setting the cpu for the first time, we don't migrate,
+ * so use __set_task_cpu().
+ */
+ __set_task_cpu(p, cpu);
+ if (p->sched_class->task_fork)
+ p->sched_class->task_fork(p);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
#ifdef CONFIG_SCHED_INFO
@@ -2614,6 +2647,8 @@ void wake_up_new_task(struct task_struct *p)
add_new_task_to_grp(p);
raw_spin_lock_irqsave(&p->pi_lock, flags);
+ p->state = TASK_RUNNING;
+
/* Initialize new task's runnable average */
init_entity_runnable_average(&p->se);
#ifdef CONFIG_SMP
@@ -2621,11 +2656,15 @@ void wake_up_new_task(struct task_struct *p)
* Fork balancing, do it here and not earlier because:
* - cpus_allowed can change in the fork path
* - any previously selected cpu might disappear through hotplug
+ *
+ * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
+ * as we're not fully set-up yet.
*/
- set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
+ __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0, 1));
#endif
rq = __task_rq_lock(p);
mark_task_starting(p);
+ update_rq_clock(rq);
post_init_entity_util_avg(&p->se);
activate_task(rq, p, ENQUEUE_WAKEUP_NEW);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -3071,7 +3110,7 @@ void sched_exec(void)
raw_spin_lock_irqsave(&p->pi_lock, flags);
curr_cpu = task_cpu(p);
- dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
+ dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0, 1);
if (dest_cpu == smp_processor_id())
goto unlock;
@@ -3171,7 +3210,9 @@ static void sched_freq_tick_pelt(int cpu)
* utilization and to harm its performance the least, request
* a jump to a higher OPP as soon as the margin of free capacity
* is impacted (specified by capacity_margin).
+ * Remember CPU utilization in sched_capacity_reqs should be normalised.
*/
+ cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
set_cfs_cpu_capacity(cpu, true, cpu_utilization);
}
@@ -3198,7 +3239,9 @@ static void sched_freq_tick_walt(int cpu)
* It is likely that the load is growing so we
* keep the added margin in our request as an
* extra boost.
+ * Remember CPU utilization in sched_capacity_reqs should be normalised.
*/
+ cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
set_cfs_cpu_capacity(cpu, true, cpu_utilization);
}
@@ -3209,16 +3252,9 @@ static void sched_freq_tick_walt(int cpu)
static void sched_freq_tick(int cpu)
{
- unsigned long capacity_orig, capacity_curr;
-
if (!sched_freq())
return;
- capacity_orig = capacity_orig_of(cpu);
- capacity_curr = capacity_curr_of(cpu);
- if (capacity_curr == capacity_orig)
- return;
-
_sched_freq_tick(cpu);
}
#else
@@ -3586,6 +3622,10 @@ static void __sched notrace __schedule(bool preempt)
if (!is_idle_task(prev) && !prev->on_rq)
update_avg_burst(prev);
+#ifdef CONFIG_SCHED_WALT
+ if (!prev->on_rq)
+ prev->last_sleep_ts = wallclock;
+#endif
rq->nr_switches++;
rq->curr = next;
++*switch_count;
@@ -3762,7 +3802,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
void *key)
{
- return try_to_wake_up(curr->private, mode, wake_flags);
+ return try_to_wake_up(curr->private, mode, wake_flags, 1);
}
EXPORT_SYMBOL(default_wake_function);
@@ -3788,6 +3828,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
BUG_ON(prio > MAX_PRIO);
rq = __task_rq_lock(p);
+ update_rq_clock(rq);
/*
* Idle task boosting is a nono in general. There is one
@@ -3883,6 +3924,8 @@ void set_user_nice(struct task_struct *p, long nice)
* the task might be in the middle of scheduling on another CPU.
*/
rq = task_rq_lock(p, &flags);
+ update_rq_clock(rq);
+
/*
* The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected
@@ -4310,6 +4353,7 @@ recheck:
* runqueue lock must be held.
*/
rq = task_rq_lock(p, &flags);
+ update_rq_clock(rq);
/*
* Changing the policy of the stop threads its a very bad idea
@@ -7158,6 +7202,19 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
}
+static bool have_sched_energy_data(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (!rcu_dereference(per_cpu(sd_scs, cpu)) ||
+ !rcu_dereference(per_cpu(sd_ea, cpu)))
+ return false;
+ }
+
+ return true;
+}
+
/*
* Check that the per-cpu provided sd energy data is consistent for all cpus
* within the mask.
@@ -7974,6 +8031,9 @@ static int build_sched_domains(const struct cpumask *cpu_map,
}
rcu_read_unlock();
+ WARN(sched_feat(ENERGY_AWARE) && !have_sched_energy_data(),
+ "Missing data for energy aware scheduling\n");
+
ret = 0;
error:
__free_domain_allocs(&d, alloc_state, cpu_map);
@@ -8791,27 +8851,9 @@ void sched_offline_group(struct task_group *tg)
spin_unlock_irqrestore(&task_group_lock, flags);
}
-/* change task's runqueue when it moves between groups.
- * The caller of this function should have put the task in its new group
- * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
- * reflect its new group.
- */
-void sched_move_task(struct task_struct *tsk)
+static void sched_change_group(struct task_struct *tsk, int type)
{
struct task_group *tg;
- int queued, running;
- unsigned long flags;
- struct rq *rq;
-
- rq = task_rq_lock(tsk, &flags);
-
- running = task_current(rq, tsk);
- queued = task_on_rq_queued(tsk);
-
- if (queued)
- dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
- if (unlikely(running))
- put_prev_task(rq, tsk);
/*
* All callers are synchronized by task_rq_lock(); we do not use RCU
@@ -8824,11 +8866,37 @@ void sched_move_task(struct task_struct *tsk)
tsk->sched_task_group = tg;
#ifdef CONFIG_FAIR_GROUP_SCHED
- if (tsk->sched_class->task_move_group)
- tsk->sched_class->task_move_group(tsk);
+ if (tsk->sched_class->task_change_group)
+ tsk->sched_class->task_change_group(tsk, type);
else
#endif
set_task_rq(tsk, task_cpu(tsk));
+}
+
+/*
+ * Change task's runqueue when it moves between groups.
+ *
+ * The caller of this function should have put the task in its new group by
+ * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
+ * its new group.
+ */
+void sched_move_task(struct task_struct *tsk)
+{
+ int queued, running;
+ unsigned long flags;
+ struct rq *rq;
+
+ rq = task_rq_lock(tsk, &flags);
+
+ running = task_current(rq, tsk);
+ queued = task_on_rq_queued(tsk);
+
+ if (queued)
+ dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
+ if (unlikely(running))
+ put_prev_task(rq, tsk);
+
+ sched_change_group(tsk, TASK_MOVE_GROUP);
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
@@ -9265,15 +9333,28 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
sched_free_group(tg);
}
+/*
+ * This is called before wake_up_new_task(), therefore we really only
+ * have to set its group bits, all the other stuff does not apply.
+ */
static void cpu_cgroup_fork(struct task_struct *task, void *private)
{
- sched_move_task(task);
+ unsigned long flags;
+ struct rq *rq;
+
+ rq = task_rq_lock(task, &flags);
+
+ update_rq_clock(rq);
+ sched_change_group(task, TASK_SET_GROUP);
+
+ task_rq_unlock(rq, task, &flags);
}
static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *css;
+ int ret = 0;
cgroup_taskset_for_each(task, css, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
@@ -9284,8 +9365,24 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
if (task->sched_class != &fair_sched_class)
return -EINVAL;
#endif
+ /*
+ * Serialize against wake_up_new_task() such that if its
+ * running, we're sure to observe its full state.
+ */
+ raw_spin_lock_irq(&task->pi_lock);
+ /*
+ * Avoid calling sched_move_task() before wake_up_new_task()
+ * has happened. This would lead to problems with PELT, due to
+ * move wanting to detach+attach while we're not attached yet.
+ */
+ if (task->state == TASK_NEW)
+ ret = -EINVAL;
+ raw_spin_unlock_irq(&task->pi_lock);
+
+ if (ret)
+ break;
}
- return 0;
+ return ret;
}
static void cpu_cgroup_attach(struct cgroup_taskset *tset)
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
index 6ffb23adbcef..ec0aed7a8f96 100644
--- a/kernel/sched/cpufreq_sched.c
+++ b/kernel/sched/cpufreq_sched.c
@@ -202,7 +202,7 @@ static void update_fdomain_capacity_request(int cpu)
}
/* Convert the new maximum capacity request into a cpu frequency */
- freq_new = capacity * policy->max >> SCHED_CAPACITY_SHIFT;
+ freq_new = capacity * policy->cpuinfo.max_freq >> SCHED_CAPACITY_SHIFT;
if (cpufreq_frequency_table_target(policy, policy->freq_table,
freq_new, CPUFREQ_RELATION_L,
&index_new))
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 28977799017b..d3765f0cb699 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -216,8 +216,9 @@ static void sugov_get_util(unsigned long *util, unsigned long *max, u64 time)
*util = boosted_cpu_util(cpu);
if (likely(use_pelt()))
- *util = min((*util + rt), max_cap);
+ *util = *util + rt;
+ *util = min(*util, max_cap);
*max = max_cap;
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 167a1038cff0..bb22bcf499f8 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1107,7 +1107,8 @@ static void yield_task_dl(struct rq *rq)
static int find_later_rq(struct task_struct *task);
static int
-select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags,
+ int sibling_count_hint)
{
struct task_struct *curr;
struct rq *rq;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index aa016919eab8..9263ffd5673f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -762,7 +762,9 @@ void init_entity_runnable_average(struct sched_entity *se)
}
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
+static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
static void attach_entity_cfs_rq(struct sched_entity *se);
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
/*
* With new tasks being created, their initial util_avgs are extrapolated
@@ -833,7 +835,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
attach_entity_cfs_rq(se);
}
-#else
+#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
@@ -4412,11 +4414,14 @@ void remove_entity_load_avg(struct sched_entity *se)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
/*
- * Newly created task or never used group entity should not be removed
- * from its (source) cfs_rq
+ * tasks cannot exit without having gone through wake_up_new_task() ->
+ * post_init_entity_util_avg() which will have added things to the
+ * cfs_rq, so we can remove unconditionally.
+ *
+ * Similarly for groups, they will have passed through
+ * post_init_entity_util_avg() before unregister_sched_fair_group()
+ * calls this.
*/
- if (se->avg.last_update_time == 0)
- return;
sync_entity_load_avg(se);
atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
@@ -5824,7 +5829,7 @@ static void update_capacity_of(int cpu)
if (!sched_freq())
return;
- /* Convert scale-invariant capacity to cpu. */
+ /* Normalize scale-invariant capacity to cpu. */
req_cap = boosted_cpu_util(cpu);
req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
set_cfs_cpu_capacity(cpu, true, req_cap);
@@ -5867,7 +5872,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
*
* note: in the case of encountering a throttled cfs_rq we will
* post the final h_nr_running increment below.
- */
+ */
if (cfs_rq_throttled(cfs_rq))
break;
cfs_rq->h_nr_running++;
@@ -6023,7 +6028,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (rq->cfs.nr_running)
update_capacity_of(cpu_of(rq));
else if (sched_freq())
- set_cfs_cpu_capacity(cpu_of(rq), false, 0);
+ set_cfs_cpu_capacity(cpu_of(rq), false, 0); /* no normalization required for 0 */
}
}
@@ -6446,6 +6451,7 @@ struct energy_env {
int util_delta;
int src_cpu;
int dst_cpu;
+ int trg_cpu;
int energy;
int payoff;
struct task_struct *task;
@@ -6462,11 +6468,14 @@ struct energy_env {
} cap;
};
+static int cpu_util_wake(int cpu, struct task_struct *p);
+
/*
* __cpu_norm_util() returns the cpu util relative to a specific capacity,
- * i.e. it's busy ratio, in the range [0..SCHED_LOAD_SCALE] which is useful for
- * energy calculations. Using the scale-invariant util returned by
- * cpu_util() and approximating scale-invariant util by:
+ * i.e. it's busy ratio, in the range [0..SCHED_LOAD_SCALE], which is useful for
+ * energy calculations.
+ *
+ * Since util is a scale-invariant utilization defined as:
*
* util ~ (curr_freq/max_freq)*1024 * capacity_orig/1024 * running_time/time
*
@@ -6476,34 +6485,32 @@ struct energy_env {
*
* norm_util = running_time/time ~ util/capacity
*/
-static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
+static unsigned long __cpu_norm_util(unsigned long util, unsigned long capacity)
{
- int util = __cpu_util(cpu, delta);
-
if (util >= capacity)
return SCHED_CAPACITY_SCALE;
return (util << SCHED_CAPACITY_SHIFT)/capacity;
}
-static int calc_util_delta(struct energy_env *eenv, int cpu)
+static unsigned long group_max_util(struct energy_env *eenv)
{
- if (cpu == eenv->src_cpu)
- return -eenv->util_delta;
- if (cpu == eenv->dst_cpu)
- return eenv->util_delta;
- return 0;
-}
-
-static
-unsigned long group_max_util(struct energy_env *eenv)
-{
- int i, delta;
unsigned long max_util = 0;
+ unsigned long util;
+ int cpu;
+
+ for_each_cpu(cpu, sched_group_cpus(eenv->sg_cap)) {
+ util = cpu_util_wake(cpu, eenv->task);
- for_each_cpu(i, sched_group_cpus(eenv->sg_cap)) {
- delta = calc_util_delta(eenv, i);
- max_util = max(max_util, __cpu_util(i, delta));
+ /*
+ * If we are looking at the target CPU specified by the eenv,
+ * then we should add the (estimated) utilization of the task
+ * assuming we will wake it up on that CPU.
+ */
+ if (unlikely(cpu == eenv->trg_cpu))
+ util += eenv->util_delta;
+
+ max_util = max(max_util, util);
}
return max_util;
@@ -6511,44 +6518,56 @@ unsigned long group_max_util(struct energy_env *eenv)
/*
* group_norm_util() returns the approximated group util relative to it's
- * current capacity (busy ratio) in the range [0..SCHED_LOAD_SCALE] for use in
- * energy calculations. Since task executions may or may not overlap in time in
- * the group the true normalized util is between max(cpu_norm_util(i)) and
- * sum(cpu_norm_util(i)) when iterating over all cpus in the group, i. The
- * latter is used as the estimate as it leads to a more pessimistic energy
+ * current capacity (busy ratio), in the range [0..SCHED_LOAD_SCALE], for use
+ * in energy calculations.
+ *
+ * Since task executions may or may not overlap in time in the group the true
+ * normalized util is between MAX(cpu_norm_util(i)) and SUM(cpu_norm_util(i))
+ * when iterating over all CPUs in the group.
+ * The latter estimate is used as it leads to a more pessimistic energy
* estimate (more busy).
*/
static unsigned
long group_norm_util(struct energy_env *eenv, struct sched_group *sg)
{
- int i, delta;
- unsigned long util_sum = 0;
unsigned long capacity = sg->sge->cap_states[eenv->cap_idx].cap;
+ unsigned long util, util_sum = 0;
+ int cpu;
+
+ for_each_cpu(cpu, sched_group_cpus(sg)) {
+ util = cpu_util_wake(cpu, eenv->task);
+
+ /*
+ * If we are looking at the target CPU specified by the eenv,
+ * then we should add the (estimated) utilization of the task
+ * assuming we will wake it up on that CPU.
+ */
+ if (unlikely(cpu == eenv->trg_cpu))
+ util += eenv->util_delta;
- for_each_cpu(i, sched_group_cpus(sg)) {
- delta = calc_util_delta(eenv, i);
- util_sum += __cpu_norm_util(i, capacity, delta);
+ util_sum += __cpu_norm_util(util, capacity);
}
- if (util_sum > SCHED_CAPACITY_SCALE)
- return SCHED_CAPACITY_SCALE;
- return util_sum;
+ return min_t(unsigned long, util_sum, SCHED_CAPACITY_SCALE);
}
static int find_new_capacity(struct energy_env *eenv,
const struct sched_group_energy * const sge)
{
- int idx;
+ int idx, max_idx = sge->nr_cap_states - 1;
unsigned long util = group_max_util(eenv);
+ /* default is max_cap if we don't find a match */
+ eenv->cap_idx = max_idx;
+
for (idx = 0; idx < sge->nr_cap_states; idx++) {
- if (sge->cap_states[idx].cap >= util)
+ if (sge->cap_states[idx].cap >= util) {
+ eenv->cap_idx = idx;
break;
+ }
}
- eenv->cap_idx = idx;
-
- return idx;
+ return eenv->cap_idx;
}
static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
@@ -6721,6 +6740,8 @@ static inline bool cpu_in_sg(struct sched_group *sg, int cpu)
return cpu != -1 && cpumask_test_cpu(cpu, sched_group_cpus(sg));
}
+static inline unsigned long task_util(struct task_struct *p);
+
/*
* energy_diff(): Estimate the energy impact of changing the utilization
* distribution. eenv specifies the change: utilisation amount, source, and
@@ -6736,11 +6757,13 @@ static inline int __energy_diff(struct energy_env *eenv)
int diff, margin;
struct energy_env eenv_before = {
- .util_delta = 0,
+ .util_delta = task_util(eenv->task),
.src_cpu = eenv->src_cpu,
.dst_cpu = eenv->dst_cpu,
+ .trg_cpu = eenv->src_cpu,
.nrg = { 0, 0, 0, 0},
.cap = { 0, 0, 0 },
+ .task = eenv->task,
};
if (eenv->src_cpu == eenv->dst_cpu)
@@ -6799,7 +6822,11 @@ static inline int __energy_diff(struct energy_env *eenv)
#ifdef CONFIG_SCHED_TUNE
struct target_nrg schedtune_target_nrg;
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
extern bool schedtune_initialized;
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
/*
* System energy normalization
* Returns the normalized value, in the range [0..SCHED_CAPACITY_SCALE],
@@ -6810,9 +6837,11 @@ normalize_energy(int energy_diff)
{
u32 normalized_nrg;
+#ifdef CONFIG_CGROUP_SCHEDTUNE
/* during early setup, we don't know the extents */
if (unlikely(!schedtune_initialized))
return energy_diff < 0 ? -1 : 1 ;
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
#ifdef CONFIG_SCHED_DEBUG
{
@@ -6848,8 +6877,14 @@ energy_diff(struct energy_env *eenv)
__energy_diff(eenv);
/* Return energy diff when boost margin is 0 */
- if (boost == 0)
+ if (boost == 0) {
+ trace_sched_energy_diff(eenv->task,
+ eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
+ eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
+ eenv->cap.before, eenv->cap.after, eenv->cap.delta,
+ 0, -eenv->nrg.diff);
return eenv->nrg.diff;
+ }
/* Compute normalized energy diff */
nrg_delta = normalize_energy(eenv->nrg.diff);
@@ -6892,15 +6927,18 @@ energy_diff(struct energy_env *eenv)
* being client/server, worker/dispatcher, interrupt source or whatever is
* irrelevant, spread criteria is apparent partner count exceeds socket size.
*/
-static int wake_wide(struct task_struct *p)
+static int wake_wide(struct task_struct *p, int sibling_count_hint)
{
unsigned int master = current->wakee_flips;
unsigned int slave = p->wakee_flips;
- int factor = this_cpu_read(sd_llc_size);
+ int llc_size = this_cpu_read(sd_llc_size);
+
+ if (sibling_count_hint >= llc_size)
+ return 1;
if (master < slave)
swap(master, slave);
- if (slave < factor || master < slave * factor)
+ if (slave < llc_size || master < slave * llc_size)
return 0;
return 1;
}
@@ -7106,8 +7144,6 @@ boosted_task_util(struct task_struct *task)
return util + margin;
}
-static int cpu_util_wake(int cpu, struct task_struct *p);
-
static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
{
return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
@@ -7116,6 +7152,8 @@ static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
+ *
+ * Assumes p is allowed on at least one CPU in sd.
*/
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
@@ -7123,7 +7161,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
{
struct sched_group *idlest = NULL, *group = sd->groups;
struct sched_group *most_spare_sg = NULL;
- unsigned long min_load = ULONG_MAX, this_load = 0;
+ unsigned long min_load = ULONG_MAX, this_load = ULONG_MAX;
unsigned long most_spare = 0, this_spare = 0;
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
@@ -7191,23 +7229,31 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
* utilized systems if we require spare_capacity > task_util(p),
* so we allow for some task stuffing by using
* spare_capacity > task_util(p)/2.
+ *
+ * Spare capacity can't be used for fork because the utilization has
+ * not been set yet, we must first select a rq to compute the initial
+ * utilization.
*/
+ if (sd_flag & SD_BALANCE_FORK)
+ goto skip_spare;
+
if (this_spare > task_util(p) / 2 &&
imbalance*this_spare > 100*most_spare)
return NULL;
else if (most_spare > task_util(p) / 2)
return most_spare_sg;
+skip_spare:
if (!idlest || 100*this_load < imbalance*min_load)
return NULL;
return idlest;
}
/*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
+ * find_idlest_group_cpu - find the idlest cpu among the cpus in group.
*/
static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
unsigned long load, min_load = ULONG_MAX;
unsigned int min_exit_latency = UINT_MAX;
@@ -7254,6 +7300,68 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
}
return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
+ }
+
+static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
+ int cpu, int prev_cpu, int sd_flag)
+{
+ int new_cpu = cpu;
+ int wu = sd_flag & SD_BALANCE_WAKE;
+ int cas_cpu = -1;
+
+ if (wu) {
+ schedstat_inc(p, se.statistics.nr_wakeups_cas_attempts);
+ schedstat_inc(this_rq(), eas_stats.cas_attempts);
+ }
+
+ if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+ return prev_cpu;
+
+ while (sd) {
+ struct sched_group *group;
+ struct sched_domain *tmp;
+ int weight;
+
+ if (wu)
+ schedstat_inc(sd, eas_stats.cas_attempts);
+
+ if (!(sd->flags & sd_flag)) {
+ sd = sd->child;
+ continue;
+ }
+
+ group = find_idlest_group(sd, p, cpu, sd_flag);
+ if (!group) {
+ sd = sd->child;
+ continue;
+ }
+
+ new_cpu = find_idlest_group_cpu(group, p, cpu);
+ if (new_cpu == cpu) {
+ /* Now try balancing at a lower domain level of cpu */
+ sd = sd->child;
+ continue;
+ }
+
+ /* Now try balancing at a lower domain level of new_cpu */
+ cpu = cas_cpu = new_cpu;
+ weight = sd->span_weight;
+ sd = NULL;
+ for_each_domain(cpu, tmp) {
+ if (weight <= tmp->span_weight)
+ break;
+ if (tmp->flags & sd_flag)
+ sd = tmp;
+ }
+ /* while loop will break here if sd == NULL */
+ }
+
+ if (wu && (cas_cpu >= 0)) {
+ schedstat_inc(p, se.statistics.nr_wakeups_cas_count);
+ schedstat_inc(this_rq(), eas_stats.cas_count);
+ }
+
+ return new_cpu;
}
/*
@@ -7386,9 +7494,6 @@ static int start_cpu(bool boosted)
{
struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
- RCU_LOCKDEP_WARN(rcu_read_lock_sched_held(),
- "sched RCU must be held");
-
return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
}
@@ -7400,7 +7505,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
unsigned long target_capacity = ULONG_MAX;
unsigned long min_wake_util = ULONG_MAX;
unsigned long target_max_spare_cap = 0;
- unsigned long target_util = ULONG_MAX;
unsigned long best_active_util = ULONG_MAX;
int best_idle_cstate = INT_MAX;
struct sched_domain *sd;
@@ -7539,6 +7643,19 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
}
/*
+ * Enforce EAS mode
+ *
+ * For non latency sensitive tasks, skip CPUs that
+ * will be overutilized by moving the task there.
+ *
+ * The goal here is to remain in EAS mode as long as
+ * possible at least for !prefer_idle tasks.
+ */
+ if ((new_util * capacity_margin) >
+ (capacity_orig * SCHED_CAPACITY_SCALE))
+ continue;
+
+ /*
* Case B) Non latency sensitive tasks on IDLE CPUs.
*
* Find an optimal backup IDLE CPU for non latency
@@ -7616,7 +7733,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
target_max_spare_cap = capacity_orig - new_util;
target_capacity = capacity_orig;
- target_util = new_util;
target_cpu = i;
}
@@ -7737,6 +7853,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
.src_cpu = prev_cpu,
.dst_cpu = target_cpu,
.task = p,
+ .trg_cpu = target_cpu,
};
@@ -7755,7 +7872,9 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
/* No energy saving for target_cpu, try backup */
target_cpu = tmp_backup;
eenv.dst_cpu = target_cpu;
- if (tmp_backup < 0 || energy_diff(&eenv) >= 0) {
+ if (tmp_backup < 0 ||
+ tmp_backup == prev_cpu ||
+ energy_diff(&eenv) >= 0) {
schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
target_cpu = prev_cpu;
@@ -7790,7 +7909,8 @@ unlock:
* preempt must be disabled.
*/
static int
-select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags,
+ int sibling_count_hint)
{
struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
int cpu = smp_processor_id();
@@ -7802,9 +7922,12 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
return select_best_cpu(p, prev_cpu, 0, sync);
#endif
- if (sd_flag & SD_BALANCE_WAKE)
- want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
- && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
+ if (sd_flag & SD_BALANCE_WAKE) {
+ record_wakee(p);
+ want_affine = !wake_wide(p, sibling_count_hint) &&
+ !wake_cap(p, cpu, prev_cpu) &&
+ cpumask_test_cpu(cpu, &p->cpus_allowed);
+ }
if (energy_aware() && !(cpu_rq(prev_cpu)->rd->overutilized))
return select_energy_cpu_brute(p, prev_cpu, sync);
@@ -7836,61 +7959,21 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
new_cpu = cpu;
}
+ if (sd && !(sd_flag & SD_BALANCE_FORK)) {
+ /*
+ * We're going to need the task's util for capacity_spare_wake
+ * in find_idlest_group. Sync it up to prev_cpu's
+ * last_update_time.
+ */
+ sync_entity_load_avg(&p->se);
+ }
+
if (!sd) {
if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
} else {
- int wu = sd_flag & SD_BALANCE_WAKE;
- int cas_cpu = -1;
-
- if (wu) {
- schedstat_inc(p, se.statistics.nr_wakeups_cas_attempts);
- schedstat_inc(this_rq(), eas_stats.cas_attempts);
- }
-
- while (sd) {
- struct sched_group *group;
- int weight;
-
- if (wu)
- schedstat_inc(sd, eas_stats.cas_attempts);
-
- if (!(sd->flags & sd_flag)) {
- sd = sd->child;
- continue;
- }
-
- group = find_idlest_group(sd, p, cpu, sd_flag);
- if (!group) {
- sd = sd->child;
- continue;
- }
-
- new_cpu = find_idlest_cpu(group, p, cpu);
- if (new_cpu == -1 || new_cpu == cpu) {
- /* Now try balancing at a lower domain level of cpu */
- sd = sd->child;
- continue;
- }
-
- /* Now try balancing at a lower domain level of new_cpu */
- cpu = cas_cpu = new_cpu;
- weight = sd->span_weight;
- sd = NULL;
- for_each_domain(cpu, tmp) {
- if (weight <= tmp->span_weight)
- break;
- if (tmp->flags & sd_flag)
- sd = tmp;
- }
- /* while loop will break here if sd == NULL */
- }
-
- if (wu && (cas_cpu >= 0)) {
- schedstat_inc(p, se.statistics.nr_wakeups_cas_count);
- schedstat_inc(this_rq(), eas_stats.cas_count);
- }
+ new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
}
rcu_read_unlock();
@@ -10043,8 +10126,11 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
if (busiest->group_type == group_imbalanced)
goto force_balance;
- /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
- if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
+ /*
+ * When dst_cpu is idle, prevent SMP nice and/or asymmetric group
+ * capacities from resulting in underutilization due to avg_load.
+ */
+ if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
busiest->group_no_capacity)
goto force_balance;
@@ -10276,6 +10362,7 @@ static int need_active_balance(struct lb_env *env)
if (energy_aware() &&
(capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
+ ((capacity_orig_of(env->src_cpu) < capacity_orig_of(env->dst_cpu))) &&
env->src_rq->cfs.h_nr_running == 1 &&
cpu_overutilized(env->src_cpu) &&
!cpu_overutilized(env->dst_cpu)) {
@@ -10412,6 +10499,7 @@ redo:
more_balance:
raw_spin_lock_irqsave(&busiest->lock, flags);
+ update_rq_clock(busiest);
/* The world might have changed. Validate assumptions */
if (busiest->nr_running <= 1) {
@@ -10869,6 +10957,7 @@ static int active_load_balance_cpu_stop(void *data)
if (likely(sd)) {
env.sd = sd;
schedstat_inc(sd, alb_count);
+ update_rq_clock(busiest_rq);
p = detach_one_task(&env);
if (p) {
@@ -11532,31 +11621,17 @@ static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se, *curr;
- int this_cpu = smp_processor_id();
struct rq *rq = this_rq();
- unsigned long flags;
-
- raw_spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock(&rq->lock);
update_rq_clock(rq);
cfs_rq = task_cfs_rq(current);
curr = cfs_rq->curr;
-
- /*
- * Not only the cpu but also the task_group of the parent might have
- * been changed after parent->se.parent,cfs_rq were copied to
- * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
- * of child point to valid ones.
- */
- rcu_read_lock();
- __set_task_cpu(p, this_cpu);
- rcu_read_unlock();
-
- update_curr(cfs_rq);
-
- if (curr)
+ if (curr) {
+ update_curr(cfs_rq);
se->vruntime = curr->vruntime;
+ }
place_entity(cfs_rq, se, 1);
if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
@@ -11569,8 +11644,7 @@ static void task_fork_fair(struct task_struct *p)
}
se->vruntime -= cfs_rq->min_vruntime;
-
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock(&rq->lock);
}
/*
@@ -11762,6 +11836,14 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
}
#ifdef CONFIG_FAIR_GROUP_SCHED
+static void task_set_group_fair(struct task_struct *p)
+{
+ struct sched_entity *se = &p->se;
+
+ set_task_rq(p, task_cpu(p));
+ se->depth = se->parent ? se->parent->depth + 1 : 0;
+}
+
static void task_move_group_fair(struct task_struct *p)
{
detach_task_cfs_rq(p);
@@ -11774,6 +11856,19 @@ static void task_move_group_fair(struct task_struct *p)
attach_task_cfs_rq(p);
}
+static void task_change_group_fair(struct task_struct *p, int type)
+{
+ switch (type) {
+ case TASK_SET_GROUP:
+ task_set_group_fair(p);
+ break;
+
+ case TASK_MOVE_GROUP:
+ task_move_group_fair(p);
+ break;
+ }
+}
+
void free_fair_sched_group(struct task_group *tg)
{
int i;
@@ -12005,7 +12100,7 @@ const struct sched_class fair_sched_class = {
.update_curr = update_curr_fair,
#ifdef CONFIG_FAIR_GROUP_SCHED
- .task_move_group = task_move_group_fair,
+ .task_change_group = task_change_group_fair,
#endif
#ifdef CONFIG_SCHED_HMP
.inc_hmp_sched_stats = inc_hmp_sched_stats_fair,
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 36c6634236fb..d562efb04775 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -9,7 +9,8 @@
#ifdef CONFIG_SMP
static int
-select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags,
+ int sibling_count_hint)
{
return task_cpu(p); /* IDLE tasks as never migrated */
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 23b68b051cee..47e97ef57eb8 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1479,7 +1479,8 @@ task_may_not_preempt(struct task_struct *task, int cpu)
}
static int
-select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
+ int sibling_count_hint)
{
struct task_struct *curr;
struct rq *rq;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c53970b5a8f0..cc5ae5ddee6b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -340,7 +340,15 @@ extern void sched_move_task(struct task_struct *tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
-#endif
+
+#ifdef CONFIG_SMP
+extern void set_task_rq_fair(struct sched_entity *se,
+ struct cfs_rq *prev, struct cfs_rq *next);
+#else /* !CONFIG_SMP */
+static inline void set_task_rq_fair(struct sched_entity *se,
+ struct cfs_rq *prev, struct cfs_rq *next) { }
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
extern struct task_group *css_tg(struct cgroup_subsys_state *css);
#else /* CONFIG_CGROUP_SCHED */
@@ -794,6 +802,19 @@ struct rq {
int curr_top;
#endif
+#ifdef CONFIG_SCHED_WALT
+ u64 cumulative_runnable_avg;
+ u64 window_start;
+ u64 curr_runnable_sum;
+ u64 prev_runnable_sum;
+ u64 nt_curr_runnable_sum;
+ u64 nt_prev_runnable_sum;
+ u64 cur_irqload;
+ u64 avg_irqload;
+ u64 irqload_ts;
+ u64 cum_window_demand;
+#endif /* CONFIG_SCHED_WALT */
+
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
u64 prev_irq_time;
#endif
@@ -1738,6 +1759,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
+ set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
p->se.cfs_rq = tg->cfs_rq[cpu];
p->se.parent = tg->se[cpu];
#endif
@@ -2024,7 +2046,8 @@ struct sched_class {
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
- int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
+ int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags,
+ int sibling_count_hint);
void (*migrate_task_rq)(struct task_struct *p);
void (*task_waking) (struct task_struct *task);
@@ -2057,8 +2080,11 @@ struct sched_class {
void (*update_curr) (struct rq *rq);
+#define TASK_SET_GROUP 0
+#define TASK_MOVE_GROUP 1
+
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*task_move_group) (struct task_struct *p);
+ void (*task_change_group)(struct task_struct *p, int type);
#endif
#ifdef CONFIG_SCHED_HMP
void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
@@ -2330,7 +2356,7 @@ static inline unsigned long capacity_orig_of(int cpu)
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int walt_ravg_window;
-extern unsigned int walt_disabled;
+extern bool walt_disabled;
/*
* cpu_util returns the amount of capacity of a CPU that is used by CFS
@@ -2406,6 +2432,10 @@ static inline bool sched_freq(void)
return static_key_false(&__sched_freq);
}
+/*
+ * sched_capacity_reqs expects capacity requests to be normalised.
+ * All capacities should sum to the range of 0-1024.
+ */
DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
void update_cpu_capacity_request(int cpu, bool request);
@@ -2875,6 +2905,17 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */
+#ifdef CONFIG_SCHED_WALT
+
+static inline bool
+walt_task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+ return cpu_of(rq) == task_cpu(p) &&
+ (p->on_rq || p->last_sleep_ts >= rq->window_start);
+}
+
+#endif /* CONFIG_SCHED_WALT */
+
#ifdef arch_scale_freq_capacity
#ifndef arch_scale_freq_invariant
#define arch_scale_freq_invariant() (true)
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 134da1cc8fce..3278c81cefb1 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -11,7 +11,8 @@
#ifdef CONFIG_SMP
static int
-select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
+select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags,
+ int sibling_count_hint)
{
return task_cpu(p); /* stop tasks as never migrate */
}
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 28e999554463..8d25ffbe4fed 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -20,7 +20,6 @@
*/
#include <linux/syscore_ops.h>
-#include <linux/cpufreq.h>
#include <trace/events/sched.h>
#include "sched.h"
#include "walt.h"
@@ -42,48 +41,17 @@ static __read_mostly unsigned int walt_io_is_busy = 0;
unsigned int sysctl_sched_walt_init_task_load_pct = 15;
-/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
-unsigned int __read_mostly walt_disabled = 0;
+/* true -> use PELT based load stats, false -> use window-based load stats */
+bool __read_mostly walt_disabled = false;
-static unsigned int max_possible_efficiency = 1024;
-static unsigned int min_possible_efficiency = 1024;
-
-/*
- * Maximum possible frequency across all cpus. Task demand and cpu
- * capacity (cpu_power) metrics are scaled in reference to it.
- */
-static unsigned int max_possible_freq = 1;
-
-/*
- * Minimum possible max_freq across all cpus. This will be same as
- * max_possible_freq on homogeneous systems and could be different from
- * max_possible_freq on heterogenous systems. min_max_freq is used to derive
- * capacity (cpu_power) of cpus.
- */
-static unsigned int min_max_freq = 1;
-
-static unsigned int max_load_scale_factor = 1024;
-static unsigned int max_possible_capacity = 1024;
-
-/* Mask of all CPUs that have max_possible_capacity */
-static cpumask_t mpc_mask = CPU_MASK_ALL;
-
-/* Window size (in ns) */
-__read_mostly unsigned int walt_ravg_window = 20000000;
-
-/* Min window size (in ns) = 10ms */
-#ifdef CONFIG_HZ_300
/*
- * Tick interval becomes to 3333333 due to
- * rounding error when HZ=300.
+ * Window size (in ns). Adjust for the tick size so that the window
+ * rollover occurs just before the tick boundary.
*/
-#define MIN_SCHED_RAVG_WINDOW (3333333 * 6)
-#else
-#define MIN_SCHED_RAVG_WINDOW 10000000
-#endif
-
-/* Max window size (in ns) = 1s */
-#define MAX_SCHED_RAVG_WINDOW 1000000000
+__read_mostly unsigned int walt_ravg_window =
+ (20000000 / TICK_NSEC) * TICK_NSEC;
+#define MIN_SCHED_RAVG_WINDOW ((10000000 / TICK_NSEC) * TICK_NSEC)
+#define MAX_SCHED_RAVG_WINDOW ((1000000000 / TICK_NSEC) * TICK_NSEC)
static unsigned int sync_cpu;
static ktime_t ktime_last;
@@ -94,11 +62,28 @@ static unsigned int task_load(struct task_struct *p)
return p->ravg.demand;
}
+static inline void fixup_cum_window_demand(struct rq *rq, s64 delta)
+{
+ rq->cum_window_demand += delta;
+ if (unlikely((s64)rq->cum_window_demand < 0))
+ rq->cum_window_demand = 0;
+}
+
void
walt_inc_cumulative_runnable_avg(struct rq *rq,
struct task_struct *p)
{
rq->cumulative_runnable_avg += p->ravg.demand;
+
+ /*
+ * Add a task's contribution to the cumulative window demand when
+ *
+ * (1) task is enqueued with on_rq = 1 i.e migration,
+ * prio/cgroup/class change.
+ * (2) task is waking for the first time in this window.
+ */
+ if (p->on_rq || (p->last_sleep_ts < rq->window_start))
+ fixup_cum_window_demand(rq, p->ravg.demand);
}
void
@@ -107,6 +92,14 @@ walt_dec_cumulative_runnable_avg(struct rq *rq,
{
rq->cumulative_runnable_avg -= p->ravg.demand;
BUG_ON((s64)rq->cumulative_runnable_avg < 0);
+
+ /*
+ * on_rq will be 1 for sleeping tasks. So check if the task
+ * is migrating or dequeuing in RUNNING state to change the
+ * prio/cgroup/class.
+ */
+ if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
+ fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
}
static void
@@ -119,6 +112,8 @@ fixup_cumulative_runnable_avg(struct rq *rq,
if ((s64)rq->cumulative_runnable_avg < 0)
panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
task_load_delta, task_load(p));
+
+ fixup_cum_window_demand(rq, task_load_delta);
}
u64 walt_ktime_clock(void)
@@ -177,10 +172,28 @@ static int exiting_task(struct task_struct *p)
static int __init set_walt_ravg_window(char *str)
{
+ unsigned int adj_window;
+ bool no_walt = walt_disabled;
+
get_option(&str, &walt_ravg_window);
- walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
- walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
+ /* Adjust for CONFIG_HZ */
+ adj_window = (walt_ravg_window / TICK_NSEC) * TICK_NSEC;
+
+ /* Warn if we're a bit too far away from the expected window size */
+ WARN(adj_window < walt_ravg_window - NSEC_PER_MSEC,
+ "tick-adjusted window size %u, original was %u\n", adj_window,
+ walt_ravg_window);
+
+ walt_ravg_window = adj_window;
+
+ walt_disabled = walt_disabled ||
+ (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
+ walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
+
+ WARN(!no_walt && walt_disabled,
+ "invalid window size, disabling WALT\n");
+
return 0;
}
@@ -204,26 +217,20 @@ update_window_start(struct rq *rq, u64 wallclock)
nr_windows = div64_u64(delta, walt_ravg_window);
rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
+
+ rq->cum_window_demand = rq->cumulative_runnable_avg;
}
+/*
+ * Translate absolute delta time accounted on a CPU
+ * to a scale where 1024 is the capacity of the most
+ * capable CPU running at FMAX
+ */
static u64 scale_exec_time(u64 delta, struct rq *rq)
{
- unsigned int cur_freq = rq->cur_freq;
- int sf;
-
- if (unlikely(cur_freq > max_possible_freq))
- cur_freq = rq->max_possible_freq;
-
- /* round up div64 */
- delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
- max_possible_freq);
+ unsigned long capcurr = capacity_curr_of(cpu_of(rq));
- sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);
-
- delta *= sf;
- delta >>= 10;
-
- return delta;
+ return (delta * capcurr) >> SCHED_CAPACITY_SHIFT;
}
static int cpu_is_waiting_on_io(struct rq *rq)
@@ -600,10 +607,20 @@ static void update_history(struct rq *rq, struct task_struct *p,
* A throttled deadline sched class task gets dequeued without
* changing p->on_rq. Since the dequeue decrements hmp stats
* avoid decrementing it here again.
+ *
+ * When window is rolled over, the cumulative window demand
+ * is reset to the cumulative runnable average (contribution from
+ * the tasks on the runqueue). If the current task is dequeued
+ * already, it's demand is not included in the cumulative runnable
+ * average. So add the task demand separately to cumulative window
+ * demand.
*/
- if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
- !p->dl.dl_throttled))
- fixup_cumulative_runnable_avg(rq, p, demand);
+ if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
+ if (task_on_rq_queued(p))
+ fixup_cumulative_runnable_avg(rq, p, demand);
+ else if (rq->curr == p)
+ fixup_cum_window_demand(rq, demand);
+ }
p->ravg.demand = demand;
@@ -746,33 +763,6 @@ done:
p->ravg.mark_start = wallclock;
}
-unsigned long __weak arch_get_cpu_efficiency(int cpu)
-{
- return SCHED_LOAD_SCALE;
-}
-
-void walt_init_cpu_efficiency(void)
-{
- int i, efficiency;
- unsigned int max = 0, min = UINT_MAX;
-
- for_each_possible_cpu(i) {
- efficiency = arch_get_cpu_efficiency(i);
- cpu_rq(i)->efficiency = efficiency;
-
- if (efficiency > max)
- max = efficiency;
- if (efficiency < min)
- min = efficiency;
- }
-
- if (max)
- max_possible_efficiency = max;
-
- if (min)
- min_possible_efficiency = min;
-}
-
static void reset_task_stats(struct task_struct *p)
{
u32 sum = 0;
@@ -851,6 +841,17 @@ void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
+ /*
+ * When a task is migrating during the wakeup, adjust
+ * the task's contribution towards cumulative window
+ * demand.
+ */
+ if (p->state == TASK_WAKING &&
+ p->last_sleep_ts >= src_rq->window_start) {
+ fixup_cum_window_demand(src_rq, -(s64)p->ravg.demand);
+ fixup_cum_window_demand(dest_rq, p->ravg.demand);
+ }
+
if (p->ravg.curr_window) {
src_rq->curr_runnable_sum -= p->ravg.curr_window;
dest_rq->curr_runnable_sum += p->ravg.curr_window;
@@ -877,242 +878,6 @@ void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
double_rq_unlock(src_rq, dest_rq);
}
-/*
- * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
- * least efficient cpu gets capacity of 1024
- */
-static unsigned long capacity_scale_cpu_efficiency(int cpu)
-{
- return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
-}
-
-/*
- * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
- * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
- */
-static unsigned long capacity_scale_cpu_freq(int cpu)
-{
- return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
- * that "most" efficient cpu gets a load_scale_factor of 1
- */
-static unsigned long load_scale_cpu_efficiency(int cpu)
-{
- return DIV_ROUND_UP(1024 * max_possible_efficiency,
- cpu_rq(cpu)->efficiency);
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to cpu with best max_freq
- * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
- * of 1.
- */
-static unsigned long load_scale_cpu_freq(int cpu)
-{
- return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
-}
-
-static int compute_capacity(int cpu)
-{
- int capacity = 1024;
-
- capacity *= capacity_scale_cpu_efficiency(cpu);
- capacity >>= 10;
-
- capacity *= capacity_scale_cpu_freq(cpu);
- capacity >>= 10;
-
- return capacity;
-}
-
-static int compute_load_scale_factor(int cpu)
-{
- int load_scale = 1024;
-
- /*
- * load_scale_factor accounts for the fact that task load
- * is in reference to "best" performing cpu. Task's load will need to be
- * scaled (up) by a factor to determine suitability to be placed on a
- * (little) cpu.
- */
- load_scale *= load_scale_cpu_efficiency(cpu);
- load_scale >>= 10;
-
- load_scale *= load_scale_cpu_freq(cpu);
- load_scale >>= 10;
-
- return load_scale;
-}
-
-static int cpufreq_notifier_policy(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
- int i, update_max = 0;
- u64 highest_mpc = 0, highest_mplsf = 0;
- const struct cpumask *cpus = policy->related_cpus;
- unsigned int orig_min_max_freq = min_max_freq;
- unsigned int orig_max_possible_freq = max_possible_freq;
- /* Initialized to policy->max in case policy->related_cpus is empty! */
- unsigned int orig_max_freq = policy->max;
-
- if (val != CPUFREQ_NOTIFY)
- return 0;
-
- for_each_cpu(i, policy->related_cpus) {
- cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
- policy->related_cpus);
- orig_max_freq = cpu_rq(i)->max_freq;
- cpu_rq(i)->min_freq = policy->min;
- cpu_rq(i)->max_freq = policy->max;
- cpu_rq(i)->cur_freq = policy->cur;
- cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
- }
-
- max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
- if (min_max_freq == 1)
- min_max_freq = UINT_MAX;
- min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
- BUG_ON(!min_max_freq);
- BUG_ON(!policy->max);
-
- /* Changes to policy other than max_freq don't require any updates */
- if (orig_max_freq == policy->max)
- return 0;
-
- /*
- * A changed min_max_freq or max_possible_freq (possible during bootup)
- * needs to trigger re-computation of load_scale_factor and capacity for
- * all possible cpus (even those offline). It also needs to trigger
- * re-computation of nr_big_task count on all online cpus.
- *
- * A changed rq->max_freq otoh needs to trigger re-computation of
- * load_scale_factor and capacity for just the cluster of cpus involved.
- * Since small task definition depends on max_load_scale_factor, a
- * changed load_scale_factor of one cluster could influence
- * classification of tasks in another cluster. Hence a changed
- * rq->max_freq will need to trigger re-computation of nr_big_task
- * count on all online cpus.
- *
- * While it should be sufficient for nr_big_tasks to be
- * re-computed for only online cpus, we have inadequate context
- * information here (in policy notifier) with regard to hotplug-safety
- * context in which notification is issued. As a result, we can't use
- * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
- * fixed up to issue notification always in hotplug-safe context,
- * re-compute nr_big_task for all possible cpus.
- */
-
- if (orig_min_max_freq != min_max_freq ||
- orig_max_possible_freq != max_possible_freq) {
- cpus = cpu_possible_mask;
- update_max = 1;
- }
-
- /*
- * Changed load_scale_factor can trigger reclassification of tasks as
- * big or small. Make this change "atomic" so that tasks are accounted
- * properly due to changed load_scale_factor
- */
- for_each_cpu(i, cpus) {
- struct rq *rq = cpu_rq(i);
-
- rq->capacity = compute_capacity(i);
- rq->load_scale_factor = compute_load_scale_factor(i);
-
- if (update_max) {
- u64 mpc, mplsf;
-
- mpc = div_u64(((u64) rq->capacity) *
- rq->max_possible_freq, rq->max_freq);
- rq->max_possible_capacity = (int) mpc;
-
- mplsf = div_u64(((u64) rq->load_scale_factor) *
- rq->max_possible_freq, rq->max_freq);
-
- if (mpc > highest_mpc) {
- highest_mpc = mpc;
- cpumask_clear(&mpc_mask);
- cpumask_set_cpu(i, &mpc_mask);
- } else if (mpc == highest_mpc) {
- cpumask_set_cpu(i, &mpc_mask);
- }
-
- if (mplsf > highest_mplsf)
- highest_mplsf = mplsf;
- }
- }
-
- if (update_max) {
- max_possible_capacity = highest_mpc;
- max_load_scale_factor = highest_mplsf;
- }
-
- return 0;
-}
-
-static int cpufreq_notifier_trans(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
- unsigned int cpu = freq->cpu, new_freq = freq->new;
- unsigned long flags;
- int i;
-
- if (val != CPUFREQ_POSTCHANGE)
- return 0;
-
- BUG_ON(!new_freq);
-
- if (cpu_rq(cpu)->cur_freq == new_freq)
- return 0;
-
- for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
- struct rq *rq = cpu_rq(i);
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
- walt_ktime_clock(), 0);
- rq->cur_freq = new_freq;
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
-
- return 0;
-}
-
-static struct notifier_block notifier_policy_block = {
- .notifier_call = cpufreq_notifier_policy
-};
-
-static struct notifier_block notifier_trans_block = {
- .notifier_call = cpufreq_notifier_trans
-};
-
-static int register_sched_callback(void)
-{
- int ret;
-
- ret = cpufreq_register_notifier(&notifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER);
-
- if (!ret)
- ret = cpufreq_register_notifier(&notifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- return 0;
-}
-
-/*
- * cpufreq callbacks can be registered at core_initcall or later time.
- * Any registration done prior to that is "forgotten" by cpufreq. See
- * initialization of variable init_cpufreq_transition_notifier_list_called
- * for further information.
- */
-core_initcall(register_sched_callback);
-
void walt_init_new_task_load(struct task_struct *p)
{
int i;
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index f56c4da16d0b..de7edac43674 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -59,6 +59,6 @@ static inline u64 walt_ktime_clock(void) { return 0; }
#endif /* CONFIG_SCHED_WALT */
-extern unsigned int walt_disabled;
+extern bool walt_disabled;
#endif
diff --git a/lib/digsig.c b/lib/digsig.c
index 07be6c1ef4e2..00c5c8179393 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key,
down_read(&key->sem);
ukp = user_key_payload(key);
+ if (!ukp) {
+ /* key was revoked before we acquired its semaphore */
+ err = -EKEYREVOKED;
+ goto err1;
+ }
+
if (ukp->datalen < sizeof(*pkh))
goto err1;
diff --git a/mm/memblock.c b/mm/memblock.c
index 351a4840a407..241225579f3a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1605,11 +1605,12 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size
memblock.memory.regions[idx].size) >= end;
}
-int __init_memblock memblock_overlaps_memory(phys_addr_t base, phys_addr_t size)
+bool __init_memblock memblock_overlaps_memory(phys_addr_t base,
+ phys_addr_t size)
{
memblock_cap_size(base, &size);
- return memblock_overlaps_region(&memblock.memory, base, size) >= 0;
+ return memblock_overlaps_region(&memblock.memory, base, size);
}
/**
diff --git a/mm/slab_common.c b/mm/slab_common.c
index bec2fce9fafc..01e7246de8df 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -250,7 +250,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
{
struct kmem_cache *s;
- if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
+ if (slab_nomerge)
return NULL;
if (ctor)
@@ -261,6 +261,9 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
size = ALIGN(size, align);
flags = kmem_cache_flags(size, flags, name, NULL);
+ if (flags & SLAB_NEVER_MERGE)
+ return NULL;
+
list_for_each_entry_reverse(s, &slab_caches, list) {
if (slab_unmergeable(s))
continue;
diff --git a/net/core/sock.c b/net/core/sock.c
index 39e9ab7c598e..acc60ec11630 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1528,6 +1528,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
sock_copy(newsk, sk);
+ newsk->sk_prot_creator = sk->sk_prot;
+
/* SANITY */
if (likely(newsk->sk_net_refcnt))
get_net(sock_net(newsk));
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index c79b85eb4d4c..6abc5012200b 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
- if (key_is_instantiated(key)) {
+ if (key_is_positive(key)) {
int err = PTR_ERR(key->payload.data[dns_key_error]);
if (err)
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 65036891e080..a03f834f16d5 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -156,6 +156,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_parm *parms = &tunnel->parms;
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev; /* Device to other host */
+ int pkt_len = skb->len;
int err;
if (!dst) {
@@ -199,7 +200,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
err = dst_output(tunnel->net, skb->sk, skb);
if (net_xmit_eval(err) == 0)
- err = skb->len;
+ err = pkt_len;
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ab0efaca4a78..6150a038711b 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1177,24 +1177,25 @@ static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
}
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned int len)
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
- __be16 *p = (__be16 *)(ipv6h+1);
+ struct ipv6hdr *ipv6h;
+ __be16 *p;
- ip6_flow_hdr(ipv6h, 0,
- ip6_make_flowlabel(dev_net(dev), skb,
- t->fl.u.ip6.flowlabel, true,
- &t->fl.u.ip6));
+ ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen + sizeof(*ipv6h));
+ ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
+ t->fl.u.ip6.flowlabel,
+ true, &t->fl.u.ip6));
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = NEXTHDR_GRE;
ipv6h->saddr = t->parms.laddr;
ipv6h->daddr = t->parms.raddr;
- p[0] = t->parms.o_flags;
- p[1] = htons(type);
+ p = (__be16 *)(ipv6h + 1);
+ p[0] = t->parms.o_flags;
+ p[1] = htons(type);
/*
* Set the source hardware address.
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 5b7433887eda..f615f982961a 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -434,6 +434,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev;
struct xfrm_state *x;
+ int pkt_len = skb->len;
int err = -1;
int mtu;
@@ -487,7 +488,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += skb->len;
+ tstats->tx_bytes += pkt_len;
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
} else {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index d3dec414fd44..d48281ca9c72 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1321,6 +1321,9 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
struct sock *sk = NULL;
tunnel = container_of(work, struct l2tp_tunnel, del_work);
+
+ l2tp_tunnel_closeall(tunnel);
+
sk = l2tp_tunnel_sock_lookup(tunnel);
if (!sk)
goto out;
@@ -1640,15 +1643,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
/* This function is used by the netlink TUNNEL_DELETE command.
*/
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
- l2tp_tunnel_inc_refcount(tunnel);
- l2tp_tunnel_closeall(tunnel);
- if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
- l2tp_tunnel_dec_refcount(tunnel);
- return 1;
+ if (!test_and_set_bit(0, &tunnel->dead)) {
+ l2tp_tunnel_inc_refcount(tunnel);
+ queue_work(l2tp_wq, &tunnel->del_work);
}
- return 0;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 555d962a62d2..9cf546846edb 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -169,6 +169,9 @@ struct l2tp_tunnel_cfg {
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
+
+ unsigned long dead;
+
struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
struct hlist_head session_hlist[L2TP_HASH_SIZE];
@@ -253,7 +256,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
struct l2tp_tunnel **tunnelp);
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
struct l2tp_session *l2tp_session_create(int priv_size,
struct l2tp_tunnel *tunnel,
u32 session_id, u32 peer_session_id,
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index d2075804cbff..a450be6812b8 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -662,7 +662,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
}
/* No need to do anything if the driver does all */
- if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
+ if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
return;
if (sta->dead)
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index acf5c7b3f378..7f16d19d6198 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -395,7 +395,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
struct net *net = nf_ct_exp_net(expect);
struct hlist_node *next;
unsigned int h;
- int ret = 1;
+ int ret = 0;
if (!master_help) {
ret = -ESHUTDOWN;
@@ -445,7 +445,7 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
spin_lock_bh(&nf_conntrack_expect_lock);
ret = __nf_ct_expect_check(expect);
- if (ret <= 0)
+ if (ret < 0)
goto out;
ret = nf_ct_expect_insert(expect);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b70055fc30cb..241f69039a72 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1652,10 +1652,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
mutex_lock(&fanout_mutex);
- err = -EINVAL;
- if (!po->running)
- goto out;
-
err = -EALREADY;
if (po->fanout)
goto out;
@@ -1704,7 +1700,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
- if (match->type == type &&
+
+ spin_lock(&po->bind_lock);
+ if (po->running &&
+ match->type == type &&
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
@@ -1716,6 +1715,13 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
err = 0;
}
}
+ spin_unlock(&po->bind_lock);
+
+ if (err && !atomic_read(&match->sk_ref)) {
+ list_del(&match->list);
+ kfree(match);
+ }
+
out:
if (err && rollover) {
kfree(rollover);
@@ -2650,6 +2656,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
int vnet_hdr_len;
struct packet_sock *po = pkt_sk(sk);
unsigned short gso_type = 0;
+ bool has_vnet_hdr = false;
int hlen, tlen, linear;
int extra_len = 0;
ssize_t n;
@@ -2737,6 +2744,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
goto out_unlock;
}
+ has_vnet_hdr = true;
}
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
@@ -2796,7 +2804,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
packet_pick_tx_queue(dev, skb);
- if (po->has_vnet_hdr) {
+ if (has_vnet_hdr) {
if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
@@ -2938,13 +2946,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
int ret = 0;
bool unlisted = false;
- if (po->fanout)
- return -EINVAL;
-
lock_sock(sk);
spin_lock(&po->bind_lock);
rcu_read_lock();
+ if (po->fanout) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
if (name) {
dev = dev_get_by_name_rcu(sock_net(sk), name);
if (!dev) {
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8740930f0787..67bddcb2ff46 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -541,7 +541,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
return false;
if (msg_errcode(msg))
return false;
- *err = -TIPC_ERR_NO_NAME;
+ *err = TIPC_ERR_NO_NAME;
if (skb_linearize(skb))
return false;
msg = buf_msg(skb);
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 907c1522ee46..08c4cc5c2973 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -138,7 +138,7 @@ void big_key_revoke(struct key *key)
/* clear the quota */
key_payload_reserve(key, 0);
- if (key_is_instantiated(key) &&
+ if (key_is_positive(key) &&
(size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
vfs_truncate(path, 0);
}
@@ -170,7 +170,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
seq_puts(m, key->description);
- if (key_is_instantiated(key))
+ if (key_is_positive(key))
seq_printf(m, ": %zu [%s]",
datalen,
datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 31898856682e..ce295c0c1da0 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -315,6 +315,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k
down_read(&ukey->sem);
upayload = user_key_payload(ukey);
+ if (!upayload) {
+ /* key was revoked before we acquired its semaphore */
+ up_read(&ukey->sem);
+ key_put(ukey);
+ ukey = ERR_PTR(-EKEYREVOKED);
+ goto error;
+ }
*master_key = upayload->data;
*master_keylen = upayload->datalen;
error:
@@ -845,7 +852,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
size_t datalen = prep->datalen;
int ret = 0;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (key_is_negative(key))
return -ENOKEY;
if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 9cb4fe4478a1..1659094d684d 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
while (!list_empty(keys)) {
struct key *key =
list_entry(keys->next, struct key, graveyard_link);
+ short state = key->state;
+
list_del(&key->graveyard_link);
kdebug("- %u", key->serial);
key_check(key);
/* Throw away the key data if the key is instantiated */
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
- !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
- key->type->destroy)
+ if (state == KEY_IS_POSITIVE && key->type->destroy)
key->type->destroy(key);
security_key_free(key);
@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
}
atomic_dec(&key->user->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ if (state != KEY_IS_UNINSTANTIATED)
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
diff --git a/security/keys/key.c b/security/keys/key.c
index 51d23c623424..4d971bf88ac3 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -396,6 +396,18 @@ int key_payload_reserve(struct key *key, size_t datalen)
EXPORT_SYMBOL(key_payload_reserve);
/*
+ * Change the key state to being instantiated.
+ */
+static void mark_key_instantiated(struct key *key, int reject_error)
+{
+ /* Commit the payload before setting the state; barrier versus
+ * key_read_state().
+ */
+ smp_store_release(&key->state,
+ (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
+}
+
+/*
* Instantiate a key and link it into the target keyring atomically. Must be
* called with the target keyring's semaphore writelocked. The target key's
* semaphore need not be locked as instantiation is serialised by
@@ -418,14 +430,14 @@ static int __key_instantiate_and_link(struct key *key,
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
- if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+ if (key->state == KEY_IS_UNINSTANTIATED) {
/* instantiate the key */
ret = key->type->instantiate(key, prep);
if (ret == 0) {
/* mark the key as being instantiated */
atomic_inc(&key->user->nikeys);
- set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+ mark_key_instantiated(key, 0);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
@@ -553,13 +565,10 @@ int key_reject_and_link(struct key *key,
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
- if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+ if (key->state == KEY_IS_UNINSTANTIATED) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
- key->reject_error = -error;
- smp_wmb();
- set_bit(KEY_FLAG_NEGATIVE, &key->flags);
- set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+ mark_key_instantiated(key, -error);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
@@ -731,8 +740,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
ret = key->type->update(key, prep);
if (ret == 0)
- /* updating a negative key instantiates it */
- clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ /* Updating a negative key positively instantiates it */
+ mark_key_instantiated(key, 0);
up_write(&key->sem);
@@ -907,6 +916,16 @@ error:
*/
__key_link_end(keyring, &index_key, edit);
+ key = key_ref_to_ptr(key_ref);
+ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
+ ret = wait_for_key_construction(key, true);
+ if (ret < 0) {
+ key_ref_put(key_ref);
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
+ }
+
key_ref = __key_update(key_ref, &prep);
goto error_free_prep;
}
@@ -957,8 +976,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
ret = key->type->update(key, &prep);
if (ret == 0)
- /* updating a negative key instantiates it */
- clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ /* Updating a negative key positively instantiates it */
+ mark_key_instantiated(key, 0);
up_write(&key->sem);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index a009dc66eb8f..2e741e1a8712 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -738,10 +738,9 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
key = key_ref_to_ptr(key_ref);
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
- ret = -ENOKEY;
- goto error2;
- }
+ ret = key_read_state(key);
+ if (ret < 0)
+ goto error2; /* Negatively instantiated */
/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);
@@ -873,7 +872,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
atomic_dec(&key->user->nkeys);
atomic_inc(&newowner->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+ if (key->state != KEY_IS_UNINSTANTIATED) {
atomic_dec(&key->user->nikeys);
atomic_inc(&newowner->nikeys);
}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 0c8dd4fbe130..ef828238cdc0 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -407,7 +407,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
else
seq_puts(m, "[anon]");
- if (key_is_instantiated(keyring)) {
+ if (key_is_positive(keyring)) {
if (keyring->keys.nr_leaves_on_tree != 0)
seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
else
@@ -522,7 +522,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
{
struct keyring_search_context *ctx = iterator_data;
const struct key *key = keyring_ptr_to_key(object);
- unsigned long kflags = key->flags;
+ unsigned long kflags = READ_ONCE(key->flags);
+ short state = READ_ONCE(key->state);
kenter("{%d}", key->serial);
@@ -566,9 +567,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
/* we set a different error code if we pass a negative key */
- if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
- smp_rmb();
- ctx->result = ERR_PTR(key->reject_error);
+ if (state < 0) {
+ ctx->result = ERR_PTR(state);
kleave(" = %d [neg]", ctx->skipped_ret);
goto skipped;
}
diff --git a/security/keys/proc.c b/security/keys/proc.c
index b9f531c9e4fa..036128682463 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -182,6 +182,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
unsigned long timo;
key_ref_t key_ref, skey_ref;
char xbuf[16];
+ short state;
int rc;
struct keyring_search_context ctx = {
@@ -240,17 +241,19 @@ static int proc_keys_show(struct seq_file *m, void *v)
sprintf(xbuf, "%luw", timo / (60*60*24*7));
}
+ state = key_read_state(key);
+
#define showflag(KEY, LETTER, FLAG) \
(test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
key->serial,
- showflag(key, 'I', KEY_FLAG_INSTANTIATED),
+ state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
showflag(key, 'R', KEY_FLAG_REVOKED),
showflag(key, 'D', KEY_FLAG_DEAD),
showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
- showflag(key, 'N', KEY_FLAG_NEGATIVE),
+ state < 0 ? 'N' : '-',
showflag(key, 'i', KEY_FLAG_INVALIDATED),
atomic_read(&key->usage),
xbuf,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 7dd050f24261..ac1d5b2b1626 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -727,7 +727,7 @@ try_again:
ret = -EIO;
if (!(lflags & KEY_LOOKUP_PARTIAL) &&
- !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ key_read_state(key) == KEY_IS_UNINSTANTIATED)
goto invalid_key;
/* check the permissions */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index c7a117c9a8f3..2ce733342b5a 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -594,10 +594,9 @@ int wait_for_key_construction(struct key *key, bool intr)
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret)
return -ERESTARTSYS;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
- smp_rmb();
- return key->reject_error;
- }
+ ret = key_read_state(key);
+ if (ret < 0)
+ return ret;
return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 4f0f112fe276..217775fcd0f3 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key,
seq_puts(m, "key:");
seq_puts(m, key->description);
- if (key_is_instantiated(key))
+ if (key_is_positive(key))
seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
}
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 16dec53184b6..509aedcf8310 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1014,7 +1014,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
char *datablob;
int ret = 0;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (key_is_negative(key))
return -ENOKEY;
p = key->payload.data[0];
if (!p->migratable)
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 8705d79b2c6f..eba8a516ee9e 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -120,7 +120,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
if (ret == 0) {
/* attach the new data, displacing the old */
- if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (key_is_positive(key))
zap = key->payload.data[0];
else
zap = NULL;
@@ -174,7 +174,7 @@ EXPORT_SYMBOL_GPL(user_destroy);
void user_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
- if (key_is_instantiated(key))
+ if (key_is_positive(key))
seq_printf(m, ": %u", key->datalen);
}
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 12ba83367b1b..ba5752ee9af3 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -23,8 +23,6 @@
#include <sound/core.h>
#include "seq_lock.h"
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
@@ -42,5 +40,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
}
EXPORT_SYMBOL(snd_use_lock_sync_helper);
-
-#endif
diff --git a/sound/core/seq/seq_lock.h b/sound/core/seq/seq_lock.h
index 54044bc2c9ef..ac38031c370e 100644
--- a/sound/core/seq/seq_lock.h
+++ b/sound/core/seq/seq_lock.h
@@ -3,8 +3,6 @@
#include <linux/sched.h>
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
typedef atomic_t snd_use_lock_t;
/* initialize lock */
@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
-#else /* SMP || CONFIG_SND_DEBUG */
-
-typedef spinlock_t snd_use_lock_t; /* dummy */
-#define snd_use_lock_init(lockp) /**/
-#define snd_use_lock_use(lockp) /**/
-#define snd_use_lock_free(lockp) /**/
-#define snd_use_lock_sync(lockp) /**/
-
-#endif /* SMP || CONFIG_SND_DEBUG */
-
#endif /* __SND_SEQ_LOCK_H */
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 83741887faa1..3324f98c35f6 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1755,7 +1755,7 @@ static int get_kctl_0dB_offset(struct hda_codec *codec,
return -1;
if (*step_to_check && *step_to_check != step) {
codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
-- *step_to_check, step);
+ *step_to_check, step);
return -1;
}
*step_to_check = step;
diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c
index f1af708f9a50..1fbdc7049d13 100644
--- a/sound/soc/msm/apq8096-auto.c
+++ b/sound/soc/msm/apq8096-auto.c
@@ -53,9 +53,11 @@ static int msm_hdmi_rx_ch = 2;
static int msm_proxy_rx_ch = 2;
static int hdmi_rx_sample_rate = SAMPLING_RATE_48KHZ;
static int msm_sec_mi2s_tx_ch = 2;
+static int msm_sec_mi2s_rx_ch = 2;
static int msm_tert_mi2s_tx_ch = 2;
static int msm_quat_mi2s_rx_ch = 2;
static int msm_sec_mi2s_tx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_tert_mi2s_tx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_quat_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_sec_mi2s_rate = SAMPLING_RATE_48KHZ;
@@ -775,6 +777,41 @@ static int msm_tert_mi2s_tx_bit_format_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_sec_mi2s_rx_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_mi2s_rx_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_mi2s_rx_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_mi2s_rx_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_mi2s_rx_bit_format = %d\n",
+ __func__, msm_sec_mi2s_rx_bit_format);
+ return 0;
+}
+
+
static int msm_sec_mi2s_tx_bit_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -2897,16 +2934,38 @@ static int msm_hdmi_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
static int msm_mi2s_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_interval *rate = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- pr_debug("%s: channel:%d\n", __func__, msm_quat_mi2s_rx_ch);
rate->min = rate->max = SAMPLING_RATE_48KHZ;
- channels->min = channels->max = msm_quat_mi2s_rx_ch;
- param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
- msm_quat_mi2s_rx_bit_format);
+
+ switch (cpu_dai->id) {
+ case 1: /*MSM_SEC_MI2S*/
+ pr_debug("%s: channel:%d\n", __func__, msm_sec_mi2s_rx_ch);
+ rate->min = rate->max = msm_sec_mi2s_rate;
+ channels->min = channels->max = msm_sec_mi2s_rx_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_mi2s_rx_bit_format);
+ break;
+ case 3: /*MSM_QUAT_MI2S*/
+ pr_debug("%s: channel:%d\n", __func__, msm_quat_mi2s_rx_ch);
+ rate->min = rate->max = SAMPLING_RATE_48KHZ;
+ channels->min = channels->max = msm_quat_mi2s_rx_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_quat_mi2s_rx_bit_format);
+ break;
+ default:
+ pr_err("%s: dai id 0x%x not supported\n",
+ __func__, cpu_dai->id);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
+ __func__, cpu_dai->id, channels->max, rate->max,
+ params_format(params));
return 0;
}
@@ -3835,6 +3894,9 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("TERT_MI2S_TX Bit Format", msm_snd_enum[7],
msm_tert_mi2s_tx_bit_format_get,
msm_tert_mi2s_tx_bit_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_RX Bit Format", msm_snd_enum[7],
+ msm_sec_mi2s_rx_bit_format_get,
+ msm_sec_mi2s_rx_bit_format_put),
SOC_ENUM_EXT("SEC_MI2S_TX Bit Format", msm_snd_enum[7],
msm_sec_mi2s_tx_bit_format_get,
msm_sec_mi2s_tx_bit_format_put),
@@ -5541,6 +5603,21 @@ static struct snd_soc_dai_link apq8096_common_be_dai_links[] = {
static struct snd_soc_dai_link apq8096_auto_be_dai_links[] = {
/* Backend DAI Links */
+ {
+ .name = LPASS_BE_SEC_MI2S_RX,
+ .stream_name = "Secondary MI2S Playback",
+ .cpu_dai_name = "msm-dai-q6-mi2s.1",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ .be_hw_params_fixup = msm_mi2s_rx_be_hw_params_fixup,
+ .ops = &apq8096_mi2s_be_ops,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ },
{
.name = LPASS_BE_SEC_MI2S_TX,
.stream_name = "Secondary MI2S Capture",
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 1cc20d138dae..9c5368e7ee23 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1305,6 +1305,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
+ case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
if (fp->altsetting == 2)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;