183 files changed, 3288 insertions, 1439 deletions
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt index ed784f7f58a5..c9f099b49e7d 100644 --- a/Documentation/devicetree/bindings/platform/msm/ipa.txt +++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt @@ -59,6 +59,8 @@ memory allocation over a PCIe bridge a pipe reset via the IPA uC is required - qcom,ipa-wdi2: Boolean context flag to indicate whether using wdi-2.0 or not +- qcom,apps-shutdown-support: Boolean context flag to indicate whether + apps shutdown support is there or not. - qcom,use-64-bit-dma-mask: Boolean context flag to indicate whether using 64bit dma mask or not - qcom,use-dma-zone: Boolean context flag to indicate whether memory diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt index 9b40d44d363b..3793be6511b7 100644 --- a/Documentation/devicetree/bindings/usb/msm-phy.txt +++ b/Documentation/devicetree/bindings/usb/msm-phy.txt @@ -95,8 +95,6 @@ Required properties: - reg: Address and length of the register set for the device Required regs are: "qmp_phy_base" : QMP PHY Base register set. - - "vls_clamp_reg" : top-level CSR register to be written to enable phy vls - clamp which allows phy to detect autonomous mode. - <supply-name>-supply: phandle to the regulator device tree node Required "supply-name" examples are: "vdd" : vdd supply for SSPHY digital circuit operation @@ -125,6 +123,8 @@ Required properties: Optional properties: - reg: Additional register set of address and length to control QMP PHY are: + "vls_clamp_reg" : top-level CSR register to be written to enable phy vls + clamp which allows phy to detect autonomous mode. "tcsr_usb3_dp_phymode" : top-level CSR register to be written to select super speed usb qmp phy. - clocks: a list of phandles to the PHY clocks. Use as per diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index e47f2c950855..0ea9ef13f758 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -657,7 +657,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. clearcpuid=BITNUM [X86] Disable CPUID feature X for the kernel. See - arch/x86/include/asm/cpufeature.h for the valid bit + arch/x86/include/asm/cpufeatures.h for the valid bit numbers. Note the Linux specific bits are not necessarily stable over kernel options, but the vendor specific ones should be. 
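(Aside, not part of the patch: clearcpuid= takes the numeric value of an X86_FEATURE_* bit, and as the cpufeatures.h hunks further down in this diff show, that value encodes a 32-bit capability word plus a bit within it. A minimal C sketch of the decoding, using X86_FEATURE_RDTSCP from this diff as the example; the program itself is illustrative only.)

#include <stdio.h>

/* Illustrative only: feature numbers are laid out as word*32 + bit,
 * e.g. X86_FEATURE_RDTSCP is (1*32 + 27) = 59 in the header below.
 */
#define X86_FEATURE_RDTSCP (1*32 + 27)

int main(void)
{
	unsigned int bitnum = X86_FEATURE_RDTSCP;

	printf("clearcpuid=%u -> capability word %u, bit %u\n",
	       bitnum, bitnum >> 5, bitnum & 31);
	return 0;
}
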
@@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 140 +SUBLEVEL = 141 EXTRAVERSION = NAME = Blurry Fish Butt diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi index 4842d3b205e6..12af84e66330 100644 --- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi @@ -1416,6 +1416,7 @@ &usb2s { status = "ok"; qcom,no-wakeup-src-in-hostmode; + qcom,disable-host-mode-pm; }; &usb3 { @@ -1424,6 +1425,7 @@ vdda33-supply = <&pm8994_l24>; vdda18-supply = <&pm8994_l12>; qcom,no-wakeup-src-in-hostmode; + qcom,disable-host-mode-pm; }; &blsp1_uart2 { diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi index 86e9dd72dcad..b69f7ab7e32c 100644 --- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi @@ -1246,6 +1246,7 @@ status = "ok"; vbus_dwc3-supply = <&usb2_otg_switch>; qcom,no-wakeup-src-in-hostmode; + qcom,disable-host-mode-pm; dwc3@7600000 { dr_mode = "host"; }; @@ -1257,6 +1258,7 @@ vdda33-supply = <&pm8994_l24>; vdda18-supply = <&pm8994_l12>; qcom,no-wakeup-src-in-hostmode; + qcom,disable-host-mode-pm; }; &blsp1_uart2 { diff --git a/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi b/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi index 3c72fff11ca5..3ecf23e84294 100644 --- a/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi @@ -232,7 +232,7 @@ reg = <0>; spi-max-frequency = <9600000>; interrupt-parent = <&tlmm>; - interrupts = <78 0>; + interrupts = <78 2>; qcom,reset-gpio = <&tlmm 71 GPIO_ACTIVE_LOW>; qcom,clk-freq-mhz = <20000000>; qcom,max-can-channels = <2>; diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi index 9fb33950541c..2cc02a5aea1a 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi @@ -72,6 +72,7 @@ qcom,ee = <0>; qcom,use-ipa-tethering-bridge; qcom,ipa-bam-remote-mode; + qcom,apps-shutdown-support; qcom,modem-cfg-emb-pipe-flt; clocks = <&clock_virt clk_ipa_clk>; clock-names = "core_clk"; diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-usb.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-usb.dtsi index b7505743986e..faaf00b6ac25 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-usb.dtsi +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-usb.dtsi @@ -88,6 +88,7 @@ resets = <&clock_virt USB_30_BCR>; reset-names = "core_reset"; + qcom,disable-host-mode-pm; dwc3@6a00000 { compatible = "snps,dwc3"; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a025138b0992..b6f3d353e1f9 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -77,6 +77,7 @@ config ARM64 select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_GENERIC_DMA_COHERENT + select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_IRQ_TIME_ACCOUNTING select HAVE_MEMBLOCK select HAVE_PATA_PLATFORM @@ -260,6 +261,15 @@ config PGTABLE_LEVELS default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47 default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48 +config MSM_GVM + bool "Enable virtualization Support for MSM kernel" + help + This enables support for MSM Kernel based virtual + machine for any platform. + This helps to enable virtual driver support. + This should work on 64bit machine. + If you don't know what to do here, say N. 
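(Aside, not part of the patch: the dtsi hunks above add boolean flags such as qcom,disable-host-mode-pm and qcom,apps-shutdown-support; drivers typically consume these with of_property_read_bool(), as in the hypothetical sketch below. The cv2x.dtsi interrupt change from <78 0> to <78 2> switches the trigger type from IRQ_TYPE_NONE to IRQ_TYPE_EDGE_FALLING per the standard dt-bindings IRQ constants.)

#include <linux/of.h>

/* Hypothetical helper, for illustration only: returns true when the
 * boolean property is present in the device node, false otherwise.
 */
static bool example_host_mode_pm_disabled(struct device_node *np)
{
	return of_property_read_bool(np, "qcom,disable-host-mode-pm");
}
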
+ config MSM_GVM_QUIN bool "Enable virtualization Support for MSM kernel required for QUIN platform" help diff --git a/arch/arm64/configs/msm-auto-gvm-perf_defconfig b/arch/arm64/configs/msm-auto-gvm-perf_defconfig index d93941991c01..dda4a86c89f4 100644 --- a/arch/arm64/configs/msm-auto-gvm-perf_defconfig +++ b/arch/arm64/configs/msm-auto-gvm-perf_defconfig @@ -1,3 +1,4 @@ +CONFIG_MSM_GVM=y CONFIG_LOCALVERSION="-perf" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y @@ -267,7 +268,6 @@ CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_DRM=y -# CONFIG_DRM_MSM is not set CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SOC=y diff --git a/arch/arm64/configs/msm-auto-gvm_defconfig b/arch/arm64/configs/msm-auto-gvm_defconfig index 30a163c9743b..640083b94a22 100644 --- a/arch/arm64/configs/msm-auto-gvm_defconfig +++ b/arch/arm64/configs/msm-auto-gvm_defconfig @@ -1,3 +1,4 @@ +CONFIG_MSM_GVM=y CONFIG_SYSVIPC=y CONFIG_AUDIT=y CONFIG_NO_HZ=y @@ -259,7 +260,6 @@ CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_DRM=y -# CONFIG_DRM_MSM is not set CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SOC=y diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig index 81e18d05b0d0..fcebb93d2b30 100644 --- a/arch/arm64/configs/msm-auto-perf_defconfig +++ b/arch/arm64/configs/msm-auto-perf_defconfig @@ -585,12 +585,16 @@ CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_ECRYPT_FS=y CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig index cc1000edbef0..34edf61fb5a3 100644 --- a/arch/arm64/configs/msm-auto_defconfig +++ b/arch/arm64/configs/msm-auto_defconfig @@ -594,12 +594,16 @@ CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_ECRYPT_FS=y CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index ad8600c38a28..548831ddb181 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -101,6 +101,7 @@ CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_VERBOSE=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPVTI=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y @@ -114,6 +115,7 @@ CONFIG_INET6_AH=y CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETFILTER=y diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index 5cd5eb4d3bca..b4d11587f3fc 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -100,6 +100,7 @@ CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_VERBOSE=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPVTI=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y @@ -112,6 +113,7 @@ CONFIG_INET6_AH=y 
CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETFILTER=y diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig index d8eeb6f53286..340e0a207726 100644 --- a/arch/arm64/configs/sdm660-perf_defconfig +++ b/arch/arm64/configs/sdm660-perf_defconfig @@ -102,6 +102,7 @@ CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_VERBOSE=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPVTI=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y @@ -114,6 +115,7 @@ CONFIG_INET6_AH=y CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETFILTER=y diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig index 6946c1ae970b..03435e186de5 100644 --- a/arch/arm64/configs/sdm660_defconfig +++ b/arch/arm64/configs/sdm660_defconfig @@ -102,6 +102,7 @@ CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_VERBOSE=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPVTI=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y @@ -114,6 +115,7 @@ CONFIG_INET6_AH=y CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETFILTER=y diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index 8d5008cbdc0f..a853a83f2944 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <asm/addrspace.h> #include <asm/byteorder.h> +#include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -97,6 +98,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr, return error; } +static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, + void *arg) +{ + unsigned long i; + + for (i = 0; i < nr_pages; i++) { + if (pfn_valid(start_pfn + i) && + !PageReserved(pfn_to_page(start_pfn + i))) + return 1; + } + + return 0; +} + /* * Generic mapping function (not visible outside): */ @@ -115,8 +130,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr, void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags) { + unsigned long offset, pfn, last_pfn; struct vm_struct * area; - unsigned long offset; phys_addr_t last_addr; void * addr; @@ -136,18 +151,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long return (void __iomem *) CKSEG1ADDR(phys_addr); /* - * Don't allow anybody to remap normal RAM that we're using.. + * Don't allow anybody to remap RAM that may be allocated by the page + * allocator, since that could lead to races & data clobbering. */ - if (phys_addr < virt_to_phys(high_memory)) { - char *t_addr, *t_end; - struct page *page; - - t_addr = __va(phys_addr); - t_end = t_addr + (size - 1); - - for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) - if(!PageReserved(page)) - return NULL; + pfn = PFN_DOWN(phys_addr); + last_pfn = PFN_DOWN(last_addr); + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, + __ioremap_check_ram) == 1) { + WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", + &phys_addr, &last_addr); + return NULL; } /* diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 33bb33e5bf05..cdbf73f532c1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -369,6 +369,17 @@ config X86_FEATURE_NAMES If in doubt, say Y. 
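(Aside, not part of the patch: the next hunk adds CONFIG_X86_FAST_FEATURE_TESTS, which gates the self-patching static_cpu_has() implementation reworked later in this diff. A hedged sketch of the call-site pattern the series converts to, mirroring e.g. the vdso hunk that switches cpu_has() to static_cpu_has(); the helper name below is invented for illustration.)

#include <asm/cpufeature.h>

/* Hypothetical helper, for illustration only. */
static bool example_has_rdtscp(void)
{
	/* init / slow path: plain runtime bit test on boot_cpu_data */
	if (!boot_cpu_has(X86_FEATURE_RDTSCP))
		return false;

	/*
	 * Hot path: with CONFIG_X86_FAST_FEATURE_TESTS=y and an asm-goto
	 * capable compiler, static_cpu_has() is patched by the alternatives
	 * machinery at boot; otherwise it falls back to boot_cpu_has().
	 */
	return static_cpu_has(X86_FEATURE_RDTSCP);
}
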
+config X86_FAST_FEATURE_TESTS + bool "Fast CPU feature tests" if EMBEDDED + default y + ---help--- + Some fast-paths in the kernel depend on the capabilities of the CPU. + Say Y here for the kernel to patch in the appropriate code at runtime + based on the capabilities of the CPU. The infrastructure for patching + code at runtime takes up some additional space; space-constrained + embedded systems may wish to say N here to produce smaller, slightly + slower code. + config X86_X2APIC bool "Support x2apic" depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST) diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index e5031f46c443..3cb8e179f2f2 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -355,16 +355,6 @@ config DEBUG_IMR_SELFTEST If unsure say N here. -config X86_DEBUG_STATIC_CPU_HAS - bool "Debug alternatives" - depends on DEBUG_KERNEL - ---help--- - This option causes additional code to be generated which - fails if static_cpu_has() is used before alternatives have - run. - - If unsure, say N. - config X86_DEBUG_FPU bool "Debug the x86 FPU code" depends on DEBUG_KERNEL diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h index ea97697e51e4..4cb404fd45ce 100644 --- a/arch/x86/boot/cpuflags.h +++ b/arch/x86/boot/cpuflags.h @@ -1,7 +1,7 @@ #ifndef BOOT_CPUFLAGS_H #define BOOT_CPUFLAGS_H -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/processor-flags.h> struct cpu_features { diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c index 637097e66a62..f72498dc90d2 100644 --- a/arch/x86/boot/mkcpustr.c +++ b/arch/x86/boot/mkcpustr.c @@ -17,7 +17,7 @@ #include "../include/asm/required-features.h" #include "../include/asm/disabled-features.h" -#include "../include/asm/cpufeature.h" +#include "../include/asm/cpufeatures.h" #include "../kernel/cpu/capflags.c" int main(void) diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c index 07d2c6c86a54..27226df3f7d8 100644 --- a/arch/x86/crypto/crc32-pclmul_glue.c +++ b/arch/x86/crypto/crc32-pclmul_glue.c @@ -33,7 +33,7 @@ #include <linux/crc32.h> #include <crypto/internal/hash.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/cpu_device_id.h> #include <asm/fpu/api.h> diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index 15f5c7675d42..715399b14ed7 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c @@ -30,7 +30,7 @@ #include <linux/kernel.h> #include <crypto/internal/hash.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/cpu_device_id.h> #include <asm/fpu/internal.h> diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c index a3fcfc97a311..cd4df9322501 100644 --- a/arch/x86/crypto/crct10dif-pclmul_glue.c +++ b/arch/x86/crypto/crct10dif-pclmul_glue.c @@ -30,7 +30,7 @@ #include <linux/string.h> #include <linux/kernel.h> #include <asm/fpu/api.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/cpu_device_id.h> asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf, diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index b5eb1cca70a0..071582a3b5c0 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -27,6 +27,7 @@ #include <asm/traps.h> #include <asm/vdso.h> #include <asm/uaccess.h> +#include <asm/cpufeature.h> #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> diff --git 
a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index d437f3871e53..49a8c9f7a379 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -40,7 +40,7 @@ #include <asm/processor-flags.h> #include <asm/ftrace.h> #include <asm/irq_vectors.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/asm.h> #include <asm/smap.h> diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c index a7508d7e20b7..3f9d1a83891a 100644 --- a/arch/x86/entry/vdso/vdso32-setup.c +++ b/arch/x86/entry/vdso/vdso32-setup.c @@ -11,7 +11,6 @@ #include <linux/kernel.h> #include <linux/mm_types.h> -#include <asm/cpufeature.h> #include <asm/processor.h> #include <asm/vdso.h> diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S index 3a1d9297074b..0109ac6cb79c 100644 --- a/arch/x86/entry/vdso/vdso32/system_call.S +++ b/arch/x86/entry/vdso/vdso32/system_call.S @@ -3,7 +3,7 @@ */ #include <asm/dwarf2.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/alternative-asm.h> /* diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index b8f69e264ac4..6b46648588d8 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -20,6 +20,7 @@ #include <asm/page.h> #include <asm/hpet.h> #include <asm/desc.h> +#include <asm/cpufeature.h> #if defined(CONFIG_X86_64) unsigned int __read_mostly vdso64_enabled = 1; @@ -254,7 +255,7 @@ static void vgetcpu_cpu_init(void *arg) #ifdef CONFIG_NUMA node = cpu_to_node(cpu); #endif - if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP)) + if (static_cpu_has(X86_FEATURE_RDTSCP)) write_rdtscp_aux((node << 12) | cpu); /* diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 215ea9214215..002fcd901f07 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -154,12 +154,6 @@ static inline int alternatives_text_reserved(void *start, void *end) ".popsection\n" /* - * This must be included *after* the definition of ALTERNATIVE due to - * <asm/arch_hweight.h> - */ -#include <asm/cpufeature.h> - -/* * Alternative instructions for different CPU types or capabilities. 
* * This allows to use optimized instructions even on generic binary diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 163769d82475..fd810a57ab1b 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -6,7 +6,6 @@ #include <asm/alternative.h> #include <asm/cpufeature.h> -#include <asm/processor.h> #include <asm/apicdef.h> #include <linux/atomic.h> #include <asm/fixmap.h> diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h index 44f825c80ed5..e7cd63175de4 100644 --- a/arch/x86/include/asm/arch_hweight.h +++ b/arch/x86/include/asm/arch_hweight.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_HWEIGHT_H #define _ASM_X86_HWEIGHT_H +#include <asm/cpufeatures.h> + #ifdef CONFIG_64BIT /* popcnt %edi, %eax */ #define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7" diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index ae5fb83e6d91..3e8674288198 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -3,7 +3,6 @@ #include <linux/compiler.h> #include <linux/types.h> -#include <asm/processor.h> #include <asm/alternative.h> #include <asm/cmpxchg.h> #include <asm/rmwcc.h> diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index a11c30b77fb5..a984111135b1 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h @@ -3,7 +3,6 @@ #include <linux/compiler.h> #include <linux/types.h> -#include <asm/processor.h> //#include <asm/cmpxchg.h> /* An 64bit atomic type */ diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index ad19841eddfe..9733361fed6f 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h @@ -2,6 +2,7 @@ #define ASM_X86_CMPXCHG_H #include <linux/compiler.h> +#include <asm/cpufeatures.h> #include <asm/alternative.h> /* Provides LOCK_PREFIX */ /* diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 232621c5e859..dd0089841a0f 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -1,294 +1,35 @@ -/* - * Defines x86 CPU feature bits - */ #ifndef _ASM_X86_CPUFEATURE_H #define _ASM_X86_CPUFEATURE_H -#ifndef _ASM_X86_REQUIRED_FEATURES_H -#include <asm/required-features.h> -#endif - -#ifndef _ASM_X86_DISABLED_FEATURES_H -#include <asm/disabled-features.h> -#endif - -#define NCAPINTS 14 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ - -/* - * Note: If the comment begins with a quoted string, that string is used - * in /proc/cpuinfo instead of the macro name. If the string is "", - * this feature bit is not displayed in /proc/cpuinfo at all. 
- */ - -/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ -#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ -#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ -#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ -#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ -#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ -#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ -#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ -#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ -#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ -#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ -#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ -#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ -#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ -#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ -#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ - /* (plus FCMOVcc, FCOMI with FPU) */ -#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ -#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ -#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ -#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ -#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ -#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ -#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ -#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ -#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ -#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ -#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ -#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ -#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ -#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ -#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ - -/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ -/* Don't duplicate feature flags which are redundant with Intel! */ -#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ -#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ -#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ -#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ -#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ -#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ -#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ -#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ -#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ -#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! 
*/ - -/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ -#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ -#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ -#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ - -/* Other features, Linux-defined mapping, word 3 */ -/* This range is used for feature bits which conflict or are synthesized */ -#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ -#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ -#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ -#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ -/* cpu types for specific tunings: */ -#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ -#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ -#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ -#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ -#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ -#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ -/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */ -#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ -#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ -#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ -#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ -#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ -#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ -#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ -#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ -/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */ -#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ -#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ -#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ -#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ -#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ -/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */ -#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ -#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ -#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ -/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */ -#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ - -/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ -#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ -#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ -#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ -#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ -#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. 
Debug Store */ -#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ -#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ -#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ -#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ -#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ -#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ -#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ -#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ -#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ -#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ -#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ -#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ -#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ -#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ -#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ -#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ -#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ -#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ -#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ -#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ -#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ -#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ -#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ -#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ -#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ -#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ - -/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ -#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ -#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ -#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ -#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ -#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ -#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ -#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ -#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ -#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ -#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ - -/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ -#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ -#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ -#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ -#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ -#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ -#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ -#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ -#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ -#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ -#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ -#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ -#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ -#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ -#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ -#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 
operands MAC instructions */ -#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ -#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ -#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ -#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ -#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ -#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ -#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ -#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ -#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ - -/* - * Auxiliary flags: Linux defined - For features scattered in various - * CPUID levels like 0x6, 0xA etc, word 7 - */ -#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */ -#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */ -#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ -#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ -#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */ -#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */ -#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */ -#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ -#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ -#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ -#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ -#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ -#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ -#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ -#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ -#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ -#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ - -#define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */ -#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */ -/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... 
*/ -#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */ - -/* Virtualization flags: Linux defined, word 8 */ -#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ -#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ -#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ -#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ -#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ -#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */ -#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */ -#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ -#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ -#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */ -#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */ -#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */ -#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */ -#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */ -#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */ -#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ -#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ - - -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ -#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ -#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ -#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ -#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ -#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ -#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ -#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ -#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ -#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ -#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ -#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ -#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ -#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ -#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ -#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ -#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ -#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */ -#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ -#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ -#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ -#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ -#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ -#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ - -/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ -#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ -#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ -#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ -#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ - -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ -#define X86_FEATURE_CQM_LLC 
(11*32+ 1) /* LLC QoS if 1 */ - -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ -#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ - -/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ -#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ - -/* - * BUG word(s) - */ -#define X86_BUG(x) (NCAPINTS*32 + (x)) - -#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ -#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ -#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ -#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ -#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ -#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ -#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ -#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ -#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ -#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ -#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ -#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ +#include <asm/processor.h> #if defined(__KERNEL__) && !defined(__ASSEMBLY__) #include <asm/asm.h> #include <linux/bitops.h> +enum cpuid_leafs +{ + CPUID_1_EDX = 0, + CPUID_8000_0001_EDX, + CPUID_8086_0001_EDX, + CPUID_LNX_1, + CPUID_1_ECX, + CPUID_C000_0001_EDX, + CPUID_8000_0001_ECX, + CPUID_LNX_2, + CPUID_LNX_3, + CPUID_7_0_EBX, + CPUID_D_1_EAX, + CPUID_F_0_EDX, + CPUID_F_1_EDX, + CPUID_8000_0008_EBX, + CPUID_6_EAX, + CPUID_8000_000A_EDX, + CPUID_7_ECX, + CPUID_8000_0007_EBX, +}; + #ifdef CONFIG_X86_FEATURE_NAMES extern const char * const x86_cap_flags[NCAPINTS*32]; extern const char * const x86_power_flags[32]; @@ -308,29 +49,59 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; #define test_cpu_cap(c, bit) \ test_bit(bit, (unsigned long *)((c)->x86_capability)) -#define REQUIRED_MASK_BIT_SET(bit) \ - ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ - (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ - (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ - (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \ - (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \ - (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \ - (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ - (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \ - (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \ - (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) ) - -#define DISABLED_MASK_BIT_SET(bit) \ - ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \ - (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \ - (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \ - (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \ - (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \ - (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \ - (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \ - (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \ - (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \ - (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) ) +/* + * There are 32 bits/features in each mask word. 
The high bits + * (selected with (bit>>5) give us the word number and the low 5 + * bits give us the bit/feature number inside the word. + * (1UL<<((bit)&31) gives us a mask for the feature_bit so we can + * see if it is set in the mask word. + */ +#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \ + (((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word )) + +#define REQUIRED_MASK_BIT_SET(feature_bit) \ + ( CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 0, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 1, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 2, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 3, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 4, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 5, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 6, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 7, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 8, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 9, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 10, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 11, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 12, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 13, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 14, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \ + REQUIRED_MASK_CHECK || \ + BUILD_BUG_ON_ZERO(NCAPINTS != 18)) + +#define DISABLED_MASK_BIT_SET(feature_bit) \ + ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 1, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 2, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 3, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 4, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 5, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 6, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 7, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 8, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 9, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 10, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 11, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 12, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 13, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 14, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \ + DISABLED_MASK_CHECK || \ + BUILD_BUG_ON_ZERO(NCAPINTS != 18)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ @@ -349,8 +120,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; * is not relevant. */ #define cpu_feature_enabled(bit) \ - (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \ - cpu_has(&boot_cpu_data, bit)) + (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 
0 : static_cpu_has(bit)) #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) @@ -388,106 +158,19 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) /* - * Do not add any more of those clumsy macros - use static_cpu_has_safe() for + * Do not add any more of those clumsy macros - use static_cpu_has() for * fast paths and boot_cpu_has() otherwise! */ -#if __GNUC__ >= 4 -extern void warn_pre_alternatives(void); -extern bool __static_cpu_has_safe(u16 bit); - +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS) /* * Static testing of CPU features. Used the same as boot_cpu_has(). - * These are only valid after alternatives have run, but will statically - * patch the target code for additional performance. + * These will statically patch the target code for additional + * performance. */ -static __always_inline __pure bool __static_cpu_has(u16 bit) -{ -#ifdef CC_HAVE_ASM_GOTO - -#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS - - /* - * Catch too early usage of this before alternatives - * have run. - */ - asm_volatile_goto("1: jmp %l[t_warn]\n" - "2:\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" - " .long 0\n" /* no replacement */ - " .word %P0\n" /* 1: do replace */ - " .byte 2b - 1b\n" /* source len */ - " .byte 0\n" /* replacement len */ - " .byte 0\n" /* pad len */ - ".previous\n" - /* skipping size check since replacement size = 0 */ - : : "i" (X86_FEATURE_ALWAYS) : : t_warn); - -#endif - - asm_volatile_goto("1: jmp %l[t_no]\n" - "2:\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" - " .long 0\n" /* no replacement */ - " .word %P0\n" /* feature bit */ - " .byte 2b - 1b\n" /* source len */ - " .byte 0\n" /* replacement len */ - " .byte 0\n" /* pad len */ - ".previous\n" - /* skipping size check since replacement size = 0 */ - : : "i" (bit) : : t_no); - return true; - t_no: - return false; - -#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS - t_warn: - warn_pre_alternatives(); - return false; -#endif - -#else /* CC_HAVE_ASM_GOTO */ - - u8 flag; - /* Open-coded due to __stringify() in ALTERNATIVE() */ - asm volatile("1: movb $0,%0\n" - "2:\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" - " .long 3f - .\n" - " .word %P1\n" /* feature bit */ - " .byte 2b - 1b\n" /* source len */ - " .byte 4f - 3f\n" /* replacement len */ - " .byte 0\n" /* pad len */ - ".previous\n" - ".section .discard,\"aw\",@progbits\n" - " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ - ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" - "3: movb $1,%0\n" - "4:\n" - ".previous\n" - : "=qm" (flag) : "i" (bit)); - return flag; - -#endif /* CC_HAVE_ASM_GOTO */ -} - -#define static_cpu_has(bit) \ -( \ - __builtin_constant_p(boot_cpu_has(bit)) ? \ - boot_cpu_has(bit) : \ - __builtin_constant_p(bit) ? 
\ - __static_cpu_has(bit) : \ - boot_cpu_has(bit) \ -) - -static __always_inline __pure bool _static_cpu_has_safe(u16 bit) +static __always_inline __pure bool _static_cpu_has(u16 bit) { -#ifdef CC_HAVE_ASM_GOTO - asm_volatile_goto("1: jmp %l[t_dynamic]\n" + asm_volatile_goto("1: jmp 6f\n" "2:\n" ".skip -(((5f-4f) - (2b-1b)) > 0) * " "((5f-4f) - (2b-1b)),0x90\n" @@ -512,66 +195,34 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) " .byte 0\n" /* repl len */ " .byte 0\n" /* pad len */ ".previous\n" - : : "i" (bit), "i" (X86_FEATURE_ALWAYS) - : : t_dynamic, t_no); + ".section .altinstr_aux,\"ax\"\n" + "6:\n" + " testb %[bitnum],%[cap_byte]\n" + " jnz %l[t_yes]\n" + " jmp %l[t_no]\n" + ".previous\n" + : : "i" (bit), "i" (X86_FEATURE_ALWAYS), + [bitnum] "i" (1 << (bit & 7)), + [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) + : : t_yes, t_no); + t_yes: return true; t_no: return false; - t_dynamic: - return __static_cpu_has_safe(bit); -#else - u8 flag; - /* Open-coded due to __stringify() in ALTERNATIVE() */ - asm volatile("1: movb $2,%0\n" - "2:\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 3f - .\n" /* repl offset */ - " .word %P2\n" /* always replace */ - " .byte 2b - 1b\n" /* source len */ - " .byte 4f - 3f\n" /* replacement len */ - " .byte 0\n" /* pad len */ - ".previous\n" - ".section .discard,\"aw\",@progbits\n" - " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ - ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" - "3: movb $0,%0\n" - "4:\n" - ".previous\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 5f - .\n" /* repl offset */ - " .word %P1\n" /* feature bit */ - " .byte 4b - 3b\n" /* src len */ - " .byte 6f - 5f\n" /* repl len */ - " .byte 0\n" /* pad len */ - ".previous\n" - ".section .discard,\"aw\",@progbits\n" - " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */ - ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" - "5: movb $1,%0\n" - "6:\n" - ".previous\n" - : "=qm" (flag) - : "i" (bit), "i" (X86_FEATURE_ALWAYS)); - return (flag == 2 ? __static_cpu_has_safe(bit) : flag); -#endif /* CC_HAVE_ASM_GOTO */ } -#define static_cpu_has_safe(bit) \ +#define static_cpu_has(bit) \ ( \ __builtin_constant_p(boot_cpu_has(bit)) ? \ boot_cpu_has(bit) : \ - _static_cpu_has_safe(bit) \ + _static_cpu_has(bit) \ ) #else /* - * gcc 3.x is too stupid to do the static test; fall back to dynamic. + * Fall back to dynamic for gcc versions which don't support asm goto. Should be + * a minority now anyway. 
*/ #define static_cpu_has(bit) boot_cpu_has(bit) -#define static_cpu_has_safe(bit) boot_cpu_has(bit) #endif #define cpu_has_bug(c, bit) cpu_has(c, (bit)) @@ -579,7 +230,6 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) #define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit)) #define static_cpu_has_bug(bit) static_cpu_has((bit)) -#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit)) #define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit)) #define MAX_CPU_FEATURES (NCAPINTS * 32) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h new file mode 100644 index 000000000000..205ce70c1d6c --- /dev/null +++ b/arch/x86/include/asm/cpufeatures.h @@ -0,0 +1,306 @@ +#ifndef _ASM_X86_CPUFEATURES_H +#define _ASM_X86_CPUFEATURES_H + +#ifndef _ASM_X86_REQUIRED_FEATURES_H +#include <asm/required-features.h> +#endif + +#ifndef _ASM_X86_DISABLED_FEATURES_H +#include <asm/disabled-features.h> +#endif + +/* + * Defines x86 CPU feature bits + */ +#define NCAPINTS 18 /* N 32-bit words worth of info */ +#define NBUGINTS 1 /* N 32-bit bug flags */ + +/* + * Note: If the comment begins with a quoted string, that string is used + * in /proc/cpuinfo instead of the macro name. If the string is "", + * this feature bit is not displayed in /proc/cpuinfo at all. + */ + +/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ +#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ +#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ +#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ +#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ +#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ +#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ +#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ +#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ +#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ +#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ +#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ +#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ +#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ +#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ +#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ + /* (plus FCMOVcc, FCOMI with FPU) */ +#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ +#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ +#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ +#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ +#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ +#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ +#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ +#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ +#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ +#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ +#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ +#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ +#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ +#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ +#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ + +/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ +/* Don't duplicate feature flags which are redundant with Intel! */ +#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ +#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. 
*/ +#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ +#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ +#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ +#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ +#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ +#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ +#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ +#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */ + +/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ +#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ +#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ +#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ + +/* Other features, Linux-defined mapping, word 3 */ +/* This range is used for feature bits which conflict or are synthesized */ +#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ +#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ +#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ +#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ +/* cpu types for specific tunings: */ +#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ +#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ +#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ +#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ +#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ +#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ +/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */ +#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ +#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ +#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ +#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ +#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ +#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ +#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ +#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ +/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */ +#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ +#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ +#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ +#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ +#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ +/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */ +#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ +#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ +#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ +/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */ +#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ + +/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ +#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ +#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ +#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ +#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ +#define 
X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ +#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ +#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ +#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ +#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ +#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ +#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ +#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ +#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ +#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ +#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ +#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ +#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ +#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ +#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ +#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ +#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ +#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ +#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ +#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ +#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ +#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ +#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ +#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ +#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ +#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ +#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ + +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ +#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ +#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ +#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ +#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ +#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ +#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ +#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ +#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ +#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ + +/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ +#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ +#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ +#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ +#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ +#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ +#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ +#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ +#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ +#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ +#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ +#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ +#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ +#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ +#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ +#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight 
Profiling */ +#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ +#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ +#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ +#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ +#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ +#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ +#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ +#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ +#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ +#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ + +/* + * Auxiliary flags: Linux defined - For features scattered in various + * CPUID levels like 0x6, 0xA etc, word 7. + * + * Reuse free bits when adding new feature flags! + */ + +#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ +#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ +#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */ + +#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ +#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ + +#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ +#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ + +#define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */ +/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */ +#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */ + +/* Virtualization flags: Linux defined, word 8 */ +#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ +#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ +#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ +#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ +#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ + +#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ +#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ + + +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ +#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ +#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ +#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ +#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ +#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ +#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ +#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ +#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ +#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ +#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ +#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ +#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ +#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ +#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ +#define X86_FEATURE_SMAP ( 9*32+20) /* 
Supervisor Mode Access Prevention */ +#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */ +#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ +#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ +#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ +#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ +#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ +#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ + +/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ +#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ +#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ +#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ +#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ + +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ +#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ + +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ +#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ + +/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ +#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ + +/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ +#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ +#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ +#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ +#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ +#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ +#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ +#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ +#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ +#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. 
Preference */ +#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ + +/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ +#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ +#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ +#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ +#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ +#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ +#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ +#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ +#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ +#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ +#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ + +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ +#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ +#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ + +/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ +#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ +#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ +#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ + +/* + * BUG word(s) + */ +#define X86_BUG(x) (NCAPINTS*32 + (x)) + +#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ +#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ +#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ +#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ +#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ +#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ +#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ +#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ +#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ +#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ +#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ +#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ + +#endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 8b17c2ad1048..21c5ac15657b 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -30,6 +30,14 @@ # define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31)) #endif /* CONFIG_X86_64 */ +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +# define DISABLE_PKU 0 +# define DISABLE_OSPKE 0 +#else +# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31)) +# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31)) +#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ + /* * Make sure to add features to the correct mask */ @@ -43,5 +51,14 @@ #define DISABLED_MASK7 0 #define DISABLED_MASK8 0 #define DISABLED_MASK9 (DISABLE_MPX) +#define DISABLED_MASK10 0 +#define DISABLED_MASK11 0 +#define DISABLED_MASK12 0 +#define DISABLED_MASK13 0 +#define DISABLED_MASK14 0 +#define DISABLED_MASK15 0 +#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) +#define DISABLED_MASK17 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) 
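Each X86_FEATURE_* value above packs a capability word and a bit position into one number, and the new DISABLED_MASK10..DISABLED_MASK17 entries together with BUILD_BUG_ON_ZERO(NCAPINTS != 18) keep exactly one mask per word, checked at build time against the table size. A minimal sketch of the encoding, for illustration only (the two helper names below are invented, not kernel API):

/* X86_FEATURE_AVX2 is defined as (9*32 + 5): capability word 9, bit 5. */
static inline unsigned int cap_word(unsigned int feature)
{
	return feature / 32;	/* index into x86_capability[] and into DISABLED_MASK<n>/REQUIRED_MASK<n> */
}

static inline unsigned int cap_bit(unsigned int feature)
{
	return feature % 32;	/* bit inside that 32-bit word */
}

/* cap_word(X86_FEATURE_AVX2) == 9 and cap_bit(X86_FEATURE_AVX2) == 5,
 * which is why DISABLE_PKU above is built as 1 << (X86_FEATURE_PKU & 31)
 * and lands in DISABLED_MASK16 (PKU is word 16, bit 3). */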
#endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 146d838e6ee7..ec2aedb6f92a 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -17,6 +17,7 @@ #include <asm/user.h> #include <asm/fpu/api.h> #include <asm/fpu/xstate.h> +#include <asm/cpufeature.h> /* * High level FPU state handling functions: @@ -63,17 +64,17 @@ static __always_inline __pure bool use_eager_fpu(void) static __always_inline __pure bool use_xsaveopt(void) { - return static_cpu_has_safe(X86_FEATURE_XSAVEOPT); + return static_cpu_has(X86_FEATURE_XSAVEOPT); } static __always_inline __pure bool use_xsave(void) { - return static_cpu_has_safe(X86_FEATURE_XSAVE); + return static_cpu_has(X86_FEATURE_XSAVE); } static __always_inline __pure bool use_fxsr(void) { - return static_cpu_has_safe(X86_FEATURE_FXSR); + return static_cpu_has(X86_FEATURE_FXSR); } /* @@ -225,18 +226,67 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) #define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f" #define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f" -/* xstate instruction fault handler: */ -#define xstate_fault(__err) \ - \ - ".section .fixup,\"ax\"\n" \ - \ - "3: movl $-2,%[_err]\n" \ - " jmp 2b\n" \ - \ - ".previous\n" \ - \ - _ASM_EXTABLE(1b, 3b) \ - : [_err] "=r" (__err) +#define XSTATE_OP(op, st, lmask, hmask, err) \ + asm volatile("1:" op "\n\t" \ + "xor %[err], %[err]\n" \ + "2:\n\t" \ + ".pushsection .fixup,\"ax\"\n\t" \ + "3: movl $-2,%[err]\n\t" \ + "jmp 2b\n\t" \ + ".popsection\n\t" \ + _ASM_EXTABLE(1b, 3b) \ + : [err] "=r" (err) \ + : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ + : "memory") + +/* + * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact + * format and supervisor states in addition to modified optimization in + * XSAVEOPT. + * + * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT + * supports modified optimization which is not supported by XSAVE. + * + * We use XSAVE as a fallback. + * + * The 661 label is defined in the ALTERNATIVE* macros as the address of the + * original instruction which gets replaced. We need to use it here as the + * address of the instruction where we might get an exception at. + */ +#define XSTATE_XSAVE(st, lmask, hmask, err) \ + asm volatile(ALTERNATIVE_2(XSAVE, \ + XSAVEOPT, X86_FEATURE_XSAVEOPT, \ + XSAVES, X86_FEATURE_XSAVES) \ + "\n" \ + "xor %[err], %[err]\n" \ + "3:\n" \ + ".pushsection .fixup,\"ax\"\n" \ + "4: movl $-2, %[err]\n" \ + "jmp 3b\n" \ + ".popsection\n" \ + _ASM_EXTABLE(661b, 4b) \ + : [err] "=r" (err) \ + : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ + : "memory") + +/* + * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact + * XSAVE area format. 
+ */ +#define XSTATE_XRESTORE(st, lmask, hmask, err) \ + asm volatile(ALTERNATIVE(XRSTOR, \ + XRSTORS, X86_FEATURE_XSAVES) \ + "\n" \ + "xor %[err], %[err]\n" \ + "3:\n" \ + ".pushsection .fixup,\"ax\"\n" \ + "4: movl $-2, %[err]\n" \ + "jmp 3b\n" \ + ".popsection\n" \ + _ASM_EXTABLE(661b, 4b) \ + : [err] "=r" (err) \ + : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ + : "memory") /* * This function is called only during boot time when x86 caps are not set @@ -247,22 +297,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate) u64 mask = -1; u32 lmask = mask; u32 hmask = mask >> 32; - int err = 0; + int err; WARN_ON(system_state != SYSTEM_BOOTING); - if (boot_cpu_has(X86_FEATURE_XSAVES)) - asm volatile("1:"XSAVES"\n\t" - "2:\n\t" - xstate_fault(err) - : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err) - : "memory"); + if (static_cpu_has(X86_FEATURE_XSAVES)) + XSTATE_OP(XSAVES, xstate, lmask, hmask, err); else - asm volatile("1:"XSAVE"\n\t" - "2:\n\t" - xstate_fault(err) - : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err) - : "memory"); + XSTATE_OP(XSAVE, xstate, lmask, hmask, err); /* We should never fault when copying to a kernel buffer: */ WARN_ON_FPU(err); @@ -277,22 +319,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate) u64 mask = -1; u32 lmask = mask; u32 hmask = mask >> 32; - int err = 0; + int err; WARN_ON(system_state != SYSTEM_BOOTING); - if (boot_cpu_has(X86_FEATURE_XSAVES)) - asm volatile("1:"XRSTORS"\n\t" - "2:\n\t" - xstate_fault(err) - : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err) - : "memory"); + if (static_cpu_has(X86_FEATURE_XSAVES)) + XSTATE_OP(XRSTORS, xstate, lmask, hmask, err); else - asm volatile("1:"XRSTOR"\n\t" - "2:\n\t" - xstate_fault(err) - : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err) - : "memory"); + XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); /* We should never fault when copying from a kernel buffer: */ WARN_ON_FPU(err); @@ -306,33 +340,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate) u64 mask = -1; u32 lmask = mask; u32 hmask = mask >> 32; - int err = 0; + int err; WARN_ON(!alternatives_patched); - /* - * If xsaves is enabled, xsaves replaces xsaveopt because - * it supports compact format and supervisor states in addition to - * modified optimization in xsaveopt. - * - * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave - * because xsaveopt supports modified optimization which is not - * supported by xsave. - * - * If none of xsaves and xsaveopt is enabled, use xsave. - */ - alternative_input_2( - "1:"XSAVE, - XSAVEOPT, - X86_FEATURE_XSAVEOPT, - XSAVES, - X86_FEATURE_XSAVES, - [xstate] "D" (xstate), "a" (lmask), "d" (hmask) : - "memory"); - asm volatile("2:\n\t" - xstate_fault(err) - : "0" (err) - : "memory"); + XSTATE_XSAVE(xstate, lmask, hmask, err); /* We should never fault when copying to a kernel buffer: */ WARN_ON_FPU(err); @@ -345,23 +357,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask) { u32 lmask = mask; u32 hmask = mask >> 32; - int err = 0; + int err; - /* - * Use xrstors to restore context if it is enabled. xrstors supports - * compacted format of xsave area which is not supported by xrstor. 
- */ - alternative_input( - "1: " XRSTOR, - XRSTORS, - X86_FEATURE_XSAVES, - "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask) - : "memory"); - - asm volatile("2:\n" - xstate_fault(err) - : "0" (err) - : "memory"); + XSTATE_XRESTORE(xstate, lmask, hmask, err); /* We should never fault when copying from a kernel buffer: */ WARN_ON_FPU(err); @@ -389,12 +387,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf) if (unlikely(err)) return -EFAULT; - __asm__ __volatile__(ASM_STAC "\n" - "1:"XSAVE"\n" - "2: " ASM_CLAC "\n" - xstate_fault(err) - : "D" (buf), "a" (-1), "d" (-1), "0" (err) - : "memory"); + stac(); + XSTATE_OP(XSAVE, buf, -1, -1, err); + clac(); + return err; } @@ -406,14 +402,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask) struct xregs_state *xstate = ((__force struct xregs_state *)buf); u32 lmask = mask; u32 hmask = mask >> 32; - int err = 0; - - __asm__ __volatile__(ASM_STAC "\n" - "1:"XRSTOR"\n" - "2: " ASM_CLAC "\n" - xstate_fault(err) - : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err) - : "memory"); /* memory required? */ + int err; + + stac(); + XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); + clac(); + return err; } @@ -467,7 +461,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate) * pending. Clear the x87 state here by setting it to fixed values. * "m" is a random variable that should be in L1. */ - if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) { + if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) { asm volatile( "fnclex\n\t" "emms\n\t" diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h index 78162f8e248b..d0afb05c84fc 100644 --- a/arch/x86/include/asm/irq_work.h +++ b/arch/x86/include/asm/irq_work.h @@ -1,7 +1,7 @@ #ifndef _ASM_IRQ_WORK_H #define _ASM_IRQ_WORK_H -#include <asm/processor.h> +#include <asm/cpufeature.h> static inline bool arch_irq_work_has_interrupt(void) { diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index c70689b5e5aa..0deeb2d26df7 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -3,6 +3,8 @@ #include <linux/sched.h> +#include <asm/cpufeature.h> + #define MWAIT_SUBSTATE_MASK 0xf #define MWAIT_CSTATE_MASK 0xf #define MWAIT_SUBSTATE_SIZE 4 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 249f1c769f21..8b910416243c 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -5,7 +5,7 @@ #include <asm/alternative.h> #include <asm/alternative-asm.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> /* * Fill the CPU return stack buffer. 
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 9e77cea2a8ef..8e415cf65457 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -13,7 +13,7 @@ struct vm86; #include <asm/types.h> #include <uapi/asm/sigcontext.h> #include <asm/current.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/page.h> #include <asm/pgtable_types.h> #include <asm/percpu.h> @@ -24,7 +24,6 @@ struct vm86; #include <asm/fpu/types.h> #include <linux/personality.h> -#include <linux/cpumask.h> #include <linux/cache.h> #include <linux/threads.h> #include <linux/math64.h> diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index 5c6e4fb370f5..fac9a5c0abe9 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -92,5 +92,14 @@ #define REQUIRED_MASK7 0 #define REQUIRED_MASK8 0 #define REQUIRED_MASK9 0 +#define REQUIRED_MASK10 0 +#define REQUIRED_MASK11 0 +#define REQUIRED_MASK12 0 +#define REQUIRED_MASK13 0 +#define REQUIRED_MASK14 0 +#define REQUIRED_MASK15 0 +#define REQUIRED_MASK16 0 +#define REQUIRED_MASK17 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h index ba665ebd17bb..db333300bd4b 100644 --- a/arch/x86/include/asm/smap.h +++ b/arch/x86/include/asm/smap.h @@ -15,7 +15,7 @@ #include <linux/stringify.h> #include <asm/nops.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> /* "Raw" instruction opcodes */ #define __ASM_CLAC .byte 0x0f,0x01,0xca diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index a438c5598a90..04d6eef5f8a5 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -16,7 +16,6 @@ #endif #include <asm/thread_info.h> #include <asm/cpumask.h> -#include <asm/cpufeature.h> extern int smp_num_siblings; extern unsigned int num_processors; diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index c706b7796870..913468b17b9c 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -49,7 +49,7 @@ */ #ifndef __ASSEMBLY__ struct task_struct; -#include <asm/processor.h> +#include <asm/cpufeature.h> #include <linux/atomic.h> struct thread_info { diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index a691b66cc40a..e2a89d2577fb 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -5,6 +5,7 @@ #include <linux/sched.h> #include <asm/processor.h> +#include <asm/cpufeature.h> #include <asm/special_insns.h> #include <asm/smp.h> diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 2957c8237c28..ec9d2bcc8c24 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -8,7 +8,7 @@ #include <linux/errno.h> #include <linux/lockdep.h> #include <asm/alternative.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/page.h> /* diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index 2bd2292a316d..bac0805ea1d9 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c @@ -30,7 +30,7 @@ static unsigned int numachip1_get_apic_id(unsigned long x) unsigned long value; unsigned int id = (x >> 24) & 0xff; - if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { + if 
(static_cpu_has(X86_FEATURE_NODEID_MSR)) { rdmsrl(MSR_FAM10H_NODE_ID, value); id |= (value << 2) & 0xff00; } @@ -178,7 +178,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) this_cpu_write(cpu_llc_id, node); /* Account for nodes per socket in multi-core-module processors */ - if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { + if (static_cpu_has(X86_FEATURE_NODEID_MSR)) { rdmsrl(MSR_FAM10H_NODE_ID, val); nodes = ((val >> 3) & 7) + 1; } diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 606ebe494756..1e5184092ee6 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -66,7 +66,7 @@ ifdef CONFIG_X86_FEATURE_NAMES quiet_cmd_mkcapflags = MKCAP $@ cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@ -cpufeature = $(src)/../../include/asm/cpufeature.h +cpufeature = $(src)/../../include/asm/cpufeatures.h targets += capflags.c $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index d8fba5c15fbd..6608c03c2126 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -1,7 +1,7 @@ #include <linux/bitops.h> #include <linux/kernel.h> -#include <asm/processor.h> +#include <asm/cpufeature.h> #include <asm/e820.h> #include <asm/mtrr.h> #include <asm/msr.h> @@ -43,7 +43,7 @@ static void init_c3(struct cpuinfo_x86 *c) /* store Centaur Extended Feature Flags as * word 5 of the CPU capability bit array */ - c->x86_capability[5] = cpuid_edx(0xC0000001); + c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001); } #ifdef CONFIG_X86_32 /* Cyrix III family needs CX8 & PGE explicitly enabled. */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 0498ad3702f5..814276d0eed1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -676,50 +676,48 @@ static void apply_forced_caps(struct cpuinfo_x86 *c) void get_cpu_cap(struct cpuinfo_x86 *c) { - u32 tfms, xlvl; - u32 ebx; + u32 eax, ebx, ecx, edx; /* Intel-defined flags: level 0x00000001 */ if (c->cpuid_level >= 0x00000001) { - u32 capability, excap; + cpuid(0x00000001, &eax, &ebx, &ecx, &edx); - cpuid(0x00000001, &tfms, &ebx, &excap, &capability); - c->x86_capability[0] = capability; - c->x86_capability[4] = excap; + c->x86_capability[CPUID_1_ECX] = ecx; + c->x86_capability[CPUID_1_EDX] = edx; } /* Additional Intel-defined flags: level 0x00000007 */ if (c->cpuid_level >= 0x00000007) { - u32 eax, ebx, ecx, edx; - cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); - c->x86_capability[9] = ebx; + c->x86_capability[CPUID_7_0_EBX] = ebx; + + c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); + c->x86_capability[CPUID_7_ECX] = ecx; } /* Extended state features: level 0x0000000d */ if (c->cpuid_level >= 0x0000000d) { - u32 eax, ebx, ecx, edx; - cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); - c->x86_capability[10] = eax; + c->x86_capability[CPUID_D_1_EAX] = eax; } /* Additional Intel-defined flags: level 0x0000000F */ if (c->cpuid_level >= 0x0000000F) { - u32 eax, ebx, ecx, edx; /* QoS sub-leaf, EAX=0Fh, ECX=0 */ cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx); - c->x86_capability[11] = edx; + c->x86_capability[CPUID_F_0_EDX] = edx; + if (cpu_has(c, X86_FEATURE_CQM_LLC)) { /* will be overridden if occupancy monitoring exists */ c->x86_cache_max_rmid = ebx; /* QoS sub-leaf, EAX=0Fh, ECX=1 */ cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx); - c->x86_capability[12] = edx; + c->x86_capability[CPUID_F_1_EDX] = edx; 
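The get_cpu_cap() rewrite above replaces bare array indices (c->x86_capability[0], [4], [9], ...) with named slots such as CPUID_1_EDX and CPUID_7_0_EBX. Callers are unaffected because they go through cpu_has()/boot_cpu_has(), which derive the word and bit from the feature define itself. A hedged sketch (the wrapper function is invented for illustration; cpu_has() and X86_FEATURE_BMI2 are the existing kernel names):

#include <linux/types.h>
#include <asm/processor.h>	/* struct cpuinfo_x86 */
#include <asm/cpufeature.h>	/* cpu_has(), X86_FEATURE_* */

/* X86_FEATURE_BMI2 is (9*32 + 8); cpu_has() looks it up in
 * c->x86_capability[9], i.e. the slot now named CPUID_7_0_EBX. */
static bool has_bmi2(struct cpuinfo_x86 *c)
{
	return cpu_has(c, X86_FEATURE_BMI2);
}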
+ if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) { c->x86_cache_max_rmid = ecx; c->x86_cache_occ_scale = ebx; @@ -731,30 +729,39 @@ void get_cpu_cap(struct cpuinfo_x86 *c) } /* AMD-defined flags: level 0x80000001 */ - xlvl = cpuid_eax(0x80000000); - c->extended_cpuid_level = xlvl; + eax = cpuid_eax(0x80000000); + c->extended_cpuid_level = eax; + + if ((eax & 0xffff0000) == 0x80000000) { + if (eax >= 0x80000001) { + cpuid(0x80000001, &eax, &ebx, &ecx, &edx); - if ((xlvl & 0xffff0000) == 0x80000000) { - if (xlvl >= 0x80000001) { - c->x86_capability[1] = cpuid_edx(0x80000001); - c->x86_capability[6] = cpuid_ecx(0x80000001); + c->x86_capability[CPUID_8000_0001_ECX] = ecx; + c->x86_capability[CPUID_8000_0001_EDX] = edx; } } + if (c->extended_cpuid_level >= 0x80000007) { + cpuid(0x80000007, &eax, &ebx, &ecx, &edx); + + c->x86_capability[CPUID_8000_0007_EBX] = ebx; + c->x86_power = edx; + } + if (c->extended_cpuid_level >= 0x80000008) { - u32 eax = cpuid_eax(0x80000008); + cpuid(0x80000008, &eax, &ebx, &ecx, &edx); c->x86_virt_bits = (eax >> 8) & 0xff; c->x86_phys_bits = eax & 0xff; - c->x86_capability[13] = cpuid_ebx(0x80000008); + c->x86_capability[CPUID_8000_0008_EBX] = ebx; } #ifdef CONFIG_X86_32 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) c->x86_phys_bits = 36; #endif - if (c->extended_cpuid_level >= 0x80000007) - c->x86_power = cpuid_edx(0x80000007); + if (c->extended_cpuid_level >= 0x8000000a) + c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); init_scattered_cpuid_features(c); } @@ -1574,20 +1581,6 @@ void cpu_init(void) } #endif -#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS -void warn_pre_alternatives(void) -{ - WARN(1, "You're using static_cpu_has before alternatives have run!\n"); -} -EXPORT_SYMBOL_GPL(warn_pre_alternatives); -#endif - -inline bool __static_cpu_has_safe(u16 bit) -{ - return boot_cpu_has(bit); -} -EXPORT_SYMBOL_GPL(__static_cpu_has_safe); - static void bsp_resume(void) { if (this_cpu->c_bsp_resume) diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index aaf152e79637..15e47c1cd412 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -8,6 +8,7 @@ #include <linux/timer.h> #include <asm/pci-direct.h> #include <asm/tsc.h> +#include <asm/cpufeature.h> #include "cpu.h" diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 565648bc1a0a..9299e3bdfad6 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -8,7 +8,7 @@ #include <linux/module.h> #include <linux/uaccess.h> -#include <asm/processor.h> +#include <asm/cpufeature.h> #include <asm/pgtable.h> #include <asm/msr.h> #include <asm/bugs.h> diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 3fa72317ad78..3557b3ceab14 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -14,7 +14,7 @@ #include <linux/sysfs.h> #include <linux/pci.h> -#include <asm/processor.h> +#include <asm/cpufeature.h> #include <asm/amd_nb.h> #include <asm/smp.h> diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c index afa9f0d487ea..fbb5e90557a5 100644 --- a/arch/x86/kernel/cpu/match.c +++ b/arch/x86/kernel/cpu/match.c @@ -1,5 +1,5 @@ #include <asm/cpu_device_id.h> -#include <asm/processor.h> +#include <asm/cpufeature.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/slab.h> diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh index 3f20710a5b23..6988c74409a8 100644 --- 
a/arch/x86/kernel/cpu/mkcapflags.sh +++ b/arch/x86/kernel/cpu/mkcapflags.sh @@ -1,6 +1,6 @@ #!/bin/sh # -# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeature.h +# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h # IN=$1 @@ -49,8 +49,8 @@ dump_array() trap 'rm "$OUT"' EXIT ( - echo "#ifndef _ASM_X86_CPUFEATURE_H" - echo "#include <asm/cpufeature.h>" + echo "#ifndef _ASM_X86_CPUFEATURES_H" + echo "#include <asm/cpufeatures.h>" echo "#endif" echo "" diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index f924f41af89a..49bd700d9b7f 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -47,7 +47,7 @@ #include <linux/smp.h> #include <linux/syscore_ops.h> -#include <asm/processor.h> +#include <asm/cpufeature.h> #include <asm/e820.h> #include <asm/mtrr.h> #include <asm/msr.h> diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 608fb26c7254..8cb57df9398d 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -31,32 +31,12 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c) const struct cpuid_bit *cb; static const struct cpuid_bit cpuid_bits[] = { - { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 }, - { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, - { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, - { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, - { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, - { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, - { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 }, - { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, - { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, - { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 }, { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 }, - { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 }, - { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, - { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, - { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 }, - { X86_FEATURE_TSCRATEMSR, CR_EDX, 4, 0x8000000a, 0 }, - { X86_FEATURE_VMCBCLEAN, CR_EDX, 5, 0x8000000a, 0 }, - { X86_FEATURE_FLUSHBYASID, CR_EDX, 6, 0x8000000a, 0 }, - { X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 }, - { X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 }, - { X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 }, { 0, 0, 0, 0, 0 } }; diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index 3fa0e5ad86b4..a19a663282b5 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c @@ -1,6 +1,6 @@ #include <linux/kernel.h> #include <linux/mm.h> -#include <asm/processor.h> +#include <asm/cpufeature.h> #include <asm/msr.h> #include "cpu.h" @@ -12,7 +12,7 @@ static void early_init_transmeta(struct cpuinfo_x86 *c) xlvl = cpuid_eax(0x80860000); if ((xlvl & 0xffff0000) == 0x80860000) { if (xlvl >= 0x80860001) - c->x86_capability[2] = cpuid_edx(0x80860001); + c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001); } } @@ -82,7 +82,7 @@ static void init_transmeta(struct cpuinfo_x86 *c) /* Unhide possibly hidden capability flags */ rdmsr(0x80860004, cap_mask, uk); wrmsr(0x80860004, ~0, uk); - c->x86_capability[0] = cpuid_edx(0x00000001); + c->x86_capability[CPUID_1_EDX] = cpuid_edx(0x00000001); 
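With the scattered.c hunk above, the leaf 0x6 (thermal/power) and leaf 0x8000000a (SVM) bits are no longer synthesized one at a time; get_cpu_cap() now stores those leaves as whole capability words (CPUID_6_EAX, CPUID_8000_000A_EDX), so the bit positions in the X86_FEATURE_* table line up with the hardware bit numbers, and feature tests elsewhere are untouched. Illustrative sketch only (both functions are invented; cpuid_eax() and boot_cpu_has() are the existing kernel helpers):

#include <linux/types.h>
#include <asm/processor.h>	/* cpuid_eax() */
#include <asm/cpufeature.h>	/* boot_cpu_has(), X86_FEATURE_HWP */

/* X86_FEATURE_HWP is (14*32 + 7); word 14 is filled straight from
 * CPUID leaf 0x6 EAX, so bit 7 there is the same bit. */
static bool hwp_in_raw_cpuid(void)
{
	return cpuid_eax(0x00000006) & (1 << 7);
}

/* Normal code keeps using the capability bit, exactly as before: */
static bool hwp_supported(void)
{
	return boot_cpu_has(X86_FEATURE_HWP);
}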
wrmsr(0x80860004, cap_mask, uk); /* All Transmeta CPUs have a constant TSC */ diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 52a2526c3fbe..19bc19d5e174 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -24,6 +24,7 @@ #include <asm/e820.h> #include <asm/proto.h> #include <asm/setup.h> +#include <asm/cpufeature.h> /* * The e820 map is the map that gets modified e.g. with command line parameters diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 70284d38fdc2..1c0b49fd6365 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -19,7 +19,7 @@ #include <asm/setup.h> #include <asm/processor-flags.h> #include <asm/msr-index.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/percpu.h> #include <asm/nops.h> #include <asm/bootparam.h> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 4034e905741a..734ba1d0f686 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -76,9 +76,7 @@ startup_64: subq $_text - __START_KERNEL_map, %rbp /* Is the address not 2M aligned? */ - movq %rbp, %rax - andl $~PMD_PAGE_MASK, %eax - testl %eax, %eax + testl $~PMD_PAGE_MASK, %ebp jnz bad_address /* diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index f48eb8eeefe2..3fdc1e53aaac 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -12,6 +12,7 @@ #include <linux/pm.h> #include <linux/io.h> +#include <asm/cpufeature.h> #include <asm/irqdomain.h> #include <asm/fixmap.h> #include <asm/hpet.h> diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 113e70784854..f95ac5d435aa 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -40,7 +40,7 @@ #include <linux/uaccess.h> #include <linux/gfp.h> -#include <asm/processor.h> +#include <asm/cpufeature.h> #include <asm/msr.h> static struct class *msr_class; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index c6aace2bbe08..b8105289c60b 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -290,7 +290,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64); /* has the side-effect of processing the entire instruction */ insn_get_length(insn); - if (WARN_ON_ONCE(!insn_complete(insn))) + if (!insn_complete(insn)) return -ENOEXEC; if (is_prefix_bad(insn)) diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S index 4cf401f581e7..b7c9db5deebe 100644 --- a/arch/x86/kernel/verify_cpu.S +++ b/arch/x86/kernel/verify_cpu.S @@ -30,7 +30,7 @@ * appropriately. Either display a message or halt. */ -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/msr-index.h> verify_cpu: diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index d6d64a519559..7f4839ef3608 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -358,7 +358,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) /* make room for real-mode segments */ tsk->thread.sp0 += 16; - if (static_cpu_has_safe(X86_FEATURE_SEP)) + if (static_cpu_has(X86_FEATURE_SEP)) tsk->thread.sysenter_cs = 0; load_sp0(tss, &tsk->thread); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 31355fb1a2cc..3611136b1455 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -200,6 +200,17 @@ SECTIONS :init #endif + /* + * Section for code used exclusively before alternatives are run. 
All + * references to such code must be patched out by alternatives, normally + * by using X86_FEATURE_ALWAYS CPU feature bit. + * + * See static_cpu_has() for an example. + */ + .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) { + *(.altinstr_aux) + } + INIT_DATA_SECTION(16) .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S index a2fe51b00cce..65be7cfaf947 100644 --- a/arch/x86/lib/clear_page_64.S +++ b/arch/x86/lib/clear_page_64.S @@ -1,5 +1,5 @@ #include <linux/linkage.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/alternative-asm.h> /* diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S index 009f98216b7e..24ef1c2104d4 100644 --- a/arch/x86/lib/copy_page_64.S +++ b/arch/x86/lib/copy_page_64.S @@ -1,7 +1,7 @@ /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ #include <linux/linkage.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/alternative-asm.h> /* diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 423644c230e7..accf7f2f557f 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -10,7 +10,7 @@ #include <asm/current.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm/asm.h> #include <asm/smap.h> diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 16698bba87de..a0de849435ad 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -1,7 +1,7 @@ /* Copyright 2002 Andi Kleen */ #include <linux/linkage.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/alternative-asm.h> /* diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index ca2afdd6d98e..90ce01bee00c 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S @@ -6,7 +6,7 @@ * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> */ #include <linux/linkage.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #undef memmove diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 2661fad05827..c9c81227ea37 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -1,7 +1,7 @@ /* Copyright 2002 Andi Kleen, SuSE Labs */ #include <linux/linkage.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/alternative-asm.h> .weak memset diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 3d06b482ebc7..7bbb853e36bd 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -3,7 +3,7 @@ #include <linux/stringify.h> #include <linux/linkage.h> #include <asm/dwarf2.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/alternative-asm.h> #include <asm-generic/export.h> #include <asm/nospec-branch.h> diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c index 92e2eacb3321..f65a33f505b6 100644 --- a/arch/x86/mm/setup_nx.c +++ b/arch/x86/mm/setup_nx.c @@ -4,6 +4,7 @@ #include <asm/pgtable.h> #include <asm/proto.h> +#include <asm/cpufeature.h> static int disable_nx; diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 50d86c0e9ba4..660a83c8287b 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -24,7 +24,6 @@ #include <asm/nmi.h> #include <asm/apic.h> #include <asm/processor.h> -#include <asm/cpufeature.h> 
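The include churn through these files follows one convention: <asm/cpufeatures.h> carries only the X86_FEATURE_*/X86_BUG_* bit numbers and is what assembly sources and lightweight headers pull in, while <asm/cpufeature.h> layers the C accessors (boot_cpu_has(), static_cpu_has(), cpu_has()) on top of it. A hedged sketch of a C-side user (the wrapper function is invented; the comment paraphrases the .altinstr_aux note added to vmlinux.lds.S above):

#include <linux/types.h>
#include <asm/cpufeature.h>	/* also pulls in <asm/cpufeatures.h> */

static bool erms_available(void)
{
	/* Compiled down to a jump patched by alternatives; until patching
	 * runs, control goes through a .altinstr_aux stub that tests the
	 * bit in boot_cpu_data at runtime, and X86_FEATURE_ALWAYS is what
	 * lets alternatives patch those references out afterwards. */
	return static_cpu_has(X86_FEATURE_ERMS);
}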
#include "op_x86_model.h" #include "op_counter.h" diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h index 755481f14d90..764ac2fc53fe 100644 --- a/arch/x86/um/asm/barrier.h +++ b/arch/x86/um/asm/barrier.h @@ -3,7 +3,7 @@ #include <asm/asm.h> #include <asm/segment.h> -#include <asm/cpufeature.h> +#include <asm/cpufeatures.h> #include <asm/cmpxchg.h> #include <asm/nops.h> diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 5a6a01135470..34fdaa6e99ba 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1229,6 +1229,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) return strcmp(buf, dmi->driver_data) < 0; } +static bool ahci_broken_lpm(struct pci_dev *pdev) +{ + static const struct dmi_system_id sysids[] = { + /* Various Lenovo 50 series have LPM issues with older BIOSen */ + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"), + }, + .driver_data = "20180406", /* 1.31 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"), + }, + .driver_data = "20180420", /* 1.28 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"), + }, + .driver_data = "20180315", /* 1.33 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"), + }, + /* + * Note date based on release notes, 2.35 has been + * reported to be good, but I've been unable to get + * a hold of the reporter to get the DMI BIOS date. + * TODO: fix this. + */ + .driver_data = "20180310", /* 2.35 */ + }, + { } /* terminate list */ + }; + const struct dmi_system_id *dmi = dmi_first_match(sysids); + int year, month, date; + char buf[9]; + + if (!dmi) + return false; + + dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); + snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); + + return strcmp(buf, dmi->driver_data) < 0; +} + static bool ahci_broken_online(struct pci_dev *pdev) { #define ENCODE_BUSDEVFN(bus, slot, func) \ @@ -1588,6 +1641,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "quirky BIOS, skipping spindown on poweroff\n"); } + if (ahci_broken_lpm(pdev)) { + pi.flags |= ATA_FLAG_NO_LPM; + dev_warn(&pdev->dev, + "BIOS update required for Link Power Management support\n"); + } + if (ahci_broken_suspend(pdev)) { hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; dev_warn(&pdev->dev, diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 9afd06ee5b30..ba514fa733de 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2209,6 +2209,9 @@ int ata_dev_configure(struct ata_device *dev) (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) dev->horkage |= ATA_HORKAGE_NOLPM; + if (ap->flags & ATA_FLAG_NO_LPM) + dev->horkage |= ATA_HORKAGE_NOLPM; + if (dev->horkage & ATA_HORKAGE_NOLPM) { ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index e8165ec55e6f..da3902ac16c8 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -651,6 +651,36 @@ static void loop_reread_partitions(struct loop_device *lo, __func__, lo->lo_number, lo->lo_file_name, rc); } +static inline int is_loop_device(struct file *file) +{ + struct inode *i = file->f_mapping->host; + + return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; +} + +static int loop_validate_file(struct file *file, struct block_device 
*bdev) +{ + struct inode *inode = file->f_mapping->host; + struct file *f = file; + + /* Avoid recursion */ + while (is_loop_device(f)) { + struct loop_device *l; + + if (f->f_mapping->host->i_bdev == bdev) + return -EBADF; + + l = f->f_mapping->host->i_bdev->bd_disk->private_data; + if (l->lo_state == Lo_unbound) { + return -EINVAL; + } + f = l->lo_backing_file; + } + if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) + return -EINVAL; + return 0; +} + /* * loop_change_fd switched the backing store of a loopback device to * a new file. This is useful for operating system installers to free up @@ -680,14 +710,15 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, if (!file) goto out; + error = loop_validate_file(file, bdev); + if (error) + goto out_putf; + inode = file->f_mapping->host; old_file = lo->lo_backing_file; error = -EINVAL; - if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) - goto out_putf; - /* size of the new backing store needs to be the same */ if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) goto out_putf; @@ -708,13 +739,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, return error; } -static inline int is_loop_device(struct file *file) -{ - struct inode *i = file->f_mapping->host; - - return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; -} - /* loop sysfs attributes */ static ssize_t loop_attr_show(struct device *dev, char *page, @@ -811,16 +835,17 @@ static struct attribute_group loop_attribute_group = { .attrs= loop_attrs, }; -static int loop_sysfs_init(struct loop_device *lo) +static void loop_sysfs_init(struct loop_device *lo) { - return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, - &loop_attribute_group); + lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, + &loop_attribute_group); } static void loop_sysfs_exit(struct loop_device *lo) { - sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, - &loop_attribute_group); + if (lo->sysfs_inited) + sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, + &loop_attribute_group); } static void loop_config_discard(struct loop_device *lo) @@ -872,7 +897,7 @@ static int loop_prepare_queue(struct loop_device *lo) static int loop_set_fd(struct loop_device *lo, fmode_t mode, struct block_device *bdev, unsigned int arg) { - struct file *file, *f; + struct file *file; struct inode *inode; struct address_space *mapping; unsigned lo_blocksize; @@ -892,29 +917,13 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, if (lo->lo_state != Lo_unbound) goto out_putf; - /* Avoid recursion */ - f = file; - while (is_loop_device(f)) { - struct loop_device *l; - - if (f->f_mapping->host->i_bdev == bdev) - goto out_putf; - - l = f->f_mapping->host->i_bdev->bd_disk->private_data; - if (l->lo_state == Lo_unbound) { - error = -EINVAL; - goto out_putf; - } - f = l->lo_backing_file; - } + error = loop_validate_file(file, bdev); + if (error) + goto out_putf; mapping = file->f_mapping; inode = mapping->host; - error = -EINVAL; - if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) - goto out_putf; - if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || !file->f_op->write_iter) lo_flags |= LO_FLAGS_READ_ONLY; diff --git a/drivers/block/loop.h b/drivers/block/loop.h index fb2237c73e61..60f0fd2c0c65 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -59,6 +59,7 @@ struct loop_device { struct kthread_worker worker; struct task_struct *worker_task; bool use_dio; + bool sysfs_inited; struct request_queue 
*lo_queue; struct blk_mq_tag_set tag_set; diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c index aa45c2e7ec7b..befc015cd7d5 100644 --- a/drivers/char/diag/diag_memorydevice.c +++ b/drivers/char/diag/diag_memorydevice.c @@ -206,6 +206,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx) } found = 0; + mutex_lock(&driver->diagchar_mutex); for (i = 0; i < driver->num_clients && !found; i++) { if ((driver->client_map[i].pid != pid) || (driver->client_map[i].pid == 0)) @@ -219,6 +220,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx) pr_debug("diag: wake up logging process\n"); wake_up_interruptible(&driver->wait_q); } + mutex_unlock(&driver->diagchar_mutex); if (!found) return -EINVAL; diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index bfdce051d405..6c16d80192c6 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -357,6 +357,8 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info, goto end; } } + mutex_unlock(&fwd_info->data_mutex); + mutex_unlock(&driver->hdlc_disable_mutex); if (write_len > 0) { err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len, @@ -364,18 +366,18 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info, if (err) { pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n", __func__, err); - goto end; + goto end_write; } } - mutex_unlock(&fwd_info->data_mutex); - mutex_unlock(&driver->hdlc_disable_mutex); + diagfwd_queue_read(fwd_info); return; end: - diag_ws_release(); mutex_unlock(&fwd_info->data_mutex); mutex_unlock(&driver->hdlc_disable_mutex); +end_write: + diag_ws_release(); if (buf) { DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Marking buffer as free p: %d, t: %d, buf_num: %d\n", @@ -694,24 +696,26 @@ static void diagfwd_data_read_done(struct diagfwd_info *fwd_info, } } + mutex_unlock(&fwd_info->data_mutex); + mutex_unlock(&driver->hdlc_disable_mutex); + if (write_len > 0) { err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len, temp_buf->ctxt); if (err) { pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n", __func__, err); - goto end; + goto end_write; } } - mutex_unlock(&fwd_info->data_mutex); - mutex_unlock(&driver->hdlc_disable_mutex); diagfwd_queue_read(fwd_info); return; end: - diag_ws_release(); mutex_unlock(&fwd_info->data_mutex); mutex_unlock(&driver->hdlc_disable_mutex); +end_write: + diag_ws_release(); if (temp_buf) { DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Marking buffer as free p: %d, t: %d, buf_num: %d\n", diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 7ca79714649e..d345ce59629c 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -121,14 +121,6 @@ config CPU_FREQ_DEFAULT_GOV_INTERACTIVE loading your cpufreq low-level hardware driver, using the 'interactive' governor for latency-sensitive workloads. -config CPU_FREQ_DEFAULT_GOV_SCHED - bool "sched" - select CPU_FREQ_GOV_SCHED - help - Use the CPUfreq governor 'sched' as default. This scales - cpu frequency using CPU utilization estimates from the - scheduler. 
- config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL bool "schedutil" depends on SMP diff --git a/drivers/gpu/drm/msm-hyp/Kconfig b/drivers/gpu/drm/msm-hyp/Kconfig index 676c0174c0ee..2a40a4bf036a 100644 --- a/drivers/gpu/drm/msm-hyp/Kconfig +++ b/drivers/gpu/drm/msm-hyp/Kconfig @@ -6,7 +6,7 @@ config DRM_MSM_HYP tristate "MSM DRM HYP" depends on DRM - depends on MSM_GVM_QUIN + depends on MSM_GVM depends on OF default y help diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index cb3b25ddd0da..5d390abef6bd 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -4,6 +4,7 @@ config DRM_MSM depends on DRM depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on OF + depends on !MSM_GVM select REGULATOR select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/msm/dba_bridge.c b/drivers/gpu/drm/msm/dba_bridge.c index b4a04931e52e..5ae564001309 100644 --- a/drivers/gpu/drm/msm/dba_bridge.c +++ b/drivers/gpu/drm/msm/dba_bridge.c @@ -16,6 +16,7 @@ #include "drm_edid.h" #include "sde_kms.h" #include "dba_bridge.h" +#include "sde/sde_recovery_manager.h" #undef pr_fmt #define pr_fmt(fmt) "dba_bridge:[%s] " fmt, __func__ @@ -36,6 +37,7 @@ * @num_of_input_lanes: Number of input lanes in case of DSI/LVDS * @pluggable: If it's pluggable * @panel_count: Number of panels attached to this display + * @client_info: bridge chip specific information for recovery manager */ struct dba_bridge { struct drm_bridge base; @@ -52,12 +54,17 @@ struct dba_bridge { bool pluggable; u32 panel_count; bool cont_splash_enabled; + struct recovery_client_info client_info; }; #define to_dba_bridge(x) container_of((x), struct dba_bridge, base) +static int _dba_bridge_recovery_callback(int err_code, + struct recovery_client_info *client_info); + static void _dba_bridge_cb(void *data, enum msm_dba_callback_event event) { struct dba_bridge *d_bridge = data; + int chip_err; if (!d_bridge) { SDE_ERROR("Invalid data\n"); @@ -73,6 +80,12 @@ static void _dba_bridge_cb(void *data, enum msm_dba_callback_event event) case MSM_DBA_CB_HPD_DISCONNECT: DRM_DEBUG("HPD DISCONNECT\n"); break; + case MSM_DBA_CB_DDC_I2C_ERROR: + case MSM_DBA_CB_DDC_TIMEOUT: + DRM_DEBUG("DDC FAILURE\n"); + chip_err = DBA_BRIDGE_CRITICAL_ERR + d_bridge->id; + sde_recovery_set_events(chip_err); + break; default: DRM_DEBUG("event:%d is not supported\n", event); break; @@ -83,6 +96,7 @@ static int _dba_bridge_attach(struct drm_bridge *bridge) { struct dba_bridge *d_bridge = to_dba_bridge(bridge); struct msm_dba_reg_info info; + struct recovery_client_info *client_info = &d_bridge->client_info; int ret = 0; if (!bridge) { @@ -115,6 +129,25 @@ static int _dba_bridge_attach(struct drm_bridge *bridge) goto error; } + snprintf(client_info->name, MAX_REC_NAME_LEN, "%s_%d", + d_bridge->chip_name, d_bridge->id); + + client_info->recovery_cb = _dba_bridge_recovery_callback; + + /* Identify individual chip by different error codes */ + client_info->err_supported[0].reported_err_code = + DBA_BRIDGE_CRITICAL_ERR + d_bridge->id; + client_info->err_supported[0].pre_err_code = 0; + client_info->err_supported[0].post_err_code = 0; + client_info->no_of_err = 1; + /* bridge chip context */ + client_info->pdata = d_bridge; + + ret = sde_recovery_client_register(client_info); + if (ret) + SDE_ERROR("%s recovery mgr register failed %d\n", + __func__, ret); + DRM_INFO("client:%s bridge:[%s:%d] attached\n", d_bridge->client_name, d_bridge->chip_name, d_bridge->id); @@ -242,6 +275,44 @@ static void _dba_bridge_post_disable(struct drm_bridge *bridge) } 
} +static int _dba_bridge_recovery_callback(int err_code, + struct recovery_client_info *client_info) +{ + int rc = 0; + struct dba_bridge *d_bridge; + + if (!client_info) { + SDE_ERROR("Invalid client info\n"); + rc = -EINVAL; + return rc; + } + + d_bridge = client_info->pdata; + + err_code = err_code - d_bridge->id; + + switch (err_code) { + case DBA_BRIDGE_CRITICAL_ERR: + SDE_DEBUG("%s critical bridge chip error\n", __func__); + + /* Power OFF */ + _dba_bridge_disable(&d_bridge->base); + _dba_bridge_post_disable(&d_bridge->base); + + /* settle power rails */ + msleep(100); + + /* Power On */ + _dba_bridge_pre_enable(&d_bridge->base); + _dba_bridge_enable(&d_bridge->base); + + break; + default: + SDE_ERROR("%s error %d undefined\n", __func__, err_code); + } + return rc; +} + static void _dba_bridge_mode_set(struct drm_bridge *bridge, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -372,6 +443,9 @@ void dba_bridge_cleanup(struct drm_bridge *bridge) if (!bridge) return; + sde_recovery_client_unregister(d_bridge->client_info.handle); + d_bridge->client_info.handle = NULL; + if (IS_ENABLED(CONFIG_MSM_DBA)) { if (!IS_ERR_OR_NULL(d_bridge->dba_ctx)) msm_dba_deregister_client(d_bridge->dba_ctx); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index 4bf694e2e7fa..0277fd3b3831 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -2577,7 +2577,7 @@ error: int dsi_display_prepare(struct dsi_display *display) { - int rc = 0, i, j; + int rc = 0, i = 0, j = 0; if (!display) { pr_err("Invalid params\n"); diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 74dea95d90de..03f8a55255c9 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -93,4 +93,8 @@ static inline void msm_mmu_disable(struct msm_mmu *mmu) int __init msm_smmu_driver_init(void); void __exit msm_smmu_driver_cleanup(void); +/* register custom fault handler for a specific domain */ +void msm_smmu_register_fault_handler(struct msm_mmu *mmu, + iommu_fault_handler_t handler); + #endif /* __MSM_MMU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c index aefbe0988fe5..eed3cfcb99ee 100644 --- a/drivers/gpu/drm/msm/msm_smmu.c +++ b/drivers/gpu/drm/msm/msm_smmu.c @@ -335,6 +335,18 @@ static struct device *msm_smmu_device_create(struct device *dev, return &pdev->dev; } +void msm_smmu_register_fault_handler(struct msm_mmu *mmu, + iommu_fault_handler_t handler) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct msm_smmu_client *client = msm_smmu_to_client(smmu); + + if (client) + iommu_set_fault_handler(client->mmu_mapping->domain, + handler, client->dev); + +} + struct msm_mmu *msm_smmu_new(struct device *dev, enum msm_mmu_domain_type domain) { diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 1bc3d0a926eb..a986f5db2c41 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -17,6 +17,8 @@ #include "sde_connector.h" #include "sde_backlight.h" #include "sde_splash.h" +#include <linux/workqueue.h> +#include <linux/atomic.h> #define SDE_DEBUG_CONN(c, fmt, ...) SDE_DEBUG("conn%d " fmt,\ (c) ? 
(c)->base.base.id : -1, ##__VA_ARGS__) @@ -51,6 +53,8 @@ static const struct drm_prop_enum_list hpd_clock_state[] = { {SDE_MODE_HPD_OFF, "OFF"}, }; +static struct work_struct cpu_up_work; + int sde_connector_get_info(struct drm_connector *connector, struct msm_display_info *info) { @@ -568,11 +572,18 @@ void sde_connector_prepare_fence(struct drm_connector *connector) sde_fence_prepare(&to_sde_connector(connector)->retire_fence); } +static void wake_up_cpu(struct work_struct *work) +{ + if (!cpu_up(1)) + pr_info("cpu1 is online\n"); +} + void sde_connector_complete_commit(struct drm_connector *connector) { struct drm_device *dev; struct msm_drm_private *priv; struct sde_connector *c_conn; + static atomic_t cpu_up_scheduled = ATOMIC_INIT(0); if (!connector) { SDE_ERROR("invalid connector\n"); @@ -587,7 +598,8 @@ void sde_connector_complete_commit(struct drm_connector *connector) /* * After LK totally exits, LK's early splash resource - * should be released. + * should be released, cpu1 is hot-plugged in case LK's + * early domain has reserved it. */ if (sde_splash_get_lk_complete_status(priv->kms)) { c_conn = to_sde_connector(connector); @@ -595,8 +607,11 @@ void sde_connector_complete_commit(struct drm_connector *connector) sde_splash_free_resource(priv->kms, &priv->phandle, c_conn->connector_type, c_conn->display); + if (atomic_add_unless(&cpu_up_scheduled, 1, 1)) { + INIT_WORK(&cpu_up_work, wake_up_cpu); + schedule_work(&cpu_up_work); + } } - } static int sde_connector_dpms(struct drm_connector *connector, diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 77fdcd86c920..dde742014125 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -44,6 +44,13 @@ /* timeout in frames waiting for frame done */ #define SDE_ENCODER_FRAME_DONE_TIMEOUT 60 +/* timeout in msecs */ +#define SDE_ENCODER_UNDERRUN_TIMEOUT 200 +/* underrun count threshold value */ +#define SDE_ENCODER_UNDERRUN_CNT_MAX 10 +/* 3 vsync time period in msec, report underrun */ +#define SDE_ENCODER_UNDERRUN_DELTA 50 + #define MISR_BUFF_SIZE 256 /* @@ -155,6 +162,11 @@ static struct sde_csc_cfg sde_csc_10bit_convert[SDE_MAX_CSC] = { * @crtc_frame_event: callback event * @frame_done_timeout: frame done timeout in Hz * @frame_done_timer: watchdog timer for frame done event + * @last_underrun_ts: variable to hold the last occurred underrun + * timestamp + * @underrun_cnt_dwork: underrun counter for delayed work + * @dwork: delayed work for deferring the reporting + * of underrun error */ struct sde_encoder_virt { struct drm_encoder base; @@ -181,6 +193,9 @@ struct sde_encoder_virt { u32 crtc_frame_event; atomic_t frame_done_timeout; struct timer_list frame_done_timer; + atomic_t last_underrun_ts; + atomic_t underrun_cnt_dwork; + struct delayed_work dwork; }; #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base) @@ -597,14 +612,28 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc, static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc, struct sde_encoder_phys *phy_enc) { + struct sde_encoder_virt *sde_enc = NULL; + if (!phy_enc) return; + sde_enc = to_sde_encoder_virt(drm_enc); + SDE_ATRACE_BEGIN("encoder_underrun_callback"); atomic_inc(&phy_enc->underrun_cnt); SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt)); - sde_recovery_set_events(SDE_UNDERRUN); + /* schedule delayed work if it has not scheduled or executed earlier */ + if ((!atomic_read(&sde_enc->last_underrun_ts)) && + 
(!atomic_read(&sde_enc->underrun_cnt_dwork))) { + schedule_delayed_work(&sde_enc->dwork, + msecs_to_jiffies(SDE_ENCODER_UNDERRUN_TIMEOUT)); + } + + /* take snapshot of current underrun and increment the count */ + atomic_set(&sde_enc->last_underrun_ts, jiffies); + atomic_inc(&sde_enc->underrun_cnt_dwork); + trace_sde_encoder_underrun(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt)); SDE_DBG_CTRL("stop_ftrace"); @@ -1391,6 +1420,37 @@ static void sde_encoder_frame_done_timeout(unsigned long data) SDE_ENCODER_FRAME_EVENT_ERROR); } +static void sde_encoder_underrun_work_func(struct work_struct *work) +{ + struct sde_encoder_virt *sde_enc = + container_of(work, struct sde_encoder_virt, dwork.work); + + unsigned long delta, time; + + if (!sde_enc) { + SDE_ERROR("invalid parameters\n"); + return; + } + + delta = jiffies - atomic_read(&sde_enc->last_underrun_ts); + time = jiffies_to_msecs(delta); + + /* + * report underrun error when it exceeds the threshold count + * and the occurrence of last underrun error is less than 3 + * vsync period. + */ + if (atomic_read(&sde_enc->underrun_cnt_dwork) > + SDE_ENCODER_UNDERRUN_CNT_MAX && + time < SDE_ENCODER_UNDERRUN_DELTA) { + sde_recovery_set_events(SDE_UNDERRUN); + } + + /* reset underrun last timestamp and counter */ + atomic_set(&sde_enc->last_underrun_ts, 0); + atomic_set(&sde_enc->underrun_cnt_dwork, 0); +} + struct drm_encoder *sde_encoder_init( struct drm_device *dev, struct msm_display_info *disp_info) @@ -1421,8 +1481,11 @@ struct drm_encoder *sde_encoder_init( drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs); atomic_set(&sde_enc->frame_done_timeout, 0); + atomic_set(&sde_enc->last_underrun_ts, 0); + atomic_set(&sde_enc->underrun_cnt_dwork, 0); setup_timer(&sde_enc->frame_done_timer, sde_encoder_frame_done_timeout, (unsigned long) sde_enc); + INIT_DELAYED_WORK(&sde_enc->dwork, sde_encoder_underrun_work_func); _sde_encoder_init_debugfs(drm_enc, sde_enc, sde_kms); diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 676d480ca802..a53b345071a6 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -67,7 +67,8 @@ static struct recovery_client_info info = { .recovery_cb = sde_kms_recovery_callback, .err_supported[0] = {SDE_UNDERRUN, 0, 0}, .err_supported[1] = {SDE_VSYNC_MISS, 0, 0}, - .no_of_err = 2, + .err_supported[2] = {SDE_SMMU_FAULT, 0, 0}, + .no_of_err = 3, .handle = NULL, .pdata = NULL, }; @@ -1140,6 +1141,18 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms) return 0; } +static int sde_smmu_fault_handler(struct iommu_domain *iommu, + struct device *dev, unsigned long iova, int flags, void *arg) +{ + + dev_info(dev, "%s: iova=0x%08lx, flags=0x%x, iommu=%pK\n", __func__, + iova, flags, iommu); + + sde_recovery_set_events(SDE_SMMU_FAULT); + + return 0; +} + static int _sde_kms_mmu_init(struct sde_kms *sde_kms) { struct msm_mmu *mmu; @@ -1158,6 +1171,8 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) continue; } + msm_smmu_register_fault_handler(mmu, sde_smmu_fault_handler); + /* Attaching smmu means IOMMU HW starts to work immediately. * However, display HW in LK is still accessing memory * while the memory map is not done yet. 
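Aside, not part of the patch: the sde_kms hunk above wires msm_smmu_register_fault_handler() so that an IOMMU context fault is logged and forwarded to the recovery manager. Below is a minimal, self-contained sketch of that pattern with illustrative example_* names; the recovery call is a stub standing in for sde_recovery_set_events(SDE_SMMU_FAULT).

#include <linux/iommu.h>
#include <linux/device.h>

static void example_recovery_event(void)
{
	/* stand-in for sde_recovery_set_events(SDE_SMMU_FAULT) */
}

/* matches iommu_fault_handler_t, same shape as the sde_smmu_fault_handler() added above */
static int example_smmu_fault_handler(struct iommu_domain *domain,
				      struct device *dev, unsigned long iova,
				      int flags, void *token)
{
	dev_info(dev, "smmu fault: iova=0x%08lx, flags=0x%x\n", iova, flags);
	example_recovery_event();
	return 0;	/* the patch's handler also returns 0 after flagging recovery */
}

static void example_register(struct iommu_domain *domain, struct device *dev)
{
	/* last argument is the token handed back to the handler */
	iommu_set_fault_handler(domain, example_smmu_fault_handler, dev);
}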
@@ -1523,6 +1538,10 @@ static int sde_kms_recovery_callback(int err_code, pr_debug("%s [SDE_VSYNC_MISS] trigger soft reset\n", __func__); break; + case SDE_SMMU_FAULT: + pr_debug("%s [SDE_SMMU_FAULT] trigger soft reset\n", __func__); + break; + default: pr_err("%s error %d undefined\n", __func__, err_code); diff --git a/drivers/gpu/drm/msm/sde/sde_recovery_manager.h b/drivers/gpu/drm/msm/sde/sde_recovery_manager.h index 32fe17a187a0..aeaecbd194f4 100644 --- a/drivers/gpu/drm/msm/sde/sde_recovery_manager.h +++ b/drivers/gpu/drm/msm/sde/sde_recovery_manager.h @@ -29,11 +29,17 @@ #define MAX_REC_NAME_LEN (16) #define MAX_REC_UEVENT_LEN (64) -#define MAX_REC_ERR_SUPPORT (2) +#define MAX_REC_ERR_SUPPORT (3) /* MSM Recovery Manager Error Code */ +#define SDE_SMMU_FAULT 111 #define SDE_UNDERRUN 222 #define SDE_VSYNC_MISS 333 +/* + * instance id of bridge chip is added to make error code + * unique to individual bridge chip instance + */ +#define DBA_BRIDGE_CRITICAL_ERR 444 /** * struct recovery_mgr_info - Recovery manager information diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c index 711d7ba83e61..6392ec1f069b 100644 --- a/drivers/gpu/msm/adreno_snapshot.c +++ b/drivers/gpu/msm/adreno_snapshot.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -400,6 +400,8 @@ static void snapshot_rb_ibs(struct kgsl_device *device, ibsize = rbptr[index + 3]; } + index = (index + 1) % KGSL_RB_DWORDS; + /* Don't parse known global IBs */ if (iommu_is_setstate_addr(device, ibaddr, ibsize)) continue; @@ -410,9 +412,8 @@ static void snapshot_rb_ibs(struct kgsl_device *device, parse_ib(device, snapshot, snapshot->process, ibaddr, ibsize); - } - - index = (index + 1) % KGSL_RB_DWORDS; + } else + index = (index + 1) % KGSL_RB_DWORDS; } } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b316ab7e8996..60e2c9faa95f 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -512,6 +512,9 @@ #define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615 #define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070 +#define USB_VENDOR_ID_INNOMEDIA 0x1292 +#define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745 + #define USB_VENDOR_ID_ITE 0x048d #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index ce1543d69acb..c9a11315493b 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -152,6 +152,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT }, { 0, 0 } }; diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index aa26f3c3416b..c151bb625179 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -33,6 +33,18 @@ config INFINIBAND_USER_ACCESS libibverbs, libibcm and a hardware driver library from <http://www.openfabrics.org/git/>. 
+config INFINIBAND_USER_ACCESS_UCM + bool "Userspace CM (UCM, DEPRECATED)" + depends on BROKEN + depends on INFINIBAND_USER_ACCESS + help + The UCM module has known security flaws, which no one is + interested to fix. The user-space part of this code was + dropped from the upstream a long time ago. + + This option is DEPRECATED and planned to be removed. + + config INFINIBAND_USER_MEM bool depends on INFINIBAND_USER_ACCESS != n diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index d43a8994ac5c..737612a442be 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -5,8 +5,8 @@ obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ ib_cm.o iw_cm.o ib_addr.o \ $(infiniband-y) obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o -obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ - $(user_access-y) +obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y) +obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y) ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ device.o fmr_pool.o cache.o netlink.o \ diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index e1629ab58db7..8218d714fa01 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -926,7 +926,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr) { struct c4iw_mr *mhp = to_c4iw_mr(ibmr); - if (unlikely(mhp->mpl_len == mhp->max_mpl_len)) + if (unlikely(mhp->mpl_len == mhp->attr.pbl_size)) return -ENOMEM; mhp->mpl[mhp->mpl_len++] = addr; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c index 23e27e1179d1..18961e69aadc 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -712,7 +712,8 @@ int vfe_hw_probe(struct platform_device *pdev) spin_lock_init(&vfe_dev->shared_data_lock); spin_lock_init(&vfe_dev->reg_update_lock); spin_lock_init(&req_history_lock); - spin_lock_init(&vfe_dev->completion_lock); + spin_lock_init(&vfe_dev->reset_completion_lock); + spin_lock_init(&vfe_dev->halt_completion_lock); media_entity_init(&vfe_dev->subdev.sd.entity, 0, NULL, 0); vfe_dev->subdev.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV; vfe_dev->subdev.sd.entity.group_id = MSM_CAMERA_SUBDEV_VFE; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h index acf0a90ed93d..da8fbb3cd5b9 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h @@ -804,7 +804,8 @@ struct vfe_device { struct mutex core_mutex; spinlock_t shared_data_lock; spinlock_t reg_update_lock; - spinlock_t completion_lock; + spinlock_t reset_completion_lock; + spinlock_t halt_completion_lock; /* Tasklet info */ atomic_t irq_cnt; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c index 850f1b032a8a..f4e4ca6cb6dc 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c @@ -370,15 +370,24 @@ static void msm_vfe40_clear_status_reg(struct vfe_device *vfe_dev) static void msm_vfe40_process_reset_irq(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1) { - if (irq_status0 & (1 << 31)) + unsigned long flags; + + if (irq_status0 & (1 << 31)) { + spin_lock_irqsave(&vfe_dev->reset_completion_lock, flags); complete(&vfe_dev->reset_complete); + spin_unlock_irqrestore(&vfe_dev->reset_completion_lock, flags); + } } static void msm_vfe40_process_halt_irq(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1) { + unsigned long flags; + if (irq_status1 & (1 << 8)) { + spin_lock_irqsave(&vfe_dev->halt_completion_lock, flags); complete(&vfe_dev->halt_complete); + spin_unlock_irqrestore(&vfe_dev->halt_completion_lock, flags); msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x2C0); } } @@ -767,7 +776,11 @@ static long msm_vfe40_reset_hardware(struct vfe_device *vfe_dev, uint32_t first_start, uint32_t blocking_call) { long rc = 0; + unsigned long flags; + + spin_lock_irqsave(&vfe_dev->reset_completion_lock, flags); init_completion(&vfe_dev->reset_complete); + spin_unlock_irqrestore(&vfe_dev->reset_completion_lock, flags); if (first_start) { msm_camera_io_w_mb(0x1FF, vfe_dev->vfe_base + 0xC); @@ -1780,6 +1793,7 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev, int rc = 0; enum msm_vfe_input_src i; struct msm_isp_timestamp ts; + unsigned long flags; /* Keep only halt and restart mask */ msm_vfe40_config_irq(vfe_dev, (1 << 31), (1 << 8), @@ -1796,7 +1810,9 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev, msm_isp_stats_stream_update(vfe_dev); if (blocking) { + spin_lock_irqsave(&vfe_dev->halt_completion_lock, flags); init_completion(&vfe_dev->halt_complete); + spin_unlock_irqrestore(&vfe_dev->halt_completion_lock, flags); /* Halt AXI Bus Bridge */ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0); rc = wait_for_completion_interruptible_timeout( diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c index 
0a969cc897b0..0daf2d914be5 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c @@ -443,10 +443,10 @@ void msm_vfe47_process_reset_irq(struct vfe_device *vfe_dev, unsigned long flags; if (irq_status0 & (1 << 31)) { - spin_lock_irqsave(&vfe_dev->completion_lock, flags); + spin_lock_irqsave(&vfe_dev->reset_completion_lock, flags); complete(&vfe_dev->reset_complete); vfe_dev->reset_pending = 0; - spin_unlock_irqrestore(&vfe_dev->completion_lock, flags); + spin_unlock_irqrestore(&vfe_dev->reset_completion_lock, flags); } } @@ -454,9 +454,12 @@ void msm_vfe47_process_halt_irq(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1) { uint32_t val = 0; + unsigned long flags; if (irq_status1 & (1 << 8)) { + spin_lock_irqsave(&vfe_dev->halt_completion_lock, flags); complete(&vfe_dev->halt_complete); + spin_unlock_irqrestore(&vfe_dev->halt_completion_lock, flags); msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x400); } @@ -774,9 +777,9 @@ long msm_vfe47_reset_hardware(struct vfe_device *vfe_dev, uint32_t reset; unsigned long flags; - spin_lock_irqsave(&vfe_dev->completion_lock, flags); + spin_lock_irqsave(&vfe_dev->reset_completion_lock, flags); init_completion(&vfe_dev->reset_complete); - spin_unlock_irqrestore(&vfe_dev->completion_lock, flags); + spin_unlock_irqrestore(&vfe_dev->reset_completion_lock, flags); if (blocking_call) vfe_dev->reset_pending = 1; @@ -1904,6 +1907,10 @@ void msm_vfe47_cfg_axi_ub_equal_default( stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(axi_data->free_wm[i])); + if (!stream_info) { + pr_err("%s: stream_info is NULL!", __func__); + return; + } vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info); for (plane = 0; plane < stream_info->num_planes; @@ -1996,6 +2003,7 @@ int msm_vfe47_axi_halt(struct vfe_device *vfe_dev, enum msm_vfe_input_src i; uint32_t val = 0; struct msm_isp_timestamp ts; + unsigned long flags; val = msm_camera_io_r(vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET); val |= 0x1; @@ -2012,7 +2020,9 @@ int msm_vfe47_axi_halt(struct vfe_device *vfe_dev, __func__, vfe_dev->pdev->id, blocking); if (blocking) { + spin_lock_irqsave(&vfe_dev->halt_completion_lock, flags); init_completion(&vfe_dev->halt_complete); + spin_unlock_irqrestore(&vfe_dev->halt_completion_lock, flags); /* Halt AXI Bus Bridge */ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x400); rc = wait_for_completion_interruptible_timeout( diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index c69bdbf191c6..661850d6d7c6 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -2830,6 +2830,10 @@ static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev_ioctl, } stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl, HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])); + if (!stream_info) { + pr_err("%s: stream_info is NULL", __func__); + return -EINVAL; + } for (j = 0; j < stream_info->num_planes; j++) { for (k = 0; k < stream_info->num_isp; k++) { vfe_dev = stream_info->vfe_dev[k]; @@ -3289,7 +3293,10 @@ static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev_ioctl, continue; stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl, HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])); - + if (!stream_info) { + pr_err("%s: stream_info is NULL", __func__); + return -EINVAL; + } 
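Aside, not part of the patch: the msm_isp40/msm_isp47 hunks above split the old completion_lock into reset_completion_lock and halt_completion_lock and take the lock around both the completion (re)initialisation and the IRQ-side complete(), so the two cannot race. A condensed sketch of that locking pattern with hypothetical names (the driver itself calls init_completion() where this sketch uses reinit_completion()):

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct example_vfe {
	spinlock_t reset_completion_lock;	/* spin_lock_init() at probe */
	struct completion reset_complete;	/* init_completion() at probe */
};

static void example_start_reset(struct example_vfe *v)
{
	unsigned long flags;

	spin_lock_irqsave(&v->reset_completion_lock, flags);
	reinit_completion(&v->reset_complete);
	spin_unlock_irqrestore(&v->reset_completion_lock, flags);
	/* ...program the reset, then wait_for_completion_*() outside the lock... */
}

static irqreturn_t example_reset_done_irq(int irq, void *data)
{
	struct example_vfe *v = data;
	unsigned long flags;

	spin_lock_irqsave(&v->reset_completion_lock, flags);
	complete(&v->reset_complete);
	spin_unlock_irqrestore(&v->reset_completion_lock, flags);
	return IRQ_HANDLED;
}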
spin_lock_irqsave(&stream_info->lock, flags); rc = __msm_isp_check_stream_state(stream_info, 0); spin_unlock_irqrestore(&stream_info->lock, flags); @@ -3326,6 +3333,10 @@ int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg) return -EINVAL; stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])); + if (!stream_info) { + pr_err("%s: stream_info is NULL", __func__); + return -EINVAL; + } vfe_idx = msm_isp_get_vfe_idx_for_stream_user(vfe_dev, stream_info); if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] != @@ -3845,6 +3856,11 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) } stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(update_info->stream_handle)); + if (!stream_info) { + pr_err("%s:%d: stream_info is null", + __func__, __LINE__); + return -EINVAL; + } if (SRC_TO_INTF(stream_info->stream_src) >= VFE_SRC_MAX) continue; if (stream_info->state != ACTIVE && @@ -3885,6 +3901,11 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) &update_cmd->update_info[i]; stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(update_info->stream_handle)); + if (!stream_info) { + pr_err("%s:%d: stream_info is null", + __func__, __LINE__); + return -EINVAL; + } stream_info->buf_divert = 0; msm_isp_get_timestamp(×tamp, vfe_dev); frame_id = vfe_dev->axi_data.src_info[ @@ -3919,6 +3940,11 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) &update_cmd->update_info[i]; stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(update_info->stream_handle)); + if (!stream_info) { + pr_err("%s:%d: stream_info is null", + __func__, __LINE__); + return -EINVAL; + } spin_lock_irqsave(&stream_info->lock, flags); /* no change then break early */ if (stream_info->current_framedrop_period == @@ -3952,6 +3978,11 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) &update_cmd->update_info[i]; stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(update_info->stream_handle)); + if (!stream_info) { + pr_err("%s:%d: stream_info is null", + __func__, __LINE__); + return -EINVAL; + } sw_skip_info = &update_info->sw_skip_info; if (sw_skip_info->stream_src_mask != 0) { /* SW image buffer drop */ @@ -3976,6 +4007,11 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) &update_cmd->update_info[i]; stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(update_info->stream_handle)); + if (!stream_info) { + pr_err("%s:%d: stream_info is null", + __func__, __LINE__); + return -EINVAL; + } rc = msm_isp_stream_axi_cfg_update(vfe_dev, stream_info, update_info); if (rc) @@ -4009,6 +4045,11 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) &update_cmd->update_info[i]; stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(update_info->stream_handle)); + if (!stream_info) { + pr_err("%s:%d: stream_info is null", + __func__, __LINE__); + return -EINVAL; + } rc = msm_isp_add_buf_queue(vfe_dev, stream_info, update_info->user_stream_id); if (rc) @@ -4023,6 +4064,11 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) &update_cmd->update_info[i]; stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(update_info->stream_handle)); + if (!stream_info) { + pr_err("%s:%d: stream_info is null", + __func__, __LINE__); + return -EINVAL; + } msm_isp_remove_buf_queue(vfe_dev, stream_info, update_info->user_stream_id); pr_debug("%s, Remove bufq for 
Stream 0x%x\n", @@ -4059,6 +4105,11 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) &update_cmd->update_info[i]; stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(update_info->stream_handle)); + if (!stream_info) { + pr_err("%s:%d: stream_info is null", + __func__, __LINE__); + return -EINVAL; + } vfe_idx = msm_isp_get_vfe_idx_for_stream( vfe_dev, stream_info); msm_isp_stream_axi_cfg_update(vfe_dev, stream_info, diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c index 06e3ee4c353b..5ef08cbe9aee 100644 --- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c +++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -32,6 +32,8 @@ #define MSM_JPEG_NAME "jpeg" #define DEV_NAME_LEN 10 +static char devname[DEV_NAME_LEN]; + static int msm_jpeg_open(struct inode *inode, struct file *filp) { int rc = 0; @@ -185,7 +187,6 @@ static int msm_jpeg_init_dev(struct platform_device *pdev) struct msm_jpeg_device *msm_jpeg_device_p; const struct of_device_id *device_id; const struct msm_jpeg_priv_data *priv_data; - char devname[DEV_NAME_LEN]; msm_jpeg_device_p = kzalloc(sizeof(struct msm_jpeg_device), GFP_ATOMIC); if (!msm_jpeg_device_p) { @@ -328,6 +329,7 @@ static struct platform_driver msm_jpeg_driver = { static int __init msm_jpeg_driver_init(void) { int rc; + rc = platform_driver_register(&msm_jpeg_driver); return rc; } diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index e8b933111e0d..92109cadc3fc 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file) static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { void __iomem *address = (void __iomem *)file->private_data; - unsigned char *page; - int retval; int len = 0; unsigned int value; - - if (*offset < 0) - return -EINVAL; - if (count == 0 || count > 1024) - return 0; - if (*offset != 0) - return 0; - - page = (unsigned char *)__get_free_page(GFP_KERNEL); - if (!page) - return -ENOMEM; + char lbuf[20]; value = readl(address); - len = sprintf(page, "%d\n", value); - - if (copy_to_user(buf, page, len)) { - retval = -EFAULT; - goto exit; - } - *offset += len; - retval = len; + len = snprintf(lbuf, sizeof(lbuf), "%d\n", value); -exit: - free_page((unsigned long)page); - return retval; + return simple_read_from_buffer(buf, count, offset, lbuf, len); } static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index fe90b7e04427..5e047bfc0cc4 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) { unsigned long status; - unsigned long pfn = page_to_pfn(b->page); + unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); STATS_INC(b->stats.lock[is_2m_pages]); @@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct 
vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) { unsigned long status; - unsigned long pfn = page_to_pfn(b->page); + unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); STATS_INC(b->stats.unlock[is_2m_pages]); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index e4e4e04e1d0c..76dbbbde884b 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -4572,8 +4572,8 @@ int mmc_pm_notify(struct notifier_block *notify_block, spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 0; - if (host->ops->get_cd) - present = host->ops->get_cd(host); + if (mmc_card_is_removable(host)) + present = !!mmc_gpio_get_cd(host); if (mmc_bus_manual_resume(host) && !host->ignore_bus_resume_flags && diff --git a/drivers/net/can/spi/qti-can.c b/drivers/net/can/spi/qti-can.c index 7db6ecf8f354..f7ba4510d1bc 100644 --- a/drivers/net/can/spi/qti-can.c +++ b/drivers/net/can/spi/qti-can.c @@ -70,6 +70,7 @@ struct qti_can { bool can_fw_cmd_timeout_req; u32 rem_all_buffering_timeout_ms; u32 can_fw_cmd_timeout_ms; + s64 time_diff; }; struct qti_can_netdev_privdata { @@ -118,6 +119,10 @@ struct spi_miso { /* TLV for MISO line */ #define CMD_BEGIN_BOOT_ROM_UPGRADE 0x99 #define CMD_BOOT_ROM_UPGRADE_DATA 0x9A #define CMD_END_BOOT_ROM_UPGRADE 0x9B +#define CMD_END_FW_UPDATE_FILE 0x9C +#define CMD_UPDATE_TIME_INFO 0x9D +#define CMD_UPDATE_SUSPEND_EVENT 0x9E +#define CMD_UPDATE_RESUME_EVENT 0x9F #define IOCTL_RELEASE_CAN_BUFFER (SIOCDEVPRIVATE + 0) #define IOCTL_ENABLE_BUFFERING (SIOCDEVPRIVATE + 1) @@ -132,6 +137,7 @@ struct spi_miso { /* TLV for MISO line */ #define IOCTL_BEGIN_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 11) #define IOCTL_BOOT_ROM_UPGRADE_DATA (SIOCDEVPRIVATE + 12) #define IOCTL_END_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 13) +#define IOCTL_END_FW_UPDATE_FILE (SIOCDEVPRIVATE + 14) #define IFR_DATA_OFFSET 0x100 struct can_fw_resp { @@ -163,7 +169,7 @@ struct can_add_filter_resp { struct can_receive_frame { u8 can_if; - u32 ts; + __le64 ts; u32 mid; u8 dlc; u8 data[8]; @@ -178,6 +184,10 @@ struct can_config_bit_timing { u32 brp; } __packed; +struct can_time_info { + __le64 time; +} __packed; + static struct can_bittiming_const rh850_bittiming_const = { .name = "qti_can", .tseg1_min = 1, @@ -291,7 +301,7 @@ static void qti_can_receive_frame(struct qti_can *priv_data, return; } - LOGDI("rcv frame %d %d %x %d %x %x %x %x %x %x %x %x\n", + LOGDI("rcv frame %d %llu %x %d %x %x %x %x %x %x %x %x\n", frame->can_if, frame->ts, frame->mid, frame->dlc, frame->data[0], frame->data[1], frame->data[2], frame->data[3], frame->data[4], frame->data[5], frame->data[6], frame->data[7]); @@ -301,7 +311,8 @@ static void qti_can_receive_frame(struct qti_can *priv_data, for (i = 0; i < cf->can_dlc; i++) cf->data[i] = frame->data[i]; - nsec = ms_to_ktime(le32_to_cpu(frame->ts)); + nsec = ms_to_ktime(le64_to_cpu(frame->ts) + priv_data->time_diff); + skt = skb_hwtstamps(skb); skt->hwtstamp = nsec; LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp)); @@ -354,6 +365,8 @@ static int qti_can_process_response(struct qti_can *priv_data, struct spi_miso *resp, int length) { int ret = 0; + u64 mstime; + ktime_t ktime_now; LOGDI("<%x %2d [%d]\n", resp->cmd, resp->len, resp->seq); if (resp->cmd == CMD_CAN_RECEIVE_FRAME) { @@ -402,6 +415,12 @@ static int qti_can_process_response(struct qti_can *priv_data, ret |= (fw_resp->br_min & 0xFF) << 16; ret |= (fw_resp->maj & 0xF) << 8; ret |= (fw_resp->min & 0xFF); + } else if (resp->cmd == CMD_UPDATE_TIME_INFO) { + struct can_time_info *time_data 
= + (struct can_time_info *)resp->data; + ktime_now = ktime_get_boottime(); + mstime = ktime_to_ms(ktime_now); + priv_data->time_diff = mstime - (le64_to_cpu(time_data->time)); } if (resp->cmd == priv_data->wait_cmd) { @@ -445,8 +464,8 @@ static int qti_can_process_rx(struct qti_can *priv_data, char *rx_buf) } else { data = rx_buf + length_processed; resp = (struct spi_miso *)data; - if (resp->cmd == 0) { - /* special case. ignore cmd==0 */ + if (resp->cmd == 0x00 || resp->cmd == 0xFF) { + /* special case. ignore cmd==0x00, 0xFF */ length_processed += 1; continue; } @@ -563,6 +582,30 @@ static int qti_can_query_firmware_version(struct qti_can *priv_data) return ret; } +static int qti_can_notify_power_events(struct qti_can *priv_data, u8 event_type) +{ + char *tx_buf, *rx_buf; + int ret; + struct spi_mosi *req; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = event_type; + req->len = 0; + req->seq = atomic_inc_return(&priv_data->msg_seq); + + ret = qti_can_do_spi_transaction(priv_data); + mutex_unlock(&priv_data->spi_lock); + + return ret; +} + static int qti_can_set_bitrate(struct net_device *netdev) { char *tx_buf, *rx_buf; @@ -983,6 +1026,8 @@ static int qti_can_convert_ioctl_cmd_to_spi_cmd(int ioctl_cmd) return CMD_BOOT_ROM_UPGRADE_DATA; case IOCTL_END_BOOT_ROM_UPGRADE: return CMD_END_BOOT_ROM_UPGRADE; + case IOCTL_END_FW_UPDATE_FILE: + return CMD_END_FW_UPDATE_FILE; } return -EINVAL; } @@ -1119,6 +1164,7 @@ static int qti_can_netdev_do_ioctl(struct net_device *netdev, case IOCTL_BEGIN_BOOT_ROM_UPGRADE: case IOCTL_BOOT_ROM_UPGRADE_DATA: case IOCTL_END_BOOT_ROM_UPGRADE: + case IOCTL_END_FW_UPDATE_FILE: ret = qti_can_do_blocking_ioctl(netdev, ifr, cmd); break; } @@ -1242,6 +1288,7 @@ static int qti_can_probe(struct spi_device *spi) int err, retry = 0, query_err = -1, i; struct qti_can *priv_data = NULL; struct device *dev; + u32 irq_type; dev = &spi->dev; dev_info(dev, "qti_can_probe"); @@ -1318,7 +1365,7 @@ static int qti_can_probe(struct spi_device *spi) } priv_data->support_can_fd = of_property_read_bool(spi->dev.of_node, - "support-can-fd"); + "qcom,support-can-fd"); if (of_device_is_compatible(spi->dev.of_node, "qcom,nxp,mpc5746c")) qti_can_bittiming_const = flexcan_bittiming_const; @@ -1348,8 +1395,11 @@ static int qti_can_probe(struct spi_device *spi) } } + irq_type = irq_get_trigger_type(spi->irq); + if (irq_type == IRQ_TYPE_NONE) + irq_type = IRQ_TYPE_EDGE_FALLING; err = request_threaded_irq(spi->irq, NULL, qti_can_irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + irq_type | IRQF_ONESHOT, "qti-can", priv_data); if (err) { LOGDE("Failed to request irq: %d", err); @@ -1415,6 +1465,10 @@ static int qti_can_remove(struct spi_device *spi) static int qti_can_suspend(struct device *dev) { struct spi_device *spi = to_spi_device(dev); + struct qti_can *priv_data = spi_get_drvdata(spi); + u8 power_event = CMD_UPDATE_SUSPEND_EVENT; + + qti_can_notify_power_events(priv_data, power_event); enable_irq_wake(spi->irq); return 0; @@ -1424,9 +1478,10 @@ static int qti_can_resume(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct qti_can *priv_data = spi_get_drvdata(spi); + u8 power_event = CMD_UPDATE_RESUME_EVENT; disable_irq_wake(spi->irq); - qti_can_rx_message(priv_data); + qti_can_notify_power_events(priv_data, power_event); return 0; } diff --git 
a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index ff053b098c22..0f4ef3712dd7 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -1414,6 +1414,61 @@ void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv) CNSS_REASON_TIMEOUT); } +struct dma_iommu_mapping *cnss_smmu_get_mapping(struct device *dev) +{ + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev)); + + if (!pci_priv) + return NULL; + + return pci_priv->smmu_mapping; +} +EXPORT_SYMBOL(cnss_smmu_get_mapping); + +int cnss_smmu_map(struct device *dev, + phys_addr_t paddr, uint32_t *iova_addr, size_t size) +{ + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev)); + unsigned long iova; + size_t len; + int ret = 0; + + if (!pci_priv) + return -ENODEV; + + if (!iova_addr) { + cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n", + &paddr, size); + return -EINVAL; + } + + len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE); + iova = roundup(pci_priv->smmu_iova_ipa_start, PAGE_SIZE); + + if (iova >= + (pci_priv->smmu_iova_ipa_start + pci_priv->smmu_iova_ipa_len)) { + cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n", + iova, + &pci_priv->smmu_iova_ipa_start, + pci_priv->smmu_iova_ipa_len); + return -ENOMEM; + } + + ret = iommu_map(pci_priv->smmu_mapping->domain, iova, + rounddown(paddr, PAGE_SIZE), len, + IOMMU_READ | IOMMU_WRITE); + if (ret) { + cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret); + return ret; + } + + pci_priv->smmu_iova_ipa_start = iova + len; + *iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE)); + + return 0; +} +EXPORT_SYMBOL(cnss_smmu_map); + int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info) { int ret = 0; @@ -2129,6 +2184,17 @@ static int cnss_pci_probe(struct pci_dev *pci_dev, &pci_priv->smmu_iova_start, pci_priv->smmu_iova_len); + res = platform_get_resource_byname(plat_priv->plat_dev, + IORESOURCE_MEM, + "smmu_iova_ipa"); + if (res) { + pci_priv->smmu_iova_ipa_start = res->start; + pci_priv->smmu_iova_ipa_len = resource_size(res); + cnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: %zu\n", + &pci_priv->smmu_iova_ipa_start, + pci_priv->smmu_iova_ipa_len); + } + ret = cnss_pci_init_smmu(pci_priv); if (ret) { cnss_pr_err("Failed to init SMMU, err = %d\n", ret); diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h index 182355ae7577..e47f14e8a325 100644 --- a/drivers/net/wireless/cnss2/pci.h +++ b/drivers/net/wireless/cnss2/pci.h @@ -73,6 +73,8 @@ struct cnss_pci_data { struct dma_iommu_mapping *smmu_mapping; dma_addr_t smmu_iova_start; size_t smmu_iova_len; + dma_addr_t smmu_iova_ipa_start; + size_t smmu_iova_ipa_len; void __iomem *bar; struct cnss_msi_config *msi_config; u32 msi_ep_base_data; diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index bc0263c371a1..66dd016e2132 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -340,13 +340,13 @@ int ipa_disconnect(u32 clnt_hdl) EXPORT_SYMBOL(ipa_disconnect); /** -* ipa_clear_endpoint_delay() - Clear ep_delay. 
-* @clnt_hdl: [in] IPA client handle -* -* Returns: 0 on success, negative on failure -* -* Note: Should not be called from atomic context -*/ + * ipa_clear_endpoint_delay() - Clear ep_delay. + * @clnt_hdl: [in] IPA client handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ int ipa_clear_endpoint_delay(u32 clnt_hdl) { int ret; @@ -358,13 +358,13 @@ int ipa_clear_endpoint_delay(u32 clnt_hdl) EXPORT_SYMBOL(ipa_clear_endpoint_delay); /** -* ipa_reset_endpoint() - reset an endpoint from BAM perspective -* @clnt_hdl: [in] IPA client handle -* -* Returns: 0 on success, negative on failure -* -* Note: Should not be called from atomic context -*/ + * ipa_reset_endpoint() - reset an endpoint from BAM perspective + * @clnt_hdl: [in] IPA client handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ int ipa_reset_endpoint(u32 clnt_hdl) { int ret; @@ -376,13 +376,13 @@ int ipa_reset_endpoint(u32 clnt_hdl) EXPORT_SYMBOL(ipa_reset_endpoint); /** -* ipa_disable_endpoint() - Disable an endpoint from IPA perspective -* @clnt_hdl: [in] IPA client handle -* -* Returns: 0 on success, negative on failure -* -* Note: Should not be called from atomic context -*/ + * ipa_disable_endpoint() - Disable an endpoint from IPA perspective + * @clnt_hdl: [in] IPA client handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ int ipa_disable_endpoint(u32 clnt_hdl) { int ret; @@ -676,8 +676,28 @@ int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) EXPORT_SYMBOL(ipa_add_hdr); /** - * ipa_del_hdr() - Remove the specified headers from SW and optionally commit them - * to IPA HW + * ipa_add_hdr_usr() - add the specified headers to SW and optionally + * commit them to IPA HW + * @hdrs: [inout] set of headers to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_hdr_usr, hdrs, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_add_hdr_usr); + +/** + * ipa_del_hdr() - Remove the specified headers from SW and optionally + * commit them to IPA HW * @hdls: [inout] set of headers to delete * * Returns: 0 on success, negative on failure @@ -715,15 +735,16 @@ EXPORT_SYMBOL(ipa_commit_hdr); * ipa_reset_hdr() - reset the current header table in SW (does not commit to * HW) * + * @user_only: [in] indicate delete rules installed by userspace * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa_reset_hdr(void) +int ipa_reset_hdr(bool user_only) { int ret; - IPA_API_DISPATCH_RETURN(ipa_reset_hdr); + IPA_API_DISPATCH_RETURN(ipa_reset_hdr, user_only); return ret; } @@ -793,16 +814,18 @@ EXPORT_SYMBOL(ipa_copy_hdr); * ipa_add_hdr_proc_ctx() - add the specified headers to SW * and optionally commit them to IPA HW * @proc_ctxs: [inout] set of processing context headers to add + * @user_only: [in] indicate rules installed by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) +int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) { int ret; - IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs); + 
IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs, user_only); return ret; } @@ -848,6 +871,26 @@ int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) EXPORT_SYMBOL(ipa_add_rt_rule); /** + * ipa_add_rt_rule_usr() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_rt_rule_usr, rules, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_add_rt_rule_usr); + +/** * ipa_del_rt_rule() - Remove the specified routing rules to SW and optionally * commit to IPA HW * @hdls: [inout] set of routing rules to delete @@ -889,16 +932,17 @@ EXPORT_SYMBOL(ipa_commit_rt); * ipa_reset_rt() - reset the current SW routing table of specified type * (does not commit to HW) * @ip: The family of routing tables + * @user_only: [in] indicate delete rules installed by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa_reset_rt(enum ipa_ip_type ip) +int ipa_reset_rt(enum ipa_ip_type ip, bool user_only) { int ret; - IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip); + IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip, user_only); return ret; } @@ -981,6 +1025,7 @@ EXPORT_SYMBOL(ipa_mdfy_rt_rule); /** * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally * commit to IPA HW + * @rules: [inout] set of filtering rules to add * * Returns: 0 on success, negative on failure * @@ -997,6 +1042,26 @@ int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) EXPORT_SYMBOL(ipa_add_flt_rule); /** + * ipa_add_flt_rule_usr() - Add the specified filtering rules to + * SW and optionally commit to IPA HW + * @rules: [inout] set of filtering rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_flt_rule_usr, rules, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_add_flt_rule_usr); + +/** * ipa_del_flt_rule() - Remove the specified filtering rules from SW and * optionally commit to IPA HW * @@ -1054,17 +1119,18 @@ EXPORT_SYMBOL(ipa_commit_flt); /** * ipa_reset_flt() - Reset the current SW filtering table of specified type * (does not commit to HW) - * @ip: [in] the family of routing tables + * @ip: [in] the family of routing tables + * @user_only: [in] indicate delete rules installed by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa_reset_flt(enum ipa_ip_type ip) +int ipa_reset_flt(enum ipa_ip_type ip, bool user_only) { int ret; - IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip); + IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip, user_only); return ret; } @@ -1710,20 +1776,20 @@ int ipa_uc_dereg_rdyCB(void) EXPORT_SYMBOL(ipa_uc_dereg_rdyCB); /** -* teth_bridge_init() - Initialize the Tethering bridge driver -* @params - in/out params for USB initialization API (please look at struct -* definition for more info) -* -* USB driver gets a pointer to a callback function (usb_notify_cb) and an -* associated data. USB driver installs this callback function in the call to -* ipa_connect(). 
-* -* Builds IPA resource manager dependency graph. -* -* Return codes: 0: success, -* -EINVAL - Bad parameter -* Other negative value - Failure -*/ + * teth_bridge_init() - Initialize the Tethering bridge driver + * @params - in/out params for USB initialization API (please look at struct + * definition for more info) + * + * USB driver gets a pointer to a callback function (usb_notify_cb) and an + * associated data. USB driver installs this callback function in the call to + * ipa_connect(). + * + * Builds IPA resource manager dependency graph. + * + * Return codes: 0: success, + * -EINVAL - Bad parameter + * Other negative value - Failure + */ int teth_bridge_init(struct teth_bridge_init_params *params) { int ret; @@ -1735,8 +1801,8 @@ int teth_bridge_init(struct teth_bridge_init_params *params) EXPORT_SYMBOL(teth_bridge_init); /** -* teth_bridge_disconnect() - Disconnect tethering bridge module -*/ + * teth_bridge_disconnect() - Disconnect tethering bridge module + */ int teth_bridge_disconnect(enum ipa_client_type client) { int ret; @@ -1748,14 +1814,14 @@ int teth_bridge_disconnect(enum ipa_client_type client) EXPORT_SYMBOL(teth_bridge_disconnect); /** -* teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call -* @connect_params: Connection info -* -* Return codes: 0: success -* -EINVAL: invalid parameters -* -EPERM: Operation not permitted as the bridge is already -* connected -*/ + * teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call + * @connect_params: Connection info + * + * Return codes: 0: success + * -EINVAL: invalid parameters + * -EPERM: Operation not permitted as the bridge is already + * connected + */ int teth_bridge_connect(struct teth_bridge_connect_params *connect_params) { int ret; @@ -2232,16 +2298,16 @@ int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in) EXPORT_SYMBOL(ipa_write_qmap_id); /** -* ipa_add_interrupt_handler() - Adds handler to an interrupt type -* @interrupt: Interrupt type -* @handler: The handler to be added -* @deferred_flag: whether the handler processing should be deferred in -* a workqueue -* @private_data: the client's private data -* -* Adds handler to an interrupt type and enable the specific bit -* in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled -*/ + * ipa_add_interrupt_handler() - Adds handler to an interrupt type + * @interrupt: Interrupt type + * @handler: The handler to be added + * @deferred_flag: whether the handler processing should be deferred in + * a workqueue + * @private_data: the client's private data + * + * Adds handler to an interrupt type and enable the specific bit + * in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled + */ int ipa_add_interrupt_handler(enum ipa_irq_type interrupt, ipa_irq_handler_t handler, bool deferred_flag, @@ -2257,11 +2323,11 @@ int ipa_add_interrupt_handler(enum ipa_irq_type interrupt, EXPORT_SYMBOL(ipa_add_interrupt_handler); /** -* ipa_remove_interrupt_handler() - Removes handler to an interrupt type -* @interrupt: Interrupt type -* -* Removes the handler and disable the specific bit in IRQ_EN register -*/ + * ipa_remove_interrupt_handler() - Removes handler to an interrupt type + * @interrupt: Interrupt type + * + * Removes the handler and disable the specific bit in IRQ_EN register + */ int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt) { int ret; @@ -2273,12 +2339,12 @@ int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt) EXPORT_SYMBOL(ipa_remove_interrupt_handler); /** -* 
ipa_restore_suspend_handler() - restores the original suspend IRQ handler -* as it was registered in the IPA init sequence. -* Return codes: -* 0: success -* -EPERM: failed to remove current handler or failed to add original handler -* */ + * ipa_restore_suspend_handler() - restores the original suspend IRQ handler + * as it was registered in the IPA init sequence. + * Return codes: + * 0: success + * -EPERM: failed to remove current handler or failed to add original handler + */ int ipa_restore_suspend_handler(void) { int ret; @@ -2621,10 +2687,10 @@ static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p) { int result; - /* - * IPA probe function can be called for multiple times as the same probe - * function handles multiple compatibilities - */ +/** + * IPA probe function can be called for multiple times as the same probe + * function handles multiple compatibilities + */ pr_debug("ipa: IPA driver probing started for %s\n", pdev_p->dev.of_node->name); @@ -2672,6 +2738,42 @@ static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p) return result; } +static void ipa_generic_plat_drv_shutdown(struct platform_device *pdev_p) +{ + int result; + + pr_info("ipa: IPA driver shutdown started for %s\n", + pdev_p->dev.of_node->name); + + if (!ipa_api_ctrl) { + pr_err("ipa: invalid ipa_api_ctrl\n"); + return; + } + + /* call probe based on IPA HW version */ + switch (ipa_api_hw_type) { + case IPA_HW_v2_0: + case IPA_HW_v2_1: + case IPA_HW_v2_5: + case IPA_HW_v2_6L: + result = ipa_plat_drv_shutdown(pdev_p, ipa_api_ctrl, + ipa_plat_drv_match); + break; + case IPA_HW_v3_0: + case IPA_HW_v3_1: + case IPA_HW_v3_5: + case IPA_HW_v3_5_1: + default: + pr_err("ipa: ipa_generic_plat_drv_shutdown, unsupported version %d\n", + ipa_api_hw_type); + return; + } + + if (result) + pr_err("ipa: ipa_generic_plat_drv_shutdown failed\n"); +} + + static int ipa_ap_suspend(struct device *dev) { int ret; @@ -2711,7 +2813,7 @@ EXPORT_SYMBOL(ipa_register_ipa_ready_cb); * * Return codes: * None -*/ + */ void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id) { IPA_API_DISPATCH(ipa_inc_client_enable_clks, id); @@ -2727,7 +2829,7 @@ EXPORT_SYMBOL(ipa_inc_client_enable_clks); * * Return codes: * None -*/ + */ void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id) { IPA_API_DISPATCH(ipa_dec_client_disable_clks, id); @@ -2757,14 +2859,14 @@ int ipa_inc_client_enable_clks_no_block( EXPORT_SYMBOL(ipa_inc_client_enable_clks_no_block); /** -* ipa_suspend_resource_no_block() - suspend client endpoints related to the -* IPA_RM resource and decrement active clients counter. This function is -* guaranteed to avoid sleeping. -* -* @resource: [IN] IPA Resource Manager resource -* -* Return codes: 0 on success, negative on failure. -*/ + * ipa_suspend_resource_no_block() - suspend client endpoints related to the + * IPA_RM resource and decrement active clients counter. This function is + * guaranteed to avoid sleeping. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. 
+ */ int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource) { int ret; @@ -2975,6 +3077,7 @@ static struct platform_driver ipa_plat_drv = { .pm = &ipa_pm_ops, .of_match_table = ipa_plat_drv_match, }, + .shutdown = ipa_generic_plat_drv_shutdown, }; static int __init ipa_module_init(void) diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h index 1fb0e7122042..72db83c6e69d 100644 --- a/drivers/platform/msm/ipa/ipa_api.h +++ b/drivers/platform/msm/ipa/ipa_api.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -69,11 +69,13 @@ struct ipa_api_controller { int (*ipa_add_hdr)(struct ipa_ioc_add_hdr *hdrs); + int (*ipa_add_hdr_usr)(struct ipa_ioc_add_hdr *hdrs, bool user_only); + int (*ipa_del_hdr)(struct ipa_ioc_del_hdr *hdls); int (*ipa_commit_hdr)(void); - int (*ipa_reset_hdr)(void); + int (*ipa_reset_hdr)(bool user_only); int (*ipa_get_hdr)(struct ipa_ioc_get_hdr *lookup); @@ -81,17 +83,21 @@ struct ipa_api_controller { int (*ipa_copy_hdr)(struct ipa_ioc_copy_hdr *copy); - int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); + int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); int (*ipa_del_hdr_proc_ctx)(struct ipa_ioc_del_hdr_proc_ctx *hdls); int (*ipa_add_rt_rule)(struct ipa_ioc_add_rt_rule *rules); + int (*ipa_add_rt_rule_usr)(struct ipa_ioc_add_rt_rule *rules, + bool user_only); + int (*ipa_del_rt_rule)(struct ipa_ioc_del_rt_rule *hdls); int (*ipa_commit_rt)(enum ipa_ip_type ip); - int (*ipa_reset_rt)(enum ipa_ip_type ip); + int (*ipa_reset_rt)(enum ipa_ip_type ip, bool user_only); int (*ipa_get_rt_tbl)(struct ipa_ioc_get_rt_tbl *lookup); @@ -103,13 +109,16 @@ struct ipa_api_controller { int (*ipa_add_flt_rule)(struct ipa_ioc_add_flt_rule *rules); + int (*ipa_add_flt_rule_usr)(struct ipa_ioc_add_flt_rule *rules, + bool user_only); + int (*ipa_del_flt_rule)(struct ipa_ioc_del_flt_rule *hdls); int (*ipa_mdfy_flt_rule)(struct ipa_ioc_mdfy_flt_rule *rules); int (*ipa_commit_flt)(enum ipa_ip_type ip); - int (*ipa_reset_flt)(enum ipa_ip_type ip); + int (*ipa_reset_flt)(enum ipa_ip_type ip, bool user_only); int (*allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem); @@ -384,12 +393,27 @@ struct ipa_api_controller { #ifdef CONFIG_IPA int ipa_plat_drv_probe(struct platform_device *pdev_p, struct ipa_api_controller *api_ctrl, struct of_device_id *pdrv_match); +int ipa_plat_drv_shutdown(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match); +void ipa_platform_shutdown(void); #else static inline int ipa_plat_drv_probe(struct platform_device *pdev_p, struct ipa_api_controller *api_ctrl, struct of_device_id *pdrv_match) { return -ENODEV; } +static inline int ipa_plat_drv_shutdown(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + return -ENODEV; +} +static inline int ipa_platform_shutdown(void) +{ + return -ENODEV; +} + #endif /* (CONFIG_IPA) */ #ifdef CONFIG_IPA3 diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h index 911db0b19079..00c3515bae30 100644 --- a/drivers/platform/msm/ipa/ipa_common_i.h +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -1,4 +1,4 @@ -/* Copyright (c) 
2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -296,11 +296,13 @@ struct ipa_mhi_connect_params_internal { * @link: entry's link in global header offset entries list * @offset: the offset * @bin: bin + * @ipacm_installed: indicate if installed by ipacm */ struct ipa_hdr_offset_entry { struct list_head link; u32 offset; u32 bin; + bool ipacm_installed; }; extern const char *ipa_clients_strings[]; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c index ad7d2d6175bd..d9f8912c0514 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -279,6 +279,28 @@ int ipa2_active_clients_log_print_table(char *buf, int size) return cnt; } + +static int ipa2_clean_modem_rule(void) +{ + struct ipa_install_fltr_rule_req_msg_v01 *req; + int val = 0; + + req = kzalloc( + sizeof(struct ipa_install_fltr_rule_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAERR("mem allocated failed!\n"); + return -ENOMEM; + } + req->filter_spec_list_valid = false; + req->filter_spec_list_len = 0; + req->source_pipe_index_valid = 0; + val = qmi_filter_request_send(req); + kfree(req); + + return val; +} + static int ipa2_active_clients_panic_notifier(struct notifier_block *this, unsigned long event, void *ptr) { @@ -531,7 +553,8 @@ static void ipa_wan_msg_free_cb(void *buff, u32 len, u32 type) kfree(buff); } -static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_cache) +static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, + bool is_cache) { int retval; struct ipa_wan_msg *wan_msg; @@ -716,7 +739,8 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (ipa2_add_hdr((struct ipa_ioc_add_hdr *)param)) { + if (ipa2_add_hdr_usr((struct ipa_ioc_add_hdr *)param, + true)) { retval = -EFAULT; break; } @@ -796,7 +820,8 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (ipa2_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) { + if (ipa2_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param, + true)) { retval = -EFAULT; break; } @@ -915,7 +940,8 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) { + if (ipa2_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param, + true)) { retval = -EFAULT; break; } @@ -1009,19 +1035,19 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = ipa2_commit_hdr(); break; case IPA_IOC_RESET_HDR: - retval = ipa2_reset_hdr(); + retval = ipa2_reset_hdr(false); break; case IPA_IOC_COMMIT_RT: retval = ipa2_commit_rt(arg); break; case IPA_IOC_RESET_RT: - retval = ipa2_reset_rt(arg); + retval = ipa2_reset_rt(arg, false); break; case IPA_IOC_COMMIT_FLT: retval = ipa2_commit_flt(arg); break; case IPA_IOC_RESET_FLT: - retval = ipa2_reset_flt(arg); + retval = ipa2_reset_flt(arg, false); break; case IPA_IOC_GET_RT_TBL: if (copy_from_user(header, (u8 *)arg, @@ -1401,7 +1427,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } if (ipa2_add_hdr_proc_ctx( - (struct ipa_ioc_add_hdr_proc_ctx *)param)) { + (struct ipa_ioc_add_hdr_proc_ctx *)param, true)) { retval = -EFAULT; break; } 
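Aside, not part of the patch: the ipa_v2 changes above thread a user_only flag through the header/routing/filter add and reset paths and tag userspace-installed entries with the new ipacm_installed field; the IPA_IOC_CLEANUP handler in the following hunk then resets with user_only = true so only IPACM-installed state is dropped while kernel-installed rules stay. A rough sketch of that filtering idea, with hypothetical names rather than the driver's actual table code:

#include <linux/list.h>
#include <linux/slab.h>

struct example_entry {
	struct list_head link;
	bool ipacm_installed;	/* set when the rule was added from user space */
};

/* delete entries from a rule list; with user_only == true, keep kernel-installed ones */
static void example_reset(struct list_head *head, bool user_only)
{
	struct example_entry *e, *next;

	list_for_each_entry_safe(e, next, head, link) {
		if (user_only && !e->ipacm_installed)
			continue;
		list_del(&e->link);
		kfree(e);
	}
}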
@@ -1465,7 +1491,22 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; - default: /* redundant, as cmd was checked against MAXNR */ + case IPA_IOC_CLEANUP: + /*Route and filter rules will also be clean*/ + IPADBG("Got IPA_IOC_CLEANUP\n"); + retval = ipa2_reset_hdr(true); + memset(&nat_del, 0, sizeof(nat_del)); + nat_del.table_index = 0; + retval = ipa2_nat_del_cmd(&nat_del); + retval = ipa2_clean_modem_rule(); + break; + + case IPA_IOC_QUERY_WLAN_CLIENT: + IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n"); + retval = ipa2_resend_wlan_msg(); + break; + + default: IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return -ENOTTY; } @@ -1478,7 +1519,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) /** * ipa_setup_dflt_rt_tables() - Setup default routing tables -* + * Return codes: * 0: success * -ENOMEM: failed to allocate memory @@ -1752,6 +1793,31 @@ int ipa_q6_pipe_delay(bool zip_pipes) return 0; } +/* Remove delay only for IPA consumer pipes */ +static void ipa_pipe_delay(bool set_reset) +{ + u32 reg_val = 0; + int client_idx; + int ep_idx; + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + /* Break the processing for IPA PROD pipes and avoid looping. */ + if (IPA_CLIENT_IS_CONS(client_idx)) + break; + + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + IPA_SETFIELD_IN_REG(reg_val, set_reset, + IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT, + IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_CTRL_N_OFST(ep_idx), reg_val); + } +} + int ipa_q6_monitor_holb_mitigation(bool enable) { int ep_idx; @@ -1830,6 +1896,51 @@ static int ipa_q6_avoid_holb(bool zip_pipes) return 0; } +/* + * Set HOLB drop on all IPA producer/client cons pipes, + * do not set suspend + */ +static void ipa_avoid_holb(void) +{ + u32 reg_val; + int ep_idx; + int client_idx = IPA_CLIENT_MAX - 1; + + for (; client_idx >= 0; client_idx--) { + /* Break the processing for IPA CONS pipes and avoid looping. */ + if (IPA_CLIENT_IS_PROD(client_idx)) + break; + + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + /* + * ipa2_cfg_ep_holb is not used here because we are + * also setting HOLB on Q6 pipes, and from APPS perspective + * they are not valid, therefore, the above function + * will fail. + */ + reg_val = 0; + IPA_SETFIELD_IN_REG(reg_val, 0, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(ep_idx), + reg_val); + + reg_val = 0; + IPA_SETFIELD_IN_REG(reg_val, 1, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(ep_idx), + reg_val); + } +} + static u32 ipa_get_max_flt_rt_cmds(u32 num_pipes) { u32 max_cmds = 0; @@ -2089,6 +2200,49 @@ static int ipa_q6_set_ex_path_dis_agg(void) return retval; } +int register_ipa_platform_cb(int (*q6_cleanup_cb)(void)) +{ + IPAERR("In register_ipa_platform_cb\n"); + if (ipa_ctx) { + if (ipa_ctx->q6_cleanup_cb == NULL) { + IPAERR("reg q6_cleanup_cb\n"); + ipa_ctx->q6_cleanup_cb = q6_cleanup_cb; + } else + IPAERR("Already registered\n"); + } else { + IPAERR("IPA driver not initialized, retry\n"); + return -EAGAIN; + } + return 0; +} + +/** +* ipa_apps_shutdown_cleanup() - Take care Apps ep's cleanup +* 1) Set HOLB drop on all IPA producer pipes. +* 2) Remove delay for all IPA consumer pipes. 
+* 3) Wait for all IPA consumer pipes to go empty and +* reset it. +* 4) Do aggregation force close for all pipes. +* 5) Reset all IPA producer pipes + +* 0: success +*/ + +int ipa_apps_shutdown_cleanup(void) +{ + IPA_ACTIVE_CLIENTS_INC_SPECIAL("APPS_SHUTDOWN"); + + ipa_avoid_holb(); + + ipa_pipe_delay(false); + + ipa2_apps_shutdown_apps_ep_reset(); + + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("APPS_SHUTDOWN"); + + return 0; +} + /** * ipa_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration * in IPA HW before modem shutdown. This is performed in @@ -3907,6 +4061,8 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, ipa_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset; ipa_ctx->use_dma_zone = resource_p->use_dma_zone; ipa_ctx->tethered_flow_control = resource_p->tethered_flow_control; + ipa_ctx->is_apps_shutdown_support = + resource_p->is_apps_shutdown_support; /* Setting up IPA RX Polling Timeout Seconds */ ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec, @@ -4186,6 +4342,10 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, init_waitqueue_head(&ipa_ctx->msg_waitq); mutex_init(&ipa_ctx->msg_lock); + /* store wlan client-connect-msg-list */ + INIT_LIST_HEAD(&ipa_ctx->msg_wlan_client_list); + mutex_init(&ipa_ctx->msg_wlan_client_lock); + mutex_init(&ipa_ctx->lock); mutex_init(&ipa_ctx->nat_mem.lock); mutex_init(&ipa_ctx->ipa_cne_evt_lock); @@ -4447,6 +4607,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, ipa_drv_res->ipa_wdi2 = false; ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; + ipa_drv_res->is_apps_shutdown_support = false; /* Get IPA HW Version */ result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver", @@ -4474,6 +4635,14 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, ipa_drv_res->ipa_uc_monitor_holb ? "Enabled" : "Disabled"); + /* Check apps_shutdown_support enabled or disabled */ + ipa_drv_res->is_apps_shutdown_support = + of_property_read_bool(pdev->dev.of_node, + "qcom,apps-shutdown-support"); + IPAERR(": apps shutdown support = %s\n", + ipa_drv_res->is_apps_shutdown_support + ? "Enabled" : "Disabled"); + /* Get IPA WAN / LAN RX pool sizes */ result = of_property_read_u32(pdev->dev.of_node, "qcom,wan-rx-ring-size", @@ -4918,6 +5087,37 @@ static int ipa_smmu_ap_cb_probe(struct device *dev) return result; } +/** +* ipa_platform_shutdown() - Ensure Q6 ep cleanup is done and +* followed by APPS ep's cleanup. 
+*/ +void ipa_platform_shutdown(void) +{ + IPADBG("****************ipa_platform_shutdown****************\n"); + if (ipa_ctx->q6_cleanup_cb) + ipa_ctx->q6_cleanup_cb(); + else + IPADBG("No Q6 cleanup callback registered\n"); + ipa_apps_shutdown_cleanup(); +} + +int ipa_plat_drv_shutdown(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + if (!ipa_ctx) { + pr_err("IPA driver not initialized\n"); + return -EOPNOTSUPP; + } + if (ipa_ctx->is_apps_shutdown_support) + ipa_platform_shutdown(); + else { + pr_err("There is no apps IPA driver shutdown support\n"); + return -EOPNOTSUPP; + } + return 0; +} + int ipa_plat_drv_probe(struct platform_device *pdev_p, struct ipa_api_controller *api_ctrl, struct of_device_id *pdrv_match) { diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c index 74e7394a80b1..b1eb67dbf02c 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -84,11 +84,20 @@ int ipa_disable_data_path(u32 clnt_hdl) IPA_ENDP_INIT_AGGR_N_OFST_v2_0(clnt_hdl)); if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >> IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) == IPA_ENABLE_AGGR) { - res = ipa_tag_aggr_force_close(clnt_hdl); - if (res) { - IPAERR("tag process timeout, client:%d err:%d\n", - clnt_hdl, res); - BUG(); + /* + * Tag process will not work for, + * APPS CMD PROD --> Uses the same for IMM cmd over tag + * APPS LAN CONS --> Already suspend is set + */ + if (!(ep->client == IPA_CLIENT_APPS_CMD_PROD || + ep->client == IPA_CLIENT_APPS_LAN_CONS)) { + res = ipa_tag_aggr_force_close(clnt_hdl); + if (res) { + IPAERR("tag process timeout"); + IPAERR("client:%d err:%d\n", + clnt_hdl, res); + ipa_assert(); + } } } @@ -765,6 +774,63 @@ bail: } /** +* ipa2_apps_shutdown_apps_ep_reset() - +* reset an endpoints from BAM perspective. 
+* +* Q6 ep reset is not handled here +*/ +void ipa2_apps_shutdown_apps_ep_reset(void) +{ + struct ipa_ep_context *ep; + int ep_idx, client_idx; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return; + } + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + ep = &ipa_ctx->ep[ep_idx]; + if (ep->valid && (IPA_CLIENT_IS_APPS_PROD(client_idx) || + IPA_CLIENT_IS_APPS_CONS(client_idx))) { + /* + * we shouldn't reset APPS CMD PROD + * and LAN CONS in for loop + * these 2 ep's should be resetted at last, + * since it is used in Tag Process + */ + if (!(client_idx == IPA_CLIENT_APPS_CMD_PROD || + client_idx == IPA_CLIENT_APPS_LAN_CONS)) { + IPADBG("teardown ep (%d)\n", ep_idx); + ipa2_teardown_sys_pipe(ep_idx); + } + } + } + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + if (ep_idx != -1) { + ep = &ipa_ctx->ep[ep_idx]; + if (ep->valid) { + IPADBG("teardown ep (%d)\n", ep_idx); + ipa2_teardown_sys_pipe(ep_idx); + } + } + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (ep_idx != -1) { + ep = &ipa_ctx->ep[ep_idx]; + if (ep->valid) { + IPADBG("teardown ep (%d)\n", ep_idx); + ipa2_teardown_sys_pipe(ep_idx); + } + } +} + +/** * ipa2_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before * client disconnect. * @clnt_hdl: [in] opaque client handle assigned by IPA to client diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c index 834f028d3e48..6392c379b026 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
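
Stepping back from the individual hunks, the apps-shutdown path added above always runs in the same order: HOLB drop is enabled on the consumer pipes, pipe delay is cleared on the producer pipes, and only then are the APPS endpoints torn down, with IPA_CLIENT_APPS_LAN_CONS and IPA_CLIENT_APPS_CMD_PROD reset last because the tag/immediate-command machinery still runs over them. A condensed sketch of that ordering; ipa_avoid_holb() and ipa_pipe_delay() are the helpers introduced by this patch, while the teardown_* names are placeholders for the loop in ipa2_apps_shutdown_apps_ep_reset():

/* Ordering sketch only - not a drop-in replacement for the driver code */
static void demo_apps_shutdown_sequence(void)
{
	/* 1. drop, rather than queue, traffic headed to stalled consumers */
	ipa_avoid_holb();		/* HOLB enable with timer = 0 on CONS pipes */

	/* 2. make sure producer pipes are not held back by ENDP_DELAY */
	ipa_pipe_delay(false);

	/* 3. tear down the ordinary APPS endpoints first ... */
	teardown_all_apps_eps_except(IPA_CLIENT_APPS_LAN_CONS,
				     IPA_CLIENT_APPS_CMD_PROD);

	/* 4. ... and the two still needed for tag/immediate commands last */
	teardown_ep(IPA_CLIENT_APPS_LAN_CONS);
	teardown_ep(IPA_CLIENT_APPS_CMD_PROD);
}
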
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1008,7 +1008,7 @@ fail_desc: static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, const struct ipa_flt_rule *rule, u8 add_rear, - u32 *rule_hdl) + u32 *rule_hdl, bool user) { struct ipa_flt_entry *entry; struct ipa_rt_tbl *rt_tbl = NULL; @@ -1076,6 +1076,7 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, } *rule_hdl = id; entry->id = id; + entry->ipacm_installed = user; IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt); return 0; @@ -1198,12 +1199,12 @@ static int __ipa_add_global_flt_rule(enum ipa_ip_type ip, tbl = &ipa_ctx->glob_flt_tbl[ip]; IPADBG_LOW("add global flt rule ip=%d\n", ip); - return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl); + return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, false); } static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, const struct ipa_flt_rule *rule, u8 add_rear, - u32 *rule_hdl) + u32 *rule_hdl, bool user) { struct ipa_flt_tbl *tbl; int ipa_ep_idx; @@ -1225,12 +1226,13 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip]; IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep); - return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl); + return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user); } /** * ipa2_add_flt_rule() - Add the specified filtering rules to SW and optionally * commit to IPA HW + * @rules: [inout] set of filtering rules to add * * Returns: 0 on success, negative on failure * @@ -1238,6 +1240,21 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, */ int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) { + return ipa2_add_flt_rule_usr(rules, false); +} + +/** + * ipa2_add_flt_rule_usr() - Add the specified filtering rules + * to SW and optionally commit to IPA HW + * @rules: [inout] set of filtering rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only) +{ int i; int result; @@ -1259,7 +1276,8 @@ int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) result = __ipa_add_ep_flt_rule(rules->ip, rules->ep, &rules->rules[i].rule, rules->rules[i].at_rear, - &rules->rules[i].flt_rule_hdl); + &rules->rules[i].flt_rule_hdl, + user_only); if (result) { IPAERR_RL("failed to add flt rule %d\n", i); rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED; @@ -1396,13 +1414,14 @@ bail: /** * ipa2_reset_flt() - Reset the current SW filtering table of specified type * (does not commit to HW) - * @ip: [in] the family of routing tables + * @ip: [in] the family of routing tables + * @user_only: [in] indicate rules deleted by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa2_reset_flt(enum ipa_ip_type ip) +int ipa2_reset_flt(enum ipa_ip_type ip, bool user_only) { struct ipa_flt_tbl *tbl; struct ipa_flt_entry *entry; @@ -1435,16 +1454,19 @@ int ipa2_reset_flt(enum ipa_ip_type ip) IPA_INVALID_L4_PROTOCOL)) continue; - list_del(&entry->link); - entry->tbl->rule_cnt--; - if (entry->rt_tbl) - entry->rt_tbl->ref_cnt--; - entry->cookie = 0; - id = entry->id; - kmem_cache_free(ipa_ctx->flt_rule_cache, entry); + if (!user_only || 
+ entry->ipacm_installed) { + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + entry->cookie = 0; + id = entry->id; + kmem_cache_free(ipa_ctx->flt_rule_cache, entry); - /* remove the handle from the database */ - ipa_id_remove(id); + /* remove the handle from the database */ + ipa_id_remove(id); + } } for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { @@ -1456,16 +1478,21 @@ int ipa2_reset_flt(enum ipa_ip_type ip) mutex_unlock(&ipa_ctx->lock); return -EFAULT; } - list_del(&entry->link); - entry->tbl->rule_cnt--; - if (entry->rt_tbl) - entry->rt_tbl->ref_cnt--; - entry->cookie = 0; - id = entry->id; - kmem_cache_free(ipa_ctx->flt_rule_cache, entry); - /* remove the handle from the database */ - ipa_id_remove(id); + if (!user_only || + entry->ipacm_installed) { + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + entry->cookie = 0; + id = entry->id; + kmem_cache_free(ipa_ctx->flt_rule_cache, + entry); + + /* remove the handle from the database */ + ipa_id_remove(id); + } } } mutex_unlock(&ipa_ctx->lock); @@ -1485,14 +1512,14 @@ void ipa_install_dflt_flt_rules(u32 ipa_ep_idx) tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; rule.action = IPA_PASS_TO_EXCEPTION; __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true, - &ep->dflt_flt4_rule_hdl); + &ep->dflt_flt4_rule_hdl, false); ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4); tbl->sticky_rear = true; tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; rule.action = IPA_PASS_TO_EXCEPTION; __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true, - &ep->dflt_flt6_rule_hdl); + &ep->dflt_flt6_rule_hdl, false); ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6); tbl->sticky_rear = true; mutex_unlock(&ipa_ctx->lock); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c index fbbb3f20b571..15c8f923d4f4 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -543,7 +543,7 @@ int __ipa_commit_hdr_v2_6L(void) } static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, - bool add_ref_hdr) + bool add_ref_hdr, bool user_only) { struct ipa_hdr_entry *hdr_entry; struct ipa_hdr_proc_ctx_entry *entry; @@ -581,6 +581,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, if (add_ref_hdr) hdr_entry->ref_cnt++; entry->cookie = IPA_PROC_HDR_COOKIE; + entry->ipacm_installed = user_only; needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ? 
sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) : @@ -619,6 +620,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, */ offset->offset = htbl->end; offset->bin = bin; + offset->ipacm_installed = user_only; htbl->end += ipa_hdr_proc_ctx_bin_sz[bin]; list_add(&offset->link, &htbl->head_offset_list[bin]); @@ -627,6 +629,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, offset = list_first_entry(&htbl->head_free_offset_list[bin], struct ipa_hdr_proc_ctx_offset_entry, link); + offset->ipacm_installed = user_only; list_move(&offset->link, &htbl->head_offset_list[bin]); } @@ -664,7 +667,7 @@ bad_len: } -static int __ipa_add_hdr(struct ipa_hdr_add *hdr) +static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user) { struct ipa_hdr_entry *entry; struct ipa_hdr_offset_entry *offset = NULL; @@ -700,6 +703,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; entry->eth2_ofst = hdr->eth2_ofst; entry->cookie = IPA_HDR_COOKIE; + entry->ipacm_installed = user; if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) bin = IPA_HDR_BIN0; @@ -760,6 +764,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) list_add(&offset->link, &htbl->head_offset_list[bin]); entry->offset_entry = offset; + offset->ipacm_installed = user; } } else { entry->is_hdr_proc_ctx = false; @@ -769,6 +774,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) struct ipa_hdr_offset_entry, link); list_move(&offset->link, &htbl->head_offset_list[bin]); entry->offset_entry = offset; + offset->ipacm_installed = user; } list_add(&entry->link, &htbl->head_hdr_entry_list); @@ -800,7 +806,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) IPADBG("adding processing context for header %s\n", hdr->name); proc_ctx.type = IPA_HDR_PROC_NONE; proc_ctx.hdr_hdl = id; - if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) { + if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) { IPAERR("failed to add hdr proc ctx\n"); goto fail_add_proc_ctx; } @@ -960,6 +966,21 @@ int __ipa_del_hdr(u32 hdr_hdl, bool by_user) */ int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs) { + return ipa2_add_hdr_usr(hdrs, false); +} + +/** + * ipa2_add_hdr_usr() - add the specified headers to SW + * and optionally commit them to IPA HW + * @hdrs: [inout] set of headers to add + * @user_only: [in] indicate installed from user + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only) +{ int i; int result = -EFAULT; @@ -977,7 +998,7 @@ int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs) IPADBG("adding %d headers to IPA driver internal data struct\n", hdrs->num_hdrs); for (i = 0; i < hdrs->num_hdrs; i++) { - if (__ipa_add_hdr(&hdrs->hdr[i])) { + if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) { IPAERR_RL("failed to add hdr %d\n", i); hdrs->hdr[i].status = -1; } else { @@ -997,7 +1018,6 @@ bail: mutex_unlock(&ipa_ctx->lock); return result; } - /** * ipa2_del_hdr_by_user() - Remove the specified headers * from SW and optionally commit them to IPA HW @@ -1063,12 +1083,14 @@ int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls) * ipa2_add_hdr_proc_ctx() - add the specified headers to SW * and optionally commit them to IPA HW * @proc_ctxs: [inout] set of processing context headers to add + * @user_only: [in] indicate installed by user-space module * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa2_add_hdr_proc_ctx(struct 
ipa_ioc_add_hdr_proc_ctx *proc_ctxs) +int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) { int i; int result = -EFAULT; @@ -1089,7 +1111,8 @@ int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) IPADBG("adding %d header processing contextes to IPA driver\n", proc_ctxs->num_proc_ctxs); for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) { - if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) { + if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], + true, user_only)) { IPAERR_RL("failed to add hdr pric ctx %d\n", i); proc_ctxs->proc_ctx[i].status = -1; } else { @@ -1211,11 +1234,12 @@ bail: * ipa2_reset_hdr() - reset the current header table in SW (does not commit to * HW) * + * @user_only: [in] indicate delete rules installed by userspace * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa2_reset_hdr(void) +int ipa2_reset_hdr(bool user_only) { struct ipa_hdr_entry *entry; struct ipa_hdr_entry *next; @@ -1231,9 +1255,9 @@ int ipa2_reset_hdr(void) * issue a reset on the routing module since routing rules point to * header table entries */ - if (ipa2_reset_rt(IPA_IP_v4)) + if (ipa2_reset_rt(IPA_IP_v4, user_only)) IPAERR("fail to reset v4 rt\n"); - if (ipa2_reset_rt(IPA_IP_v6)) + if (ipa2_reset_rt(IPA_IP_v6, user_only)) IPAERR("fail to reset v4 rt\n"); mutex_lock(&ipa_ctx->lock); @@ -1262,21 +1286,23 @@ int ipa2_reset_hdr(void) WARN_ON(1); return -EFAULT; } - if (entry->is_hdr_proc_ctx) { - dma_unmap_single(ipa_ctx->pdev, - entry->phys_base, - entry->hdr_len, - DMA_TO_DEVICE); - entry->proc_ctx = NULL; - } - list_del(&entry->link); - entry->ref_cnt = 0; - entry->cookie = 0; - /* remove the handle from the database */ - ipa_id_remove(entry->id); - kmem_cache_free(ipa_ctx->hdr_cache, entry); + if (!user_only || entry->ipacm_installed) { + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + entry->proc_ctx = NULL; + } + list_del(&entry->link); + entry->ref_cnt = 0; + entry->cookie = 0; + /* remove the handle from the database */ + ipa_id_remove(entry->id); + kmem_cache_free(ipa_ctx->hdr_cache, entry); + } } for (i = 0; i < IPA_HDR_BIN_MAX; i++) { list_for_each_entry_safe(off_entry, off_next, @@ -1290,14 +1316,23 @@ int ipa2_reset_hdr(void) if (off_entry->offset == 0) continue; - list_del(&off_entry->link); - kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry); + if (!user_only || + off_entry->ipacm_installed) { + list_del(&off_entry->link); + kmem_cache_free(ipa_ctx->hdr_offset_cache, + off_entry); + } } list_for_each_entry_safe(off_entry, off_next, &ipa_ctx->hdr_tbl.head_free_offset_list[i], link) { - list_del(&off_entry->link); - kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry); + + if (!user_only || + off_entry->ipacm_installed) { + list_del(&off_entry->link); + kmem_cache_free(ipa_ctx->hdr_offset_cache, + off_entry); + } } } /* there is one header of size 8 */ @@ -1316,30 +1351,43 @@ int ipa2_reset_hdr(void) WARN_ON(1); return -EFAULT; } - list_del(&ctx_entry->link); - ctx_entry->ref_cnt = 0; - ctx_entry->cookie = 0; - /* remove the handle from the database */ - ipa_id_remove(ctx_entry->id); - kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, ctx_entry); + if (!user_only || + ctx_entry->ipacm_installed) { + list_del(&ctx_entry->link); + ctx_entry->ref_cnt = 0; + ctx_entry->cookie = 0; + /* remove the handle from the database */ + ipa_id_remove(ctx_entry->id); + kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, + 
ctx_entry); + } } for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { list_for_each_entry_safe(ctx_off_entry, ctx_off_next, &ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i], link) { - list_del(&ctx_off_entry->link); - kmem_cache_free(ipa_ctx->hdr_proc_ctx_offset_cache, + if (!user_only || + ctx_off_entry->ipacm_installed) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa_ctx->hdr_proc_ctx_offset_cache, ctx_off_entry); + } } list_for_each_entry_safe(ctx_off_entry, ctx_off_next, &ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i], link) { - list_del(&ctx_off_entry->link); - kmem_cache_free(ipa_ctx->hdr_proc_ctx_offset_cache, - ctx_off_entry); + + if (!user_only || + ctx_off_entry->ipacm_installed) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } } } ipa_ctx->hdr_proc_ctx_tbl.end = 0; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h index 28689eb83d4e..1d34564664bc 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -242,6 +242,8 @@ struct ipa_smmu_cb_ctx { * @tbl: filter table * @rt_tbl: routing table * @hw_len: entry's size + * @id: rule handle - globally unique + * @ipacm_installed: indicate if installed by ipacm */ struct ipa_flt_entry { struct list_head link; @@ -251,6 +253,7 @@ struct ipa_flt_entry { struct ipa_rt_tbl *rt_tbl; u32 hw_len; int id; + bool ipacm_installed; }; /** @@ -305,6 +308,7 @@ struct ipa_rt_tbl { * @is_eth2_ofst_valid: is eth2_ofst field valid? * @eth2_ofst: offset to start of Ethernet-II/802.3 header * @user_deleted: is the header deleted by the user? + * @ipacm_installed: indicate if installed by ipacm */ struct ipa_hdr_entry { struct list_head link; @@ -323,6 +327,7 @@ struct ipa_hdr_entry { u8 is_eth2_ofst_valid; u16 eth2_ofst; bool user_deleted; + bool ipacm_installed; }; /** @@ -346,11 +351,13 @@ struct ipa_hdr_tbl { * @link: entry's link in global processing context header offset entries list * @offset: the offset * @bin: bin + * @ipacm_installed: indicate if installed by ipacm */ struct ipa_hdr_proc_ctx_offset_entry { struct list_head link; u32 offset; u32 bin; + bool ipacm_installed; }; /** @@ -387,6 +394,7 @@ struct ipa_hdr_proc_ctx_add_hdr_cmd_seq { * @ref_cnt: reference counter of routing table * @id: processing context header entry id * @user_deleted: is the hdr processing context deleted by the user? 
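
A detail that is easy to miss in the reset hunks above: the user_only flag is threaded through the whole cascade. ipa2_reset_hdr() resets the routing tables first (which in turn reset filtering, since filter rules reference routing tables and routing rules reference headers), and every stage applies the same !user_only || entry->ipacm_installed test before freeing anything. A compressed sketch of that cascade, with the table-walking bodies reduced to comments and demo_* standing in for the real ipa2_reset_* functions:

/* Sketch: how user_only propagates through the reset cascade */
static int demo_reset_flt(enum ipa_ip_type ip, bool user_only)
{
	/* walk the filter tables, free entries where
	 * !user_only || entry->ipacm_installed */
	return 0;
}

static int demo_reset_rt(enum ipa_ip_type ip, bool user_only)
{
	/* filter rules point at routing tables, so reset them first */
	demo_reset_flt(ip, user_only);
	/* then walk routing tables/rules with the same ownership test */
	return 0;
}

static int demo_reset_hdr(bool user_only)
{
	/* routing rules point at header entries, so reset routing first */
	demo_reset_rt(IPA_IP_v4, user_only);
	demo_reset_rt(IPA_IP_v6, user_only);
	/* then walk header, offset and proc-ctx lists with the same test */
	return 0;
}
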
+ * @ipacm_installed: indicate if installed by ipacm */ struct ipa_hdr_proc_ctx_entry { struct list_head link; @@ -397,6 +405,7 @@ struct ipa_hdr_proc_ctx_entry { u32 ref_cnt; int id; bool user_deleted; + bool ipacm_installed; }; /** @@ -446,6 +455,8 @@ struct ipa_flt_tbl { * @hdr: header table * @proc_ctx: processing context table * @hw_len: the length of the table + * @id: rule handle - globaly unique + * @ipacm_installed: indicate if installed by ipacm */ struct ipa_rt_entry { struct list_head link; @@ -456,6 +467,7 @@ struct ipa_rt_entry { struct ipa_hdr_proc_ctx_entry *proc_ctx; u32 hw_len; int id; + bool ipacm_installed; }; /** @@ -1151,6 +1163,8 @@ struct ipa_context { struct list_head msg_list; struct list_head pull_msg_list; struct mutex msg_lock; + struct list_head msg_wlan_client_list; + struct mutex msg_wlan_client_lock; wait_queue_head_t msg_waitq; enum ipa_hw_type ipa_hw_type; enum ipa_hw_mode ipa_hw_mode; @@ -1207,6 +1221,8 @@ struct ipa_context { struct ipa_cne_evt ipa_cne_evt_req_cache[IPA_MAX_NUM_REQ_CACHE]; int num_ipa_cne_evt_req; struct mutex ipa_cne_evt_lock; + int (*q6_cleanup_cb)(void); + bool is_apps_shutdown_support; }; /** @@ -1263,6 +1279,7 @@ struct ipa_plat_drv_res { u32 ipa_rx_polling_sleep_msec; u32 ipa_polling_iteration; bool ipa_uc_monitor_holb; + bool is_apps_shutdown_support; }; struct ipa_mem_partition { @@ -1388,6 +1405,8 @@ int ipa2_disconnect(u32 clnt_hdl); */ int ipa2_reset_endpoint(u32 clnt_hdl); +void ipa2_apps_shutdown_apps_ep_reset(void); + /* * Remove ep delay */ @@ -1436,13 +1455,15 @@ int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); */ int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs); +int ipa2_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool by_user); + int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls); int ipa2_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user); int ipa2_commit_hdr(void); -int ipa2_reset_hdr(void); +int ipa2_reset_hdr(bool user_only); int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup); @@ -1453,7 +1474,8 @@ int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy); /* * Header Processing Context */ -int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); +int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); int ipa2_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); @@ -1465,11 +1487,14 @@ int ipa2_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, */ int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); +int ipa2_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, + bool user_only); + int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); int ipa2_commit_rt(enum ipa_ip_type ip); -int ipa2_reset_rt(enum ipa_ip_type ip); +int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only); int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); @@ -1484,13 +1509,16 @@ int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); */ int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); +int ipa2_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, + bool user_only); + int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); int ipa2_commit_flt(enum ipa_ip_type ip); -int ipa2_reset_flt(enum ipa_ip_type ip); +int ipa2_reset_flt(enum ipa_ip_type ip, bool user_only); /* * NAT @@ -1508,6 +1536,7 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del); */ int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff, ipa_msg_free_fn callback); +int ipa2_resend_wlan_msg(void); int 
ipa2_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback); int ipa2_deregister_pull_msg(struct ipa_msg_meta *meta); @@ -1781,7 +1810,7 @@ static inline u32 ipa_read_reg_field(void *base, u32 offset, return (ipa_read_reg(base, offset) & mask) >> shift; } -static inline void ipa_write_reg(void *base, u32 offset, u32 val) +static inline void ipa_write_reg(void __iomem *base, u32 offset, u32 val) { iowrite32(val, base + offset); } @@ -1868,6 +1897,8 @@ int ipa_tag_process(struct ipa_desc *desc, int num_descs, unsigned long timeout); int ipa_q6_pre_shutdown_cleanup(void); +int ipa_apps_shutdown_cleanup(void); +int register_ipa_platform_cb(int (*cb)(void)); int ipa_q6_post_shutdown_cleanup(void); int ipa_init_q6_smem(void); int ipa_q6_monitor_holb_mitigation(bool enable); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c index 9c4fc0ce8cc1..da56a2ed1b8d 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,7 @@ #include <linux/fs.h> #include <linux/sched.h> #include "ipa_i.h" +#include <linux/msm_ipa.h> struct ipa_intf { char name[IPA_RESOURCE_NAME_MAX]; @@ -377,6 +378,108 @@ static void ipa2_send_msg_free(void *buff, u32 len, u32 type) kfree(buff); } +static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff) +{ + struct ipa_push_msg *msg_dup; + struct ipa_wlan_msg_ex *event_ex_cur_con = NULL; + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa_wlan_msg *event_ex_cur_discon = NULL; + void *data_dup = NULL; + struct ipa_push_msg *entry; + struct ipa_push_msg *next; + int cnt = 0, total = 0, max = 0; + uint8_t mac[IPA_MAC_ADDR_SIZE]; + uint8_t mac2[IPA_MAC_ADDR_SIZE]; + + if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) { + /* debug print */ + event_ex_cur_con = buff; + for (cnt = 0; cnt < event_ex_cur_con->num_of_attribs; cnt++) { + if (event_ex_cur_con->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%02x:%02x:%02x:%02x:%02x:%02x,(%d)\n", + event_ex_cur_con->attribs[cnt].u.mac_addr[0], + event_ex_cur_con->attribs[cnt].u.mac_addr[1], + event_ex_cur_con->attribs[cnt].u.mac_addr[2], + event_ex_cur_con->attribs[cnt].u.mac_addr[3], + event_ex_cur_con->attribs[cnt].u.mac_addr[4], + event_ex_cur_con->attribs[cnt].u.mac_addr[5], + meta->msg_type); + } + } + + mutex_lock(&ipa_ctx->msg_wlan_client_lock); + msg_dup = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL); + if (msg_dup == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + return -ENOMEM; + } + msg_dup->meta = *meta; + if (meta->msg_len > 0 && buff) { + data_dup = kmalloc(meta->msg_len, GFP_KERNEL); + if (data_dup == NULL) { + IPAERR("fail to alloc data_dup container\n"); + kfree(msg_dup); + return -ENOMEM; + } + memcpy(data_dup, buff, meta->msg_len); + msg_dup->buff = data_dup; + msg_dup->callback = ipa2_send_msg_free; + } + list_add_tail(&msg_dup->link, &ipa_ctx->msg_wlan_client_list); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + } + + /* remove the cache */ + if (meta->msg_type == WLAN_CLIENT_DISCONNECT) { + /* debug print */ + event_ex_cur_discon = buff; + IPADBG("Mac %02x:%02x:%02x:%02x:%02x:%02x,msg %d\n", + event_ex_cur_discon->mac_addr[0], + 
event_ex_cur_discon->mac_addr[1], + event_ex_cur_discon->mac_addr[2], + event_ex_cur_discon->mac_addr[3], + event_ex_cur_discon->mac_addr[4], + event_ex_cur_discon->mac_addr[5], + meta->msg_type); + memcpy(mac2, + event_ex_cur_discon->mac_addr, + sizeof(mac2)); + + mutex_lock(&ipa_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, + &ipa_ctx->msg_wlan_client_list, + link) { + event_ex_list = entry->buff; + max = event_ex_list->num_of_attribs; + for (cnt = 0; cnt < max; cnt++) { + memcpy(mac, + event_ex_list->attribs[cnt].u.mac_addr, + sizeof(mac)); + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + pr_debug("%02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], + mac[3], mac[4], mac[5]); + + /* compare to delete one*/ + if (memcmp(mac2, + mac, + sizeof(mac)) == 0) { + IPADBG("clean %d\n", total); + list_del(&entry->link); + kfree(entry); + break; + } + } + } + total++; + } + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + } + return 0; +} + /** * ipa2_send_msg() - Send "message" from kernel client to IPA driver * @meta: [in] message meta-data @@ -404,7 +507,7 @@ int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff, } if (meta == NULL || (buff == NULL && callback != NULL) || - (buff != NULL && callback == NULL)) { + (buff != NULL && callback == NULL) || buff == NULL) { IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n", meta, buff, callback); return -EINVAL; @@ -436,6 +539,11 @@ int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff, mutex_lock(&ipa_ctx->msg_lock); list_add_tail(&msg->link, &ipa_ctx->msg_list); + /* support for softap client event cache */ + if (wlan_msg_process(meta, buff)) + IPAERR("wlan_msg_process failed\n"); + + /* unlock only after process */ mutex_unlock(&ipa_ctx->msg_lock); IPA_STATS_INC_CNT(ipa_ctx->stats.msg_w[meta->msg_type]); @@ -447,6 +555,73 @@ int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff, } /** + * ipa2_resend_wlan_msg() - Resend cached "message" to IPACM + * + * resend wlan client connect events to user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_resend_wlan_msg(void) +{ + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa_push_msg *entry; + struct ipa_push_msg *next; + int cnt = 0, total = 0; + struct ipa_push_msg *msg; + void *data = NULL; + + IPADBG("\n"); + + mutex_lock(&ipa_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, &ipa_ctx->msg_wlan_client_list, + link) { + + event_ex_list = entry->buff; + for (cnt = 0; cnt < event_ex_list->num_of_attribs; cnt++) { + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%d-Mac %02x:%02x:%02x:%02x:%02x:%02x\n", + total, + event_ex_list->attribs[cnt].u.mac_addr[0], + event_ex_list->attribs[cnt].u.mac_addr[1], + event_ex_list->attribs[cnt].u.mac_addr[2], + event_ex_list->attribs[cnt].u.mac_addr[3], + event_ex_list->attribs[cnt].u.mac_addr[4], + event_ex_list->attribs[cnt].u.mac_addr[5]); + } + } + + msg = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL); + if (msg == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg->meta = entry->meta; + data = kmalloc(entry->meta.msg_len, GFP_KERNEL); + if (data == NULL) { + IPAERR("fail to alloc data container\n"); + kfree(msg); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + memcpy(data, entry->buff, entry->meta.msg_len); + msg->buff = data; + msg->callback = 
ipa2_send_msg_free; + mutex_lock(&ipa_ctx->msg_lock); + list_add_tail(&msg->link, &ipa_ctx->msg_list); + mutex_unlock(&ipa_ctx->msg_lock); + wake_up(&ipa_ctx->msg_waitq); + + total++; + } + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return 0; +} + +/** * ipa2_register_pull_msg() - register pull message type * @meta: [in] message meta-data * @callback: [in] pull callback diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c index 1be68b31656b..cc3d26764048 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c @@ -785,12 +785,6 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) base_addr = ipa_ctx->nat_mem.tmp_dma_handle; } - if (del->public_ip_addr == 0) { - IPADBG("Bad Parameter\n"); - result = -EPERM; - goto bail; - } - memset(&desc, 0, sizeof(desc)); /* NO-OP IC for ensuring that IPA pipeline is empty */ reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c index e33d0d86ac95..78555729d78a 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -514,6 +514,14 @@ int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) int rc; int i; + /* check if modem up */ + if (!qmi_indication_fin || + !qmi_modem_init_fin || + !ipa_q6_clnt) { + IPAWANDBG("modem QMI haven't up yet\n"); + return -EINVAL; + } + /* check if the filter rules from IPACM is valid */ if (req->filter_spec_list_len == 0) { IPAWANDBG("IPACM pass zero rules to Q6\n"); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c index 7cc3c380ee71..e4a3a72ee670 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c @@ -1026,7 +1026,8 @@ static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry) } static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, - const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl) + const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl, + bool user) { struct ipa_rt_tbl *tbl; struct ipa_rt_entry *entry; @@ -1101,6 +1102,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, IPADBG_LOW("rule_cnt=%d\n", tbl->rule_cnt); *rule_hdl = id; entry->id = id; + entry->ipacm_installed = user; return 0; @@ -1126,6 +1128,21 @@ error: */ int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) { + return ipa2_add_rt_rule_usr(rules, false); +} + +/** + * ipa2_add_rt_rule_usr() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * @user_only: [in] indicate installed by userspace module + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only) +{ int i; int ret; @@ -1139,7 +1156,8 @@ int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name, &rules->rules[i].rule, rules->rules[i].at_rear, - &rules->rules[i].rt_rule_hdl)) { + &rules->rules[i].rt_rule_hdl, + user_only)) 
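
The caching added in wlan_msg_process() and the replay in ipa2_resend_wlan_msg() exist so that a restarted IPACM can rediscover WLAN clients that connected while it was down: connect events are duplicated into msg_wlan_client_list, disconnects drop the matching MAC from the cache, and IPA_IOC_QUERY_WLAN_CLIENT re-queues the cached copies onto the normal message stream. A hedged userspace sketch of how a daemon might use it; the /dev/ipa node name and the read() framing (struct ipa_msg_meta followed by the payload) follow the usual msm_ipa conventions but are assumptions here, not something this patch spells out:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_ipa.h>

/* Assumed device node for the IPA char device */
#define DEMO_IPA_DEV "/dev/ipa"

int demo_query_cached_wlan_clients(void)
{
	char buf[sizeof(struct ipa_msg_meta) + 1024];
	struct ipa_msg_meta meta;
	ssize_t n;
	int fd;

	fd = open(DEMO_IPA_DEV, O_RDWR);
	if (fd < 0)
		return -1;

	/* ask the driver to replay cached WLAN_CLIENT_CONNECT_EX events */
	if (ioctl(fd, IPA_IOC_QUERY_WLAN_CLIENT) < 0) {
		close(fd);
		return -1;
	}

	/*
	 * The replayed events arrive on the ordinary message stream; a real
	 * daemon reads them from a dedicated thread since read() blocks.
	 */
	n = read(fd, buf, sizeof(buf));
	if (n >= (ssize_t)sizeof(meta)) {
		memcpy(&meta, buf, sizeof(meta));
		printf("msg_type %d, len %u\n", meta.msg_type,
		       (unsigned int)meta.msg_len);
		/* payload (e.g. struct ipa_wlan_msg_ex) starts at
		 * buf + sizeof(meta) */
	}

	close(fd);
	return 0;
}
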
{ IPAERR_RL("failed to add rt rule %d\n", i); rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; } else { @@ -1308,13 +1326,14 @@ bail: /** * ipa2_reset_rt() - reset the current SW routing table of specified type * (does not commit to HW) - * @ip: The family of routing tables + * @ip: [in] The family of routing tables + * @user_only: [in] indicate delete rules installed by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa2_reset_rt(enum ipa_ip_type ip) +int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only) { struct ipa_rt_tbl *tbl; struct ipa_rt_tbl *tbl_next; @@ -1324,6 +1343,7 @@ int ipa2_reset_rt(enum ipa_ip_type ip) struct ipa_rt_tbl_set *rset; u32 apps_start_idx; int id; + bool tbl_user = false; if (ip >= IPA_IP_MAX) { IPAERR_RL("bad parm\n"); @@ -1343,7 +1363,7 @@ int ipa2_reset_rt(enum ipa_ip_type ip) * issue a reset on the filtering module of same IP type since * filtering rules point to routing tables */ - if (ipa2_reset_flt(ip)) + if (ipa2_reset_flt(ip, user_only)) IPAERR_RL("fail to reset flt ip=%d\n", ip); set = &ipa_ctx->rt_tbl_set[ip]; @@ -1351,6 +1371,7 @@ int ipa2_reset_rt(enum ipa_ip_type ip) mutex_lock(&ipa_ctx->lock); IPADBG("reset rt ip=%d\n", ip); list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) { + tbl_user = false; list_for_each_entry_safe(rule, rule_next, &tbl->head_rt_rule_list, link) { if (ipa_id_find(rule->id) == NULL) { @@ -1359,25 +1380,34 @@ int ipa2_reset_rt(enum ipa_ip_type ip) return -EFAULT; } + /* indicate if tbl used for user-specified rules*/ + if (rule->ipacm_installed) { + IPADBG("tbl_user %d, tbl-index %d\n", + tbl_user, tbl->id); + tbl_user = true; + } /* * for the "default" routing tbl, remove all but the * last rule */ if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1) continue; - - list_del(&rule->link); - tbl->rule_cnt--; - if (rule->hdr) - __ipa_release_hdr(rule->hdr->id); - else if (rule->proc_ctx) - __ipa_release_hdr_proc_ctx(rule->proc_ctx->id); - rule->cookie = 0; - id = rule->id; - kmem_cache_free(ipa_ctx->rt_rule_cache, rule); - - /* remove the handle from the database */ - ipa_id_remove(id); + if (!user_only || + rule->ipacm_installed) { + list_del(&rule->link); + tbl->rule_cnt--; + if (rule->hdr) + __ipa_release_hdr(rule->hdr->id); + else if (rule->proc_ctx) + __ipa_release_hdr_proc_ctx( + rule->proc_ctx->id); + rule->cookie = 0; + id = rule->id; + kmem_cache_free(ipa_ctx->rt_rule_cache, rule); + + /* remove the handle from the database */ + ipa_id_remove(id); + } } if (ipa_id_find(tbl->id) == NULL) { @@ -1389,24 +1419,28 @@ int ipa2_reset_rt(enum ipa_ip_type ip) /* do not remove the "default" routing tbl which has index 0 */ if (tbl->idx != apps_start_idx) { - if (!tbl->in_sys) { - list_del(&tbl->link); - set->tbl_cnt--; - clear_bit(tbl->idx, - &ipa_ctx->rt_idx_bitmap[ip]); - IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n", - tbl->idx, set->tbl_cnt); - kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl); - } else { - list_move(&tbl->link, &rset->head_rt_tbl_list); - clear_bit(tbl->idx, - &ipa_ctx->rt_idx_bitmap[ip]); - set->tbl_cnt--; - IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n", - tbl->idx, set->tbl_cnt); + if (!user_only || tbl_user) { + if (!tbl->in_sys) { + list_del(&tbl->link); + set->tbl_cnt--; + clear_bit(tbl->idx, + &ipa_ctx->rt_idx_bitmap[ip]); + IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n", + tbl->idx, set->tbl_cnt); + kmem_cache_free(ipa_ctx->rt_tbl_cache, + tbl); + } else { + list_move(&tbl->link, + &rset->head_rt_tbl_list); + clear_bit(tbl->idx, 
+ &ipa_ctx->rt_idx_bitmap[ip]); + set->tbl_cnt--; + IPADBG("rst tbl_idx=%d cnt=%d\n", + tbl->idx, set->tbl_cnt); + } + /* remove the handle from the database */ + ipa_id_remove(id); } - /* remove the handle from the database */ - ipa_id_remove(id); } } mutex_unlock(&ipa_ctx->lock); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c index 76f74c058c6d..f8d6e68a0d78 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -5078,6 +5078,7 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client; api_ctrl->ipa_cfg_ep_ctrl = ipa2_cfg_ep_ctrl; api_ctrl->ipa_add_hdr = ipa2_add_hdr; + api_ctrl->ipa_add_hdr_usr = ipa2_add_hdr_usr; api_ctrl->ipa_del_hdr = ipa2_del_hdr; api_ctrl->ipa_commit_hdr = ipa2_commit_hdr; api_ctrl->ipa_reset_hdr = ipa2_reset_hdr; @@ -5087,6 +5088,7 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_add_hdr_proc_ctx = ipa2_add_hdr_proc_ctx; api_ctrl->ipa_del_hdr_proc_ctx = ipa2_del_hdr_proc_ctx; api_ctrl->ipa_add_rt_rule = ipa2_add_rt_rule; + api_ctrl->ipa_add_rt_rule_usr = ipa2_add_rt_rule_usr; api_ctrl->ipa_del_rt_rule = ipa2_del_rt_rule; api_ctrl->ipa_commit_rt = ipa2_commit_rt; api_ctrl->ipa_reset_rt = ipa2_reset_rt; @@ -5095,6 +5097,7 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_query_rt_index = ipa2_query_rt_index; api_ctrl->ipa_mdfy_rt_rule = ipa2_mdfy_rt_rule; api_ctrl->ipa_add_flt_rule = ipa2_add_flt_rule; + api_ctrl->ipa_add_flt_rule_usr = ipa2_add_flt_rule_usr; api_ctrl->ipa_del_flt_rule = ipa2_del_flt_rule; api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule; api_ctrl->ipa_commit_flt = ipa2_commit_flt; diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index 3defc03c2571..c79978038668 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -76,6 +76,8 @@ static bool egress_set, a7_ul_flt_set; static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/ static atomic_t is_initialized; static atomic_t is_ssr; +static atomic_t is_after_powerup_cmpltd; +static struct completion is_after_shutdown_cmpltd; static void *subsys_notify_handle; u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */ @@ -135,6 +137,14 @@ struct wwan_private { struct napi_struct napi; }; +static int ssr_notifier_cb(struct notifier_block *this, + unsigned long code, + void *data); + +static struct notifier_block ssr_notifier = { + .notifier_call = ssr_notifier_cb, +}; + /** * ipa_setup_a7_qmap_hdr() - Setup default a7 qmap hdr * @@ -1958,15 +1968,34 @@ static void ipa_rm_notify(void *dev, enum ipa_rm_event event, } } -/* IPA_RM related functions end*/ +/** +* q6_cleanup_cb() - IPA q6 cleanup +* +* This function is called in the sequence +* of ipa platform shutdown +*/ -static int ssr_notifier_cb(struct notifier_block *this, - unsigned long code, - void *data); +static int q6_cleanup_cb(void) +{ + int ret = 0; -static struct notifier_block ssr_notifier = { - .notifier_call = ssr_notifier_cb, -}; + IPAWANERR("Start\n"); + if 
(atomic_read(&is_initialized) && + atomic_read(&is_after_powerup_cmpltd)) { + pr_info("Wait for q6 cleanup\n"); + wait_for_completion(&is_after_shutdown_cmpltd); + pr_info("q6_cleanup_cb: Q6 SSR cleanup is taken care\n"); + } else { + if (!atomic_read(&is_initialized)) + pr_info("RmNET IPA driver is not inited\n"); + if (!atomic_read(&is_after_powerup_cmpltd)) + pr_info("Modem is not up\n"); + } + IPAWANERR("END\n"); + return ret; +} + +/* IPA_RM related functions end*/ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev, struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res) @@ -2179,6 +2208,8 @@ static int ipa_wwan_probe(struct platform_device *pdev) ipa2_proxy_clk_unvote(); } atomic_set(&is_ssr, 0); + atomic_set(&is_after_powerup_cmpltd, 0); + init_completion(&is_after_shutdown_cmpltd); pr_info("rmnet_ipa completed initialization\n"); return 0; @@ -2221,6 +2252,7 @@ setup_dflt_wan_rt_tables_err: setup_a7_qmap_hdr_err: ipa_qmi_service_exit(); atomic_set(&is_ssr, 0); + atomic_set(&is_after_powerup_cmpltd, 0); return ret; } @@ -2409,6 +2441,7 @@ static int ssr_notifier_cb(struct notifier_block *this, if (atomic_read(&is_ssr)) ipa_q6_post_shutdown_cleanup(); pr_info("IPA AFTER_SHUTDOWN handling is complete\n"); + complete(&is_after_shutdown_cmpltd); return NOTIFY_DONE; } if (SUBSYS_AFTER_POWERUP == code) { @@ -2416,6 +2449,7 @@ static int ssr_notifier_cb(struct notifier_block *this, if (!atomic_read(&is_initialized) && atomic_read(&is_ssr)) platform_driver_register(&rmnet_ipa_driver); + atomic_set(&is_after_powerup_cmpltd, 1); pr_info("IPA AFTER_POWERUP handling is complete\n"); return NOTIFY_DONE; } @@ -3222,6 +3256,7 @@ void ipa_q6_handshake_complete(bool ssr_bootup) static int __init ipa_wwan_init(void) { + int ret = 0; atomic_set(&is_initialized, 0); atomic_set(&is_ssr, 0); @@ -3231,6 +3266,11 @@ static int __init ipa_wwan_init(void) ipa_qmi_init(); + IPAWANERR("Registering for q6_cleanup_cb\n"); + ret = register_ipa_platform_cb(&q6_cleanup_cb); + if (ret == -EAGAIN) + IPAWANERR("Register for q6_cleanup_cb is un-successful\n"); + /* Register for Modem SSR */ subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM, &ssr_notifier); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 681b2d945945..dfff3b422659 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -345,6 +345,43 @@ int ipa3_active_clients_log_print_table(char *buf, int size) return cnt; } +static int ipa3_clean_modem_rule(void) +{ + struct ipa_install_fltr_rule_req_msg_v01 *req; + struct ipa_install_fltr_rule_req_ex_msg_v01 *req_ex; + int val = 0; + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v3_0) { + req = kzalloc( + sizeof(struct ipa_install_fltr_rule_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAERR("mem allocated failed!\n"); + return -ENOMEM; + } + req->filter_spec_list_valid = false; + req->filter_spec_list_len = 0; + req->source_pipe_index_valid = 0; + val = ipa3_qmi_filter_request_send(req); + kfree(req); + } else { + req_ex = kzalloc( + sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01), + GFP_KERNEL); + if (!req_ex) { + IPAERR("mem allocated failed!\n"); + return -ENOMEM; + } + req_ex->filter_spec_ex_list_valid = false; + req_ex->filter_spec_ex_list_len = 0; + req_ex->source_pipe_index_valid = 0; + val = ipa3_qmi_filter_request_ex_send(req_ex); + kfree(req_ex); + } + + return val; +} + static int ipa3_active_clients_panic_notifier(struct notifier_block *this, unsigned long event, void *ptr) { @@ -598,7 
+635,8 @@ static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type) kfree(buff); } -static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_cache) +static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, + bool is_cache) { int retval; struct ipa_wan_msg *wan_msg; @@ -906,7 +944,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) { + if (ipa3_add_hdr_usr((struct ipa_ioc_add_hdr *)param, + true)) { retval = -EFAULT; break; } @@ -986,7 +1025,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) { + if (ipa3_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param, + true)) { retval = -EFAULT; break; } @@ -1191,7 +1231,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) { + if (ipa3_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param, + true)) { retval = -EFAULT; break; } @@ -1328,19 +1369,19 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = ipa3_commit_hdr(); break; case IPA_IOC_RESET_HDR: - retval = ipa3_reset_hdr(); + retval = ipa3_reset_hdr(false); break; case IPA_IOC_COMMIT_RT: retval = ipa3_commit_rt(arg); break; case IPA_IOC_RESET_RT: - retval = ipa3_reset_rt(arg); + retval = ipa3_reset_rt(arg, false); break; case IPA_IOC_COMMIT_FLT: retval = ipa3_commit_flt(arg); break; case IPA_IOC_RESET_FLT: - retval = ipa3_reset_flt(arg); + retval = ipa3_reset_flt(arg, false); break; case IPA_IOC_GET_RT_TBL: if (copy_from_user(header, (u8 *)arg, @@ -1720,7 +1761,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } if (ipa3_add_hdr_proc_ctx( - (struct ipa_ioc_add_hdr_proc_ctx *)param)) { + (struct ipa_ioc_add_hdr_proc_ctx *)param, true)) { retval = -EFAULT; break; } @@ -1812,7 +1853,22 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; - default: /* redundant, as cmd was checked against MAXNR */ + case IPA_IOC_CLEANUP: + /*Route and filter rules will also be clean*/ + IPADBG("Got IPA_IOC_CLEANUP\n"); + retval = ipa3_reset_hdr(true); + memset(&nat_del, 0, sizeof(nat_del)); + nat_del.table_index = 0; + retval = ipa3_nat_del_cmd(&nat_del); + retval = ipa3_clean_modem_rule(); + break; + + case IPA_IOC_QUERY_WLAN_CLIENT: + IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n"); + retval = ipa3_resend_wlan_msg(); + break; + + default: IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return -ENOTTY; } @@ -1823,13 +1879,13 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } /** -* ipa3_setup_dflt_rt_tables() - Setup default routing tables -* -* Return codes: -* 0: success -* -ENOMEM: failed to allocate memory -* -EPERM: failed to add the tables -*/ + * ipa3_setup_dflt_rt_tables() - Setup default routing tables + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ int ipa3_setup_dflt_rt_tables(void) { struct ipa_ioc_add_rt_rule *rt_rule; @@ -2010,14 +2066,14 @@ static int ipa3_init_smem_region(int memory_region_size, } /** -* ipa3_init_q6_smem() - Initialize Q6 general memory and -* header memory regions in IPA. 
-* -* Return codes: -* 0: success -* -ENOMEM: failed to allocate dma memory -* -EFAULT: failed to send IPA command to initialize the memory -*/ + * ipa3_init_q6_smem() - Initialize Q6 general memory and + * header memory regions in IPA. + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate dma memory + * -EFAULT: failed to send IPA command to initialize the memory + */ int ipa3_init_q6_smem(void) { int rc; @@ -2546,12 +2602,12 @@ static int ipa3_q6_set_ex_path_to_apps(void) } /** -* ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration -* in IPA HW. This is performed in case of SSR. -* -* This is a mandatory procedure, in case one of the steps fails, the -* AP needs to restart. -*/ + * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration + * in IPA HW. This is performed in case of SSR. + * + * This is a mandatory procedure, in case one of the steps fails, the + * AP needs to restart. + */ void ipa3_q6_pre_shutdown_cleanup(void) { IPADBG_LOW("ENTER\n"); @@ -2569,8 +2625,8 @@ void ipa3_q6_pre_shutdown_cleanup(void) BUG(); } /* Remove delay from Q6 PRODs to avoid pending descriptors - * on pipe reset procedure - */ + * on pipe reset procedure + */ ipa3_q6_pipe_delay(false); IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); @@ -3465,11 +3521,11 @@ static unsigned int ipa3_get_bus_vote(void) } /** -* ipa3_enable_clks() - Turn on IPA clocks -* -* Return codes: -* None -*/ + * ipa3_enable_clks() - Turn on IPA clocks + * + * Return codes: + * None + */ void ipa3_enable_clks(void) { IPADBG("enabling IPA clocks and bus voting\n"); @@ -3498,11 +3554,11 @@ void _ipa_disable_clks_v3_0(void) } /** -* ipa3_disable_clks() - Turn off IPA clocks -* -* Return codes: -* None -*/ + * ipa3_disable_clks() - Turn off IPA clocks + * + * Return codes: + * None + */ void ipa3_disable_clks(void) { IPADBG("disabling IPA clocks and bus voting\n"); @@ -3541,28 +3597,28 @@ static void ipa3_start_tag_process(struct work_struct *work) } /** -* ipa3_active_clients_log_mod() - Log a modification in the active clients -* reference count -* -* This method logs any modification in the active clients reference count: -* It logs the modification in the circular history buffer -* It logs the modification in the hash table - looking for an entry, -* creating one if needed and deleting one if needed. -* -* @id: ipa3_active client logging info struct to hold the log information -* @inc: a boolean variable to indicate whether the modification is an increase -* or decrease -* @int_ctx: a boolean variable to indicate whether this call is being made from -* an interrupt context and therefore should allocate GFP_ATOMIC memory -* -* Method process: -* - Hash the unique identifier string -* - Find the hash in the table -* 1)If found, increase or decrease the reference count -* 2)If not found, allocate a new hash table entry struct and initialize it -* - Remove and deallocate unneeded data structure -* - Log the call in the circular history buffer (unless it is a simple call) -*/ + * ipa3_active_clients_log_mod() - Log a modification in the active clients + * reference count + * + * This method logs any modification in the active clients reference count: + * It logs the modification in the circular history buffer + * It logs the modification in the hash table - looking for an entry, + * creating one if needed and deleting one if needed. 
+ * + * @id: ipa3_active client logging info struct to hold the log information + * @inc: a boolean variable to indicate whether the modification is an increase + * or decrease + * @int_ctx: a boolean variable to indicate whether this call is being made from + * an interrupt context and therefore should allocate GFP_ATOMIC memory + * + * Method process: + * - Hash the unique identifier string + * - Find the hash in the table + * 1)If found, increase or decrease the reference count + * 2)If not found, allocate a new hash table entry struct and initialize it + * - Remove and deallocate unneeded data structure + * - Log the call in the circular history buffer (unless it is a simple call) + */ void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id, bool inc, bool int_ctx) { @@ -3632,12 +3688,12 @@ void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id, } /** -* ipa3_inc_client_enable_clks() - Increase active clients counter, and -* enable ipa clocks if necessary -* -* Return codes: -* None -*/ + * ipa3_inc_client_enable_clks() - Increase active clients counter, and + * enable ipa clocks if necessary + * + * Return codes: + * None + */ void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id) { ipa3_active_clients_lock(); @@ -3650,13 +3706,13 @@ void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id) } /** -* ipa3_inc_client_enable_clks_no_block() - Only increment the number of active -* clients if no asynchronous actions should be done. Asynchronous actions are -* locking a mutex and waking up IPA HW. -* -* Return codes: 0 for success -* -EPERM if an asynchronous action should have been done -*/ + * ipa3_inc_client_enable_clks_no_block() - Only increment the number of active + * clients if no asynchronous actions should be done. Asynchronous actions are + * locking a mutex and waking up IPA HW. + * + * Return codes: 0 for success + * -EPERM if an asynchronous action should have been done + */ int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info *id) { @@ -3718,12 +3774,12 @@ void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id) } /** -* ipa3_inc_acquire_wakelock() - Increase active clients counter, and -* acquire wakelock if necessary -* -* Return codes: -* None -*/ + * ipa3_inc_acquire_wakelock() - Increase active clients counter, and + * acquire wakelock if necessary + * + * Return codes: + * None + */ void ipa3_inc_acquire_wakelock(void) { unsigned long flags; @@ -3835,12 +3891,12 @@ static void ipa3_sps_process_irq_schedule_rel(void) } /** -* ipa3_suspend_handler() - Handles the suspend interrupt: -* wakes up the suspended peripheral by requesting its consumer -* @interrupt: Interrupt type -* @private_data: The client's private data -* @interrupt_data: Interrupt specific information data -*/ + * ipa3_suspend_handler() - Handles the suspend interrupt: + * wakes up the suspended peripheral by requesting its consumer + * @interrupt: Interrupt type + * @private_data: The client's private data + * @interrupt_data: Interrupt specific information data + */ void ipa3_suspend_handler(enum ipa_irq_type interrupt, void *private_data, void *interrupt_data) @@ -3903,12 +3959,12 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt, } /** -* ipa3_restore_suspend_handler() - restores the original suspend IRQ handler -* as it was registered in the IPA init sequence. 
-* Return codes: -* 0: success -* -EPERM: failed to remove current handler or failed to add original handler -* */ + * ipa3_restore_suspend_handler() - restores the original suspend IRQ handler + * as it was registered in the IPA init sequence. + * Return codes: + * 0: success + * -EPERM: failed to remove current handler or failed to add original handler + */ int ipa3_restore_suspend_handler(void) { int result = 0; @@ -4497,39 +4553,37 @@ static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx) } /** -* ipa3_pre_init() - Initialize the IPA Driver. -* This part contains all initialization which doesn't require IPA HW, such -* as structure allocations and initializations, register writes, etc. -* -* @resource_p: contain platform specific values from DST file -* @pdev: The platform device structure representing the IPA driver -* -* Function initialization process: -* - Allocate memory for the driver context data struct -* - Initializing the ipa3_ctx with: -* 1)parsed values from the dts file -* 2)parameters passed to the module initialization -* 3)read HW values(such as core memory size) -* - Map IPA core registers to CPU memory -* - Restart IPA core(HW reset) -* - Set configuration for IPA BAM via BAM_CNFG_BITS -* - Initialize the look-aside caches(kmem_cache/slab) for filter, -* routing and IPA-tree -* - Create memory pool with 4 objects for DMA operations(each object -* is 512Bytes long), this object will be use for tx(A5->IPA) -* - Initialize lists head(routing,filter,hdr,system pipes) -* - Initialize mutexes (for ipa_ctx and NAT memory mutexes) -* - Initialize spinlocks (for list related to A5<->IPA pipes) -* - Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq" -* - Initialize Red-Black-Tree(s) for handles of header,routing rule, -* routing table ,filtering rule -* - Initialize the filter block by committing IPV4 and IPV6 default rules -* - Create empty routing table in system memory(no committing) -* - Initialize pipes memory pool with ipa3_pipe_mem_init for supported platforms -* - Create a char-device for IPA -* - Initialize IPA RM (resource manager) -* - Configure GSI registers (in GSI case) -*/ + * ipa3_pre_init() - Initialize the IPA Driver. + * This part contains all initialization which doesn't require IPA HW, such + * as structure allocations and initializations, register writes, etc. 
+ * + * @resource_p: contain platform specific values from DST file + * @pdev: The platform device structure representing the IPA driver + * + * Function initialization process: + * Allocate memory for the driver context data struct + * Initializing the ipa3_ctx with : + * 1)parsed values from the dts file + * 2)parameters passed to the module initialization + * 3)read HW values(such as core memory size) + * Map IPA core registers to CPU memory + * Restart IPA core(HW reset) + * Initialize the look-aside caches(kmem_cache/slab) for filter, + * routing and IPA-tree + * Create memory pool with 4 objects for DMA operations(each object + * is 512Bytes long), this object will be use for tx(A5->IPA) + * Initialize lists head(routing, hdr, system pipes) + * Initialize mutexes (for ipa_ctx and NAT memory mutexes) + * Initialize spinlocks (for list related to A5<->IPA pipes) + * Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq" + * Initialize Red-Black-Tree(s) for handles of header,routing rule, + * routing table ,filtering rule + * Initialize the filter block by committing IPV4 and IPV6 default rules + * Create empty routing table in system memory(no committing) + * Create a char-device for IPA + * Initialize IPA RM (resource manager) + * Configure GSI registers (in GSI case) + */ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, struct device *ipa_dev) { @@ -4887,6 +4941,10 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, init_waitqueue_head(&ipa3_ctx->msg_waitq); mutex_init(&ipa3_ctx->msg_lock); + /* store wlan client-connect-msg-list */ + INIT_LIST_HEAD(&ipa3_ctx->msg_wlan_client_list); + mutex_init(&ipa3_ctx->msg_wlan_client_lock); + mutex_init(&ipa3_ctx->lock); mutex_init(&ipa3_ctx->nat_mem.lock); mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex); @@ -5873,7 +5931,7 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p, * * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP. * This will postpone the suspend operation until IPA is no longer used by AP. -*/ + */ int ipa3_ap_suspend(struct device *dev) { int i; @@ -5899,14 +5957,14 @@ int ipa3_ap_suspend(struct device *dev) } /** -* ipa3_ap_resume() - resume callback for runtime_pm -* @dev: pointer to device -* -* This callback will be invoked by the runtime_pm framework when an AP resume -* operation is invoked. -* -* Always returns 0 since resume should always succeed. -*/ + * ipa3_ap_resume() - resume callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP resume + * operation is invoked. + * + * Always returns 0 since resume should always succeed. + */ int ipa3_ap_resume(struct device *dev) { return 0; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index 69dda048f2bb..0f86194488c0 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
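/*
 * The ipa3_ap_suspend()/ipa3_ap_resume() comments above spell out the
 * runtime-PM contract: suspend returns -EAGAIN while the hardware is
 * still in use so the PM core retries later, and resume always
 * succeeds. A kernel-style sketch of that contract under hypothetical
 * names (the real callbacks walk the driver's endpoint table instead
 * of a single counter):
 */
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/pm.h>

static atomic_t pipes_in_use;   /* stand-in for "an AP client still owns a pipe" */

static int demo_runtime_suspend(struct device *dev)
{
    /* -EAGAIN asks the runtime-PM core to retry the suspend later. */
    if (atomic_read(&pipes_in_use))
        return -EAGAIN;

    /* ... gate clocks / drop the bus vote here ... */
    return 0;
}

static int demo_runtime_resume(struct device *dev)
{
    /* ... ungate clocks ... */
    return 0;                   /* resume is expected to always succeed */
}

static const struct dev_pm_ops demo_pm_ops = {
    SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};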
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -62,8 +62,16 @@ int ipa3_enable_data_path(u32 clnt_hdl) IPADBG("Enabling data path\n"); if (IPA_CLIENT_IS_CONS(ep->client)) { - memset(&holb_cfg, 0 , sizeof(holb_cfg)); - holb_cfg.en = IPA_HOLB_TMR_DIS; + memset(&holb_cfg, 0, sizeof(holb_cfg)); + /* + * Set HOLB on USB DPL CONS to avoid IPA stall + * if DPL client is not pulling the data + * on other end from IPA hw. + */ + if (ep->client == IPA_CLIENT_USB_DPL_CONS) + holb_cfg.en = IPA_HOLB_TMR_EN; + else + holb_cfg.en = IPA_HOLB_TMR_DIS; holb_cfg.tmr_val = 0; res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg); } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c index 128b859ee152..03c846ea9596 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -784,7 +784,7 @@ error: static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry, const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl, - struct ipa3_flt_tbl *tbl) + struct ipa3_flt_tbl *tbl, bool user) { int id; @@ -809,6 +809,7 @@ static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry, } } (*entry)->rule_id = id; + (*entry)->ipacm_installed = user; return 0; @@ -846,7 +847,7 @@ ipa_insert_failed: static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip, const struct ipa_flt_rule *rule, u8 add_rear, - u32 *rule_hdl) + u32 *rule_hdl, bool user) { struct ipa3_flt_entry *entry; struct ipa3_rt_tbl *rt_tbl = NULL; @@ -854,7 +855,7 @@ static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip, if (__ipa_validate_flt_rule(rule, &rt_tbl, ip)) goto error; - if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl)) + if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, user)) goto error; if (add_rear) { @@ -904,7 +905,7 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl, if (__ipa_validate_flt_rule(rule, &rt_tbl, ip)) goto error; - if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl)) + if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, true)) goto error; list_add(&entry->link, &((*add_after_entry)->link)); @@ -1054,7 +1055,7 @@ static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx) static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, const struct ipa_flt_rule *rule, u8 add_rear, - u32 *rule_hdl) + u32 *rule_hdl, bool user) { struct ipa3_flt_tbl *tbl; int ipa_ep_idx; @@ -1072,12 +1073,13 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip]; IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep); - return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl); + return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user); } /** * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally * commit to IPA HW + * @rules: [inout] set of filtering rules to add * * Returns: 0 on success, negative on failure * @@ -1085,6 +1087,20 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, */ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) { + return ipa3_add_flt_rule_usr(rules, false); +} +/** + * ipa3_add_flt_rule_usr() - Add the specified filtering rules to + * SW and optionally commit to IPA HW + * @rules: [inout] set of filtering rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, 
negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only) +{ int i; int result; @@ -1100,7 +1116,8 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) result = __ipa_add_ep_flt_rule(rules->ip, rules->ep, &rules->rules[i].rule, rules->rules[i].at_rear, - &rules->rules[i].flt_rule_hdl); + &rules->rules[i].flt_rule_hdl, + user_only); else result = -1; @@ -1347,18 +1364,20 @@ bail: * ipa3_reset_flt() - Reset the current SW filtering table of specified type * (does not commit to HW) * @ip: [in] the family of routing tables + * @user_only: [in] indicate rules deleted by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa3_reset_flt(enum ipa_ip_type ip) +int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only) { struct ipa3_flt_tbl *tbl; struct ipa3_flt_entry *entry; struct ipa3_flt_entry *next; int i; int id; + int rule_id; if (ip >= IPA_IP_MAX) { IPAERR_RL("bad parm\n"); @@ -1378,21 +1397,27 @@ int ipa3_reset_flt(enum ipa_ip_type ip) mutex_unlock(&ipa3_ctx->lock); return -EFAULT; } - list_del(&entry->link); - entry->tbl->rule_cnt--; - if (entry->rt_tbl) - entry->rt_tbl->ref_cnt--; - /* if rule id was allocated from idr, remove it */ - if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) && - (entry->rule_id >= ipahal_get_low_rule_id())) - idr_remove(&entry->tbl->rule_ids, - entry->rule_id); - entry->cookie = 0; - id = entry->id; - kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); - - /* remove the handle from the database */ - ipa3_id_remove(id); + + if (!user_only || + entry->ipacm_installed) { + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + /* if rule id was allocated from idr, remove */ + rule_id = entry->rule_id; + id = entry->id; + if ((rule_id < ipahal_get_rule_id_hi_bit()) && + (rule_id >= ipahal_get_low_rule_id())) + idr_remove(&entry->tbl->rule_ids, + rule_id); + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->flt_rule_cache, + entry); + + /* remove the handle from the database */ + ipa3_id_remove(id); + } } } mutex_unlock(&ipa3_ctx->lock); @@ -1418,14 +1443,14 @@ void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx) tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; rule.action = IPA_PASS_TO_EXCEPTION; __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true, - &ep->dflt_flt4_rule_hdl); + &ep->dflt_flt4_rule_hdl, false); ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4); tbl->sticky_rear = true; tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; rule.action = IPA_PASS_TO_EXCEPTION; __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true, - &ep->dflt_flt6_rule_hdl); + &ep->dflt_flt6_rule_hdl, false); ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6); tbl->sticky_rear = true; mutex_unlock(&ipa3_ctx->lock); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c index b5b8643f24a9..f71eb952cde2 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
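/*
 * ipa3_reset_flt() above now takes a user_only flag: every entry
 * records at creation time whether it was installed from userspace
 * (ipacm_installed), and a user-only reset frees just those entries
 * while kernel-installed ones, such as the default exception rules,
 * survive. The same idea is applied to headers and routing below. A
 * self-contained sketch of that selective flush, hypothetical names:
 */
#include <stdbool.h>
#include <stdlib.h>

struct rule {
    struct rule *next;
    bool user_installed;        /* mirrors the ipacm_installed flag */
    /* ... match criteria, handles, counters ... */
};

/* Drop either every rule, or only the rules added from userspace. */
static void reset_rules(struct rule **head, bool user_only)
{
    struct rule **pp = head;

    while (*pp) {
        struct rule *cur = *pp;

        if (!user_only || cur->user_installed) {
            *pp = cur->next;    /* unlink */
            free(cur);          /* release the handle and its bookkeeping */
        } else {
            pp = &cur->next;    /* keep the kernel-installed rule */
        }
    }
}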
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -315,7 +315,7 @@ end: } static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, - bool add_ref_hdr) + bool add_ref_hdr, bool user_only) { struct ipa3_hdr_entry *hdr_entry; struct ipa3_hdr_proc_ctx_entry *entry; @@ -360,6 +360,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, if (add_ref_hdr) hdr_entry->ref_cnt++; entry->cookie = IPA_PROC_HDR_COOKIE; + entry->ipacm_installed = user_only; needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type); @@ -396,6 +397,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, */ offset->offset = htbl->end; offset->bin = bin; + offset->ipacm_installed = user_only; htbl->end += ipa_hdr_proc_ctx_bin_sz[bin]; list_add(&offset->link, &htbl->head_offset_list[bin]); @@ -404,6 +406,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, offset = list_first_entry(&htbl->head_free_offset_list[bin], struct ipa3_hdr_proc_ctx_offset_entry, link); + offset->ipacm_installed = user_only; list_move(&offset->link, &htbl->head_offset_list[bin]); } @@ -441,7 +444,7 @@ bad_len: } -static int __ipa_add_hdr(struct ipa_hdr_add *hdr) +static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user) { struct ipa3_hdr_entry *entry; struct ipa_hdr_offset_entry *offset = NULL; @@ -476,6 +479,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; entry->eth2_ofst = hdr->eth2_ofst; entry->cookie = IPA_HDR_COOKIE; + entry->ipacm_installed = user; if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) bin = IPA_HDR_BIN0; @@ -527,6 +531,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) list_add(&offset->link, &htbl->head_offset_list[bin]); entry->offset_entry = offset; + offset->ipacm_installed = user; } } else { entry->is_hdr_proc_ctx = false; @@ -535,6 +540,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) struct ipa_hdr_offset_entry, link); list_move(&offset->link, &htbl->head_offset_list[bin]); entry->offset_entry = offset; + offset->ipacm_installed = user; } list_add(&entry->link, &htbl->head_hdr_entry_list); @@ -566,7 +572,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) IPADBG("adding processing context for header %s\n", hdr->name); proc_ctx.type = IPA_HDR_PROC_NONE; proc_ctx.hdr_hdl = id; - if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) { + if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) { IPAERR("failed to add hdr proc ctx\n"); goto fail_add_proc_ctx; } @@ -728,6 +734,21 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user) */ int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs) { + return ipa3_add_hdr_usr(hdrs, false); +} + +/** + * ipa3_add_hdr_usr() - add the specified headers to SW + * and optionally commit them to IPA HW + * @hdrs: [inout] set of headers to add + * @user_only: [in] indicate installed from user + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only) +{ int i; int result = -EFAULT; @@ -740,7 +761,7 @@ int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs) IPADBG("adding %d headers to IPA driver internal data struct\n", hdrs->num_hdrs); for (i = 0; i < hdrs->num_hdrs; i++) { - if (__ipa_add_hdr(&hdrs->hdr[i])) { + if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) { IPAERR_RL("failed to add hdr %d\n", i); hdrs->hdr[i].status = -1; } else { @@ -821,12 +842,14 @@ int 
ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls) * ipa3_add_hdr_proc_ctx() - add the specified headers to SW * and optionally commit them to IPA HW * @proc_ctxs: [inout] set of processing context headers to add + * @user_only: [in] indicate installed by user-space module * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) +int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) { int i; int result = -EFAULT; @@ -840,7 +863,8 @@ int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) IPADBG("adding %d header processing contextes to IPA driver\n", proc_ctxs->num_proc_ctxs); for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) { - if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) { + if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], + true, user_only)) { IPAERR_RL("failed to add hdr pric ctx %d\n", i); proc_ctxs->proc_ctx[i].status = -1; } else { @@ -955,11 +979,12 @@ bail: * ipa3_reset_hdr() - reset the current header table in SW (does not commit to * HW) * + * @user_only: [in] indicate delete rules installed by userspace * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa3_reset_hdr(void) +int ipa3_reset_hdr(bool user_only) { struct ipa3_hdr_entry *entry; struct ipa3_hdr_entry *next; @@ -975,9 +1000,9 @@ int ipa3_reset_hdr(void) * issue a reset on the routing module since routing rules point to * header table entries */ - if (ipa3_reset_rt(IPA_IP_v4)) + if (ipa3_reset_rt(IPA_IP_v4, user_only)) IPAERR("fail to reset v4 rt\n"); - if (ipa3_reset_rt(IPA_IP_v6)) + if (ipa3_reset_rt(IPA_IP_v6, user_only)) IPAERR("fail to reset v4 rt\n"); mutex_lock(&ipa3_ctx->lock); @@ -1006,21 +1031,23 @@ int ipa3_reset_hdr(void) WARN_ON(1); return -EFAULT; } - if (entry->is_hdr_proc_ctx) { - dma_unmap_single(ipa3_ctx->pdev, - entry->phys_base, - entry->hdr_len, - DMA_TO_DEVICE); - entry->proc_ctx = NULL; - } - list_del(&entry->link); - entry->ref_cnt = 0; - entry->cookie = 0; - /* remove the handle from the database */ - ipa3_id_remove(entry->id); - kmem_cache_free(ipa3_ctx->hdr_cache, entry); + if (!user_only || entry->ipacm_installed) { + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa3_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + entry->proc_ctx = NULL; + } + list_del(&entry->link); + entry->ref_cnt = 0; + entry->cookie = 0; + /* remove the handle from the database */ + ipa3_id_remove(entry->id); + kmem_cache_free(ipa3_ctx->hdr_cache, entry); + } } for (i = 0; i < IPA_HDR_BIN_MAX; i++) { list_for_each_entry_safe(off_entry, off_next, @@ -1034,14 +1061,23 @@ int ipa3_reset_hdr(void) if (off_entry->offset == 0) continue; - list_del(&off_entry->link); - kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry); + if (!user_only || + off_entry->ipacm_installed) { + list_del(&off_entry->link); + kmem_cache_free(ipa3_ctx->hdr_offset_cache, + off_entry); + } } list_for_each_entry_safe(off_entry, off_next, &ipa3_ctx->hdr_tbl.head_free_offset_list[i], link) { - list_del(&off_entry->link); - kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry); + + if (!user_only || + off_entry->ipacm_installed) { + list_del(&off_entry->link); + kmem_cache_free(ipa3_ctx->hdr_offset_cache, + off_entry); + } } } /* there is one header of size 8 */ @@ -1060,30 +1096,43 @@ int ipa3_reset_hdr(void) WARN_ON(1); return -EFAULT; } - list_del(&ctx_entry->link); - ctx_entry->ref_cnt = 0; - 
ctx_entry->cookie = 0; - /* remove the handle from the database */ - ipa3_id_remove(ctx_entry->id); - kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry); + if (!user_only || + ctx_entry->ipacm_installed) { + list_del(&ctx_entry->link); + ctx_entry->ref_cnt = 0; + ctx_entry->cookie = 0; + /* remove the handle from the database */ + ipa3_id_remove(ctx_entry->id); + kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, + ctx_entry); + } } for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { list_for_each_entry_safe(ctx_off_entry, ctx_off_next, &ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i], link) { - list_del(&ctx_off_entry->link); - kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache, + if (!user_only || + ctx_off_entry->ipacm_installed) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa3_ctx->hdr_proc_ctx_offset_cache, ctx_off_entry); + } } list_for_each_entry_safe(ctx_off_entry, ctx_off_next, &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i], link) { - list_del(&ctx_off_entry->link); - kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache, - ctx_off_entry); + + if (!user_only || + ctx_off_entry->ipacm_installed) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa3_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } } } ipa3_ctx->hdr_proc_ctx_tbl.end = 0; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 7bf56688a9e7..ea98433bbdf2 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -233,6 +233,7 @@ struct ipa_smmu_cb_ctx { * @prio: rule 10bit priority which defines the order of the rule * among other rules at the same integrated table * @rule_id: rule 10bit ID to be returned in packet status + * @ipacm_installed: indicate if installed by ipacm */ struct ipa3_flt_entry { struct list_head link; @@ -244,6 +245,7 @@ struct ipa3_flt_entry { int id; u16 prio; u16 rule_id; + bool ipacm_installed; }; /** @@ -300,6 +302,7 @@ struct ipa3_rt_tbl { * @is_eth2_ofst_valid: is eth2_ofst field valid? * @eth2_ofst: offset to start of Ethernet-II/802.3 header * @user_deleted: is the header deleted by the user? + * @ipacm_installed: indicate if installed by ipacm */ struct ipa3_hdr_entry { struct list_head link; @@ -318,6 +321,7 @@ struct ipa3_hdr_entry { u8 is_eth2_ofst_valid; u16 eth2_ofst; bool user_deleted; + bool ipacm_installed; }; /** @@ -341,11 +345,13 @@ struct ipa3_hdr_tbl { * @link: entry's link in global processing context header offset entries list * @offset: the offset * @bin: bin + * @ipacm_installed: indicate if installed by ipacm */ struct ipa3_hdr_proc_ctx_offset_entry { struct list_head link; u32 offset; u32 bin; + bool ipacm_installed; }; /** @@ -358,6 +364,7 @@ struct ipa3_hdr_proc_ctx_offset_entry { * @ref_cnt: reference counter of routing table * @id: processing context header entry id * @user_deleted: is the hdr processing context deleted by the user? 
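/*
 * Note how the new entry points are introduced throughout this patch:
 * ipa3_add_hdr(), ipa3_add_flt_rule() and ipa3_add_rt_rule() keep
 * their signatures and simply forward to new *_usr variants with
 * user_only = false, while the ioctl path calls the *_usr variant
 * with true so the ownership flag gets set. A trivial sketch of that
 * wrapper pattern, hypothetical names:
 */
#include <stdbool.h>

struct rule_set;                /* opaque stand-in for the ioctl payload */

/* New entry point: the caller states who owns the rules. */
static int add_rules_usr(struct rule_set *rules, bool user_only)
{
    /* ... tag each created entry with user_only, then commit ... */
    (void)rules;
    (void)user_only;
    return 0;
}

/* Legacy entry point kept as a thin wrapper: in-kernel callers stay
 * unchanged and their rules are recorded as kernel-owned. */
static int add_rules(struct rule_set *rules)
{
    return add_rules_usr(rules, false);
}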
+ * @ipacm_installed: indicate if installed by ipacm */ struct ipa3_hdr_proc_ctx_entry { struct list_head link; @@ -368,6 +375,7 @@ struct ipa3_hdr_proc_ctx_entry { u32 ref_cnt; int id; bool user_deleted; + bool ipacm_installed; }; /** @@ -423,6 +431,8 @@ struct ipa3_flt_tbl { * @prio: rule 10bit priority which defines the order of the rule * among other rules at the integrated same table * @rule_id: rule 10bit ID to be returned in packet status + * @rule_id_valid: indicate if rule_id_valid valid or not? + * @ipacm_installed: indicate if installed by ipacm */ struct ipa3_rt_entry { struct list_head link; @@ -436,6 +446,7 @@ struct ipa3_rt_entry { u16 prio; u16 rule_id; u16 rule_id_valid; + bool ipacm_installed; }; /** @@ -1217,6 +1228,8 @@ struct ipa3_context { struct list_head msg_list; struct list_head pull_msg_list; struct mutex msg_lock; + struct list_head msg_wlan_client_list; + struct mutex msg_wlan_client_lock; wait_queue_head_t msg_waitq; enum ipa_hw_type ipa_hw_type; enum ipa3_hw_mode ipa3_hw_mode; @@ -1591,13 +1604,15 @@ int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); */ int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs); +int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool by_user); + int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls); int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user); int ipa3_commit_hdr(void); -int ipa3_reset_hdr(void); +int ipa3_reset_hdr(bool user_only); int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup); @@ -1608,7 +1623,8 @@ int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy); /* * Header Processing Context */ -int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); +int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); @@ -1620,6 +1636,9 @@ int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, */ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); +int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, + bool user_only); + int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules); int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules); @@ -1628,7 +1647,7 @@ int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); int ipa3_commit_rt(enum ipa_ip_type ip); -int ipa3_reset_rt(enum ipa_ip_type ip); +int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only); int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); @@ -1643,6 +1662,9 @@ int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); */ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); +int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, + bool user_only); + int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules); int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); @@ -1651,7 +1673,7 @@ int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); int ipa3_commit_flt(enum ipa_ip_type ip); -int ipa3_reset_flt(enum ipa_ip_type ip); +int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only); /* * NAT @@ -1672,6 +1694,7 @@ int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del); */ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, ipa_msg_free_fn callback); +int ipa3_resend_wlan_msg(void); int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback); int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta); @@ -1704,7 +1727,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, * To transfer multiple data packets * While passing the 
data descriptor list, the anchor node * should be of type struct ipa_tx_data_desc not list_head -*/ + */ int ipa3_tx_dp_mul(enum ipa_client_type dst, struct ipa_tx_data_desc *data_desc); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c index 76f37162f495..2039c1ba0a3c 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,7 @@ #include <linux/fs.h> #include <linux/sched.h> #include "ipa_i.h" +#include <linux/msm_ipa.h> struct ipa3_intf { char name[IPA_RESOURCE_NAME_MAX]; @@ -387,6 +388,108 @@ static void ipa3_send_msg_free(void *buff, u32 len, u32 type) kfree(buff); } +static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff) +{ + struct ipa3_push_msg *msg_dup; + struct ipa_wlan_msg_ex *event_ex_cur_con = NULL; + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa_wlan_msg *event_ex_cur_discon = NULL; + void *data_dup = NULL; + struct ipa3_push_msg *entry; + struct ipa3_push_msg *next; + int cnt = 0, total = 0, max = 0; + uint8_t mac[IPA_MAC_ADDR_SIZE]; + uint8_t mac2[IPA_MAC_ADDR_SIZE]; + + if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) { + /* debug print */ + event_ex_cur_con = buff; + for (cnt = 0; cnt < event_ex_cur_con->num_of_attribs; cnt++) { + if (event_ex_cur_con->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%02x:%02x:%02x:%02x:%02x:%02x,(%d)\n", + event_ex_cur_con->attribs[cnt].u.mac_addr[0], + event_ex_cur_con->attribs[cnt].u.mac_addr[1], + event_ex_cur_con->attribs[cnt].u.mac_addr[2], + event_ex_cur_con->attribs[cnt].u.mac_addr[3], + event_ex_cur_con->attribs[cnt].u.mac_addr[4], + event_ex_cur_con->attribs[cnt].u.mac_addr[5], + meta->msg_type); + } + } + + mutex_lock(&ipa3_ctx->msg_wlan_client_lock); + msg_dup = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL); + if (msg_dup == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + return -ENOMEM; + } + msg_dup->meta = *meta; + if (meta->msg_len > 0 && buff) { + data_dup = kmalloc(meta->msg_len, GFP_KERNEL); + if (data_dup == NULL) { + IPAERR("fail to alloc data_dup container\n"); + kfree(msg_dup); + return -ENOMEM; + } + memcpy(data_dup, buff, meta->msg_len); + msg_dup->buff = data_dup; + msg_dup->callback = ipa3_send_msg_free; + } + list_add_tail(&msg_dup->link, &ipa3_ctx->msg_wlan_client_list); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + } + + /* remove the cache */ + if (meta->msg_type == WLAN_CLIENT_DISCONNECT) { + /* debug print */ + event_ex_cur_discon = buff; + IPADBG("Mac %02x:%02x:%02x:%02x:%02x:%02x,msg %d\n", + event_ex_cur_discon->mac_addr[0], + event_ex_cur_discon->mac_addr[1], + event_ex_cur_discon->mac_addr[2], + event_ex_cur_discon->mac_addr[3], + event_ex_cur_discon->mac_addr[4], + event_ex_cur_discon->mac_addr[5], + meta->msg_type); + memcpy(mac2, + event_ex_cur_discon->mac_addr, + sizeof(mac2)); + + mutex_lock(&ipa3_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, + &ipa3_ctx->msg_wlan_client_list, + link) { + event_ex_list = entry->buff; + max = event_ex_list->num_of_attribs; + for (cnt = 0; cnt < max; cnt++) { + memcpy(mac, + event_ex_list->attribs[cnt].u.mac_addr, + sizeof(mac)); + if (event_ex_list->attribs[cnt].attrib_type == + 
WLAN_HDR_ATTRIB_MAC_ADDR) { + pr_debug("%02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], + mac[3], mac[4], mac[5]); + + /* compare to delete one*/ + if (memcmp(mac2, + mac, + sizeof(mac)) == 0) { + IPADBG("clean %d\n", total); + list_del(&entry->link); + kfree(entry); + break; + } + } + } + total++; + } + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + } + return 0; +} + /** * ipa3_send_msg() - Send "message" from kernel client to IPA driver * @meta: [in] message meta-data @@ -409,7 +512,7 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, void *data = NULL; if (meta == NULL || (buff == NULL && callback != NULL) || - (buff != NULL && callback == NULL)) { + (buff != NULL && callback == NULL) || buff == NULL) { IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n", meta, buff, callback); return -EINVAL; @@ -441,6 +544,11 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, mutex_lock(&ipa3_ctx->msg_lock); list_add_tail(&msg->link, &ipa3_ctx->msg_list); + /* support for softap client event cache */ + if (wlan_msg_process(meta, buff)) + IPAERR("wlan_msg_process failed\n"); + + /* unlock only after process */ mutex_unlock(&ipa3_ctx->msg_lock); IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]); @@ -452,6 +560,73 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, } /** + * ipa3_resend_wlan_msg() - Resend cached "message" to IPACM + * + * resend wlan client connect events to user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_resend_wlan_msg(void) +{ + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa3_push_msg *entry; + struct ipa3_push_msg *next; + int cnt = 0, total = 0; + struct ipa3_push_msg *msg; + void *data = NULL; + + IPADBG("\n"); + + mutex_lock(&ipa3_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, &ipa3_ctx->msg_wlan_client_list, + link) { + + event_ex_list = entry->buff; + for (cnt = 0; cnt < event_ex_list->num_of_attribs; cnt++) { + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%d-Mac %02x:%02x:%02x:%02x:%02x:%02x\n", + total, + event_ex_list->attribs[cnt].u.mac_addr[0], + event_ex_list->attribs[cnt].u.mac_addr[1], + event_ex_list->attribs[cnt].u.mac_addr[2], + event_ex_list->attribs[cnt].u.mac_addr[3], + event_ex_list->attribs[cnt].u.mac_addr[4], + event_ex_list->attribs[cnt].u.mac_addr[5]); + } + } + + msg = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL); + if (msg == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg->meta = entry->meta; + data = kmalloc(entry->meta.msg_len, GFP_KERNEL); + if (data == NULL) { + IPAERR("fail to alloc data container\n"); + kfree(msg); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + memcpy(data, entry->buff, entry->meta.msg_len); + msg->buff = data; + msg->callback = ipa3_send_msg_free; + mutex_lock(&ipa3_ctx->msg_lock); + list_add_tail(&msg->link, &ipa3_ctx->msg_list); + mutex_unlock(&ipa3_ctx->msg_lock); + wake_up(&ipa3_ctx->msg_waitq); + + total++; + } + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return 0; +} + +/** * ipa3_register_pull_msg() - register pull message type * @meta: [in] message meta-data * @callback: [in] pull callback diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index c0aef7e0ce15..6b90abf787b9 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c 
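/*
 * wlan_msg_process() above keeps a private copy of every
 * WLAN_CLIENT_CONNECT_EX message on msg_wlan_client_list and removes
 * the copy again when a WLAN_CLIENT_DISCONNECT arrives for the same
 * MAC address; ipa3_resend_wlan_msg() then replays whatever is still
 * cached so a restarted IPACM learns about clients that connected
 * earlier. A compact sketch of that MAC-keyed cache (locking omitted),
 * hypothetical names:
 */
#include <stdlib.h>
#include <string.h>

#define MAC_LEN 6

struct cached_event {
    struct cached_event *next;
    unsigned char mac[MAC_LEN];
    /* ... copy of the original connect payload ... */
};

static struct cached_event *cache_head;

/* Connect: remember the event so it can be replayed later. */
static int cache_connect(const unsigned char mac[MAC_LEN])
{
    struct cached_event *ev = calloc(1, sizeof(*ev));

    if (!ev)
        return -1;
    memcpy(ev->mac, mac, MAC_LEN);
    ev->next = cache_head;
    cache_head = ev;
    return 0;
}

/* Disconnect: forget the matching cached entry, if any. */
static void cache_disconnect(const unsigned char mac[MAC_LEN])
{
    struct cached_event **pp = &cache_head;

    while (*pp) {
        if (!memcmp((*pp)->mac, mac, MAC_LEN)) {
            struct cached_event *gone = *pp;

            *pp = gone->next;
            free(gone);
            return;
        }
        pp = &(*pp)->next;
    }
}

/* Replay: hand every cached connect event back to the listener. */
static void cache_replay(void (*deliver)(const unsigned char *mac))
{
    const struct cached_event *ev;

    for (ev = cache_head; ev; ev = ev->next)
        deliver(ev->mac);
}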
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -605,6 +605,14 @@ int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) int rc; int i; + /* check if modem up */ + if (!ipa3_qmi_indication_fin || + !ipa3_qmi_modem_init_fin || + !ipa_q6_clnt) { + IPAWANDBG("modem QMI haven't up yet\n"); + return -EINVAL; + } + /* check if the filter rules from IPACM is valid */ if (req->filter_spec_list_len == 0) IPAWANDBG("IPACM pass zero rules to Q6\n"); @@ -688,6 +696,14 @@ int ipa3_qmi_filter_request_ex_send( int rc; int i; + /* check if modem up */ + if (!ipa3_qmi_indication_fin || + !ipa3_qmi_modem_init_fin || + !ipa_q6_clnt) { + IPAWANDBG("modem QMI haven't up yet\n"); + return -EINVAL; + } + /* check if the filter rules from IPACM is valid */ if (req->filter_spec_ex_list_len == 0) { IPAWANDBG("IPACM pass zero rules to Q6\n"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index fd455f72e09e..0cfe7f92aff7 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -940,7 +940,7 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry, const struct ipa_rt_rule *rule, struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr, struct ipa3_hdr_proc_ctx_entry *proc_ctx, - u16 rule_id) + u16 rule_id, bool user) { int id; @@ -967,6 +967,7 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry, } } (*(entry))->rule_id = id; + (*(entry))->ipacm_installed = user; return 0; @@ -1012,7 +1013,7 @@ ipa_insert_failed: static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl, - u16 rule_id) + u16 rule_id, bool user) { struct ipa3_rt_tbl *tbl; struct ipa3_rt_entry *entry; @@ -1041,7 +1042,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, } if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, - rule_id)) + rule_id, user)) goto error; if (at_rear) @@ -1072,7 +1073,7 @@ static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl, if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx)) goto error; - if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0)) + if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0, true)) goto error; list_add(&entry->link, &((*add_after_entry)->link)); @@ -1101,8 +1102,24 @@ error: * * Note: Should not be called from atomic context */ + int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) { + return ipa3_add_rt_rule_usr(rules, false); +} +/** + * ipa3_add_rt_rule_usr() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * @user_only: [in] indicate installed by userspace module + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ + +int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only) +{ int i; int ret; @@ -1117,7 +1134,8 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) &rules->rules[i].rule, rules->rules[i].at_rear, &rules->rules[i].rt_rule_hdl, - 0)) { + 0, + user_only)) { IPAERR("failed to add rt rule %d\n", i); rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; } else { @@ -1162,7 +1180,7 @@ int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules) &rules->rules[i].rule, rules->rules[i].at_rear, &rules->rules[i].rt_rule_hdl, - rules->rules[i].rule_id)) { + rules->rules[i].rule_id, true)) { IPAERR("failed to add rt rule %d\n", i); rules->rules[i].status = 
IPA_RT_STATUS_OF_ADD_FAILED; } else { @@ -1439,13 +1457,14 @@ bail: /** * ipa3_reset_rt() - reset the current SW routing table of specified type * (does not commit to HW) - * @ip: The family of routing tables + * @ip: [in] The family of routing tables + * @user_only: [in] indicate delete rules installed by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa3_reset_rt(enum ipa_ip_type ip) +int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only) { struct ipa3_rt_tbl *tbl; struct ipa3_rt_tbl *tbl_next; @@ -1455,6 +1474,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip) struct ipa3_rt_tbl_set *rset; u32 apps_start_idx; int id; + bool tbl_user = false; if (ip >= IPA_IP_MAX) { IPAERR_RL("bad parm\n"); @@ -1472,7 +1492,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip) * issue a reset on the filtering module of same IP type since * filtering rules point to routing tables */ - if (ipa3_reset_flt(ip)) + if (ipa3_reset_flt(ip, user_only)) IPAERR_RL("fail to reset flt ip=%d\n", ip); set = &ipa3_ctx->rt_tbl_set[ip]; @@ -1480,6 +1500,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip) mutex_lock(&ipa3_ctx->lock); IPADBG("reset rt ip=%d\n", ip); list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) { + tbl_user = false; list_for_each_entry_safe(rule, rule_next, &tbl->head_rt_rule_list, link) { if (ipa3_id_find(rule->id) == NULL) { @@ -1488,6 +1509,12 @@ int ipa3_reset_rt(enum ipa_ip_type ip) return -EFAULT; } + /* indicate if tbl used for user-specified rules*/ + if (rule->ipacm_installed) { + IPADBG("tbl_user %d, tbl-index %d\n", + tbl_user, tbl->id); + tbl_user = true; + } /* * for the "default" routing tbl, remove all but the * last rule @@ -1495,19 +1522,23 @@ int ipa3_reset_rt(enum ipa_ip_type ip) if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1) continue; - list_del(&rule->link); - tbl->rule_cnt--; - if (rule->hdr) - __ipa3_release_hdr(rule->hdr->id); - else if (rule->proc_ctx) - __ipa3_release_hdr_proc_ctx(rule->proc_ctx->id); - rule->cookie = 0; - idr_remove(&tbl->rule_ids, rule->rule_id); - id = rule->id; - kmem_cache_free(ipa3_ctx->rt_rule_cache, rule); - - /* remove the handle from the database */ - ipa3_id_remove(id); + if (!user_only || + rule->ipacm_installed) { + list_del(&rule->link); + tbl->rule_cnt--; + if (rule->hdr) + __ipa3_release_hdr(rule->hdr->id); + else if (rule->proc_ctx) + __ipa3_release_hdr_proc_ctx( + rule->proc_ctx->id); + rule->cookie = 0; + idr_remove(&tbl->rule_ids, rule->rule_id); + id = rule->id; + kmem_cache_free(ipa3_ctx->rt_rule_cache, rule); + + /* remove the handle from the database */ + ipa3_id_remove(id); + } } if (ipa3_id_find(tbl->id) == NULL) { @@ -1519,26 +1550,30 @@ int ipa3_reset_rt(enum ipa_ip_type ip) /* do not remove the "default" routing tbl which has index 0 */ if (tbl->idx != apps_start_idx) { - idr_destroy(&tbl->rule_ids); - if (tbl->in_sys[IPA_RULE_HASHABLE] || - tbl->in_sys[IPA_RULE_NON_HASHABLE]) { - list_move(&tbl->link, &rset->head_rt_tbl_list); - clear_bit(tbl->idx, + if (!user_only || tbl_user) { + idr_destroy(&tbl->rule_ids); + if (tbl->in_sys[IPA_RULE_HASHABLE] || + tbl->in_sys[IPA_RULE_NON_HASHABLE]) { + list_move(&tbl->link, + &rset->head_rt_tbl_list); + clear_bit(tbl->idx, &ipa3_ctx->rt_idx_bitmap[ip]); - set->tbl_cnt--; - IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n", + set->tbl_cnt--; + IPADBG("rst tbl_idx=%d cnt=%d\n", tbl->idx, set->tbl_cnt); - } else { - list_del(&tbl->link); - set->tbl_cnt--; - clear_bit(tbl->idx, + } else { + list_del(&tbl->link); + set->tbl_cnt--; + 
clear_bit(tbl->idx, &ipa3_ctx->rt_idx_bitmap[ip]); - IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n", + IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n", tbl->idx, set->tbl_cnt); - kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl); + kmem_cache_free(ipa3_ctx->rt_tbl_cache, + tbl); + } + /* remove the handle from the database */ + ipa3_id_remove(id); } - /* remove the handle from the database */ - ipa3_id_remove(id); } } mutex_unlock(&ipa3_ctx->lock); @@ -1653,6 +1688,7 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL; struct ipa3_hdr_entry *hdr_entry; struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry; + if (rtrule->rule.hdr_hdl) { hdr = ipa3_id_find(rtrule->rule.hdr_hdl); if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 74176d1aa47a..f4bce295311c 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -3153,6 +3153,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client; api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl; api_ctrl->ipa_add_hdr = ipa3_add_hdr; + api_ctrl->ipa_add_hdr_usr = ipa3_add_hdr_usr; api_ctrl->ipa_del_hdr = ipa3_del_hdr; api_ctrl->ipa_commit_hdr = ipa3_commit_hdr; api_ctrl->ipa_reset_hdr = ipa3_reset_hdr; @@ -3162,6 +3163,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx; api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx; api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule; + api_ctrl->ipa_add_rt_rule_usr = ipa3_add_rt_rule_usr; api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule; api_ctrl->ipa_commit_rt = ipa3_commit_rt; api_ctrl->ipa_reset_rt = ipa3_reset_rt; @@ -3170,6 +3172,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_query_rt_index = ipa3_query_rt_index; api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule; api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule; + api_ctrl->ipa_add_flt_rule_usr = ipa3_add_flt_rule_usr; api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule; api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule; api_ctrl->ipa_commit_flt = ipa3_commit_flt; diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c index 59a9b911553c..0f6b66719ef2 100644 --- a/drivers/platform/msm/sps/sps.c +++ b/drivers/platform/msm/sps/sps.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1006,8 +1006,6 @@ static void sps_device_de_init(void) "sps:%s:BAMs are still registered", __func__); sps_map_de_init(); - - kfree(sps); } sps_mem_de_init(); @@ -2993,6 +2991,7 @@ static struct platform_driver msm_sps_driver = { .name = SPS_DRV_NAME, .owner = THIS_MODULE, .of_match_table = msm_sps_match, + .suppress_bind_attrs = true, }, .remove = msm_sps_remove, }; diff --git a/drivers/soc/qcom/hab/hab_qvm.c b/drivers/soc/qcom/hab/hab_qvm.c index 129d1deeb2f0..df60e5c1b07d 100644 --- a/drivers/soc/qcom/hab/hab_qvm.c +++ b/drivers/soc/qcom/hab/hab_qvm.c @@ -314,10 +314,7 @@ int hab_hypervisor_register(void) void hab_hypervisor_unregister(void) { - hab_hypervisor_unregister_common(); - - qvm_priv_info.probe_cnt = 0; - qvm_priv_info.curr = 0; + pr_info("unregistration is called, but do nothing\n"); } /* this happens before hypervisor register */ diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 4dfb533b724a..d2dd714f762f 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -2239,8 +2239,6 @@ static int icnss_driver_event_fw_ready_ind(void *data) set_bit(ICNSS_FW_READY, &penv->state); - icnss_call_driver_uevent(penv, ICNSS_UEVENT_FW_READY, NULL); - icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state); icnss_hw_power_off(penv); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 40ce175655e6..99f67764765f 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -231,6 +231,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Corsair K70 RGB */ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair Strafe */ + { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + /* Corsair Strafe RGB */ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | USB_QUIRK_DELAY_CTRL_MSG }, diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 8f36f3df55eb..9f73ec68a23b 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -852,6 +852,7 @@ struct dwc3_scratchpad_array { * @vbus_draw: current to be drawn from USB * @imod_interval: set the interrupt moderation interval in 250ns * increments or 0 to disable. 
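/*
 * The sps change above keeps the driver context allocated across
 * de-init and, by setting suppress_bind_attrs, removes the sysfs
 * bind/unbind knobs that could otherwise trigger that teardown while
 * the hardware is still referenced. A minimal sketch of a platform
 * driver opting out of manual unbind (hypothetical driver, not sps):
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
    return 0;
}

static int demo_remove(struct platform_device *pdev)
{
    return 0;
}

static struct platform_driver demo_driver = {
    .probe  = demo_probe,
    .remove = demo_remove,
    .driver = {
        .name  = "demo-device",
        .owner = THIS_MODULE,
        /* no /sys/bus/platform/drivers/demo-device/{bind,unbind} */
        .suppress_bind_attrs = true,
    },
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL v2");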
+ * @create_reg_debugfs: create debugfs entry to allow dwc3 register dump */ struct dwc3 { struct usb_ctrlrequest *ctrl_req; @@ -1048,6 +1049,7 @@ struct dwc3 { unsigned long l1_remote_wakeup_cnt; wait_queue_head_t wait_linkstate; + bool create_reg_debugfs; }; /* -------------------------------------------------------------------------- */ diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index 4a18847983f7..2c00b3596055 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -368,6 +368,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused) unsigned long flags; u32 reg; + if (atomic_read(&dwc->in_lpm)) { + seq_puts(s, "USB device is powered off\n"); + return 0; + } + spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GCTL); spin_unlock_irqrestore(&dwc->lock, flags); @@ -403,6 +408,11 @@ static ssize_t dwc3_mode_write(struct file *file, u32 mode = 0; char buf[32] = {}; + if (atomic_read(&dwc->in_lpm)) { + dev_err(dwc->dev, "USB device is powered off\n"); + return count; + } + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -437,6 +447,12 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused) unsigned long flags; u32 reg; + + if (atomic_read(&dwc->in_lpm)) { + seq_puts(s, "USB device is powered off\n"); + return 0; + } + spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= DWC3_DCTL_TSTCTRL_MASK; @@ -483,6 +499,11 @@ static ssize_t dwc3_testmode_write(struct file *file, u32 testmode = 0; char buf[32] = {}; + if (atomic_read(&dwc->in_lpm)) { + seq_puts(s, "USB device is powered off\n"); + return count; + } + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -521,6 +542,11 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused) enum dwc3_link_state state; u32 reg; + if (atomic_read(&dwc->in_lpm)) { + seq_puts(s, "USB device is powered off\n"); + return 0; + } + spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DSTS); state = DWC3_DSTS_USBLNKST(reg); @@ -590,6 +616,11 @@ static ssize_t dwc3_link_state_write(struct file *file, enum dwc3_link_state state = 0; char buf[32] = {}; + if (atomic_read(&dwc->in_lpm)) { + seq_puts(s, "USB device is powered off\n"); + return count; + } + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -1241,10 +1272,14 @@ int dwc3_debugfs_init(struct dwc3 *dwc) dwc->regset->nregs = ARRAY_SIZE(dwc3_regs); dwc->regset->base = dwc->regs; - file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset); - if (!file) { - ret = -ENOMEM; - goto err1; + if (dwc->create_reg_debugfs) { + file = debugfs_create_regset32("regdump", 0444, + root, dwc->regset); + if (!file) { + dev_dbg(dwc->dev, "Can't create debugfs regdump\n"); + ret = -ENOMEM; + goto err1; + } } if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index ccff562d2f48..a6f886e5b2f3 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -638,7 +638,7 @@ struct xhci_ring *xhci_stream_id_to_ring( if (!ep->stream_info) return NULL; - if (stream_id > ep->stream_info->num_streams) + if (stream_id >= ep->stream_info->num_streams) return NULL; return ep->stream_info->stream_rings[stream_id]; } diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 343fa6ff9f4b..512c84adcace 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -414,8 
+414,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct usb_yurex *dev; - int retval = 0; - int bytes_read = 0; + int len = 0; char in_buffer[20]; unsigned long flags; @@ -423,26 +422,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, mutex_lock(&dev->io_mutex); if (!dev->interface) { /* already disconnected */ - retval = -ENODEV; - goto exit; + mutex_unlock(&dev->io_mutex); + return -ENODEV; } spin_lock_irqsave(&dev->lock, flags); - bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu); + len = snprintf(in_buffer, 20, "%lld\n", dev->bbu); spin_unlock_irqrestore(&dev->lock, flags); - - if (*ppos < bytes_read) { - if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos)) - retval = -EFAULT; - else { - retval = bytes_read - *ppos; - *ppos += bytes_read; - } - } - -exit: mutex_unlock(&dev->io_mutex); - return retval; + + return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); } static ssize_t yurex_write(struct file *file, const char __user *user_buffer, diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index aef8de046b8e..d9508dba2f83 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -1826,6 +1826,22 @@ enable_reg: else pd->vbus_enabled = true; + count = 10; + /* + * Check to make sure VBUS voltage reaches above Vsafe5Vmin (4.75v) + * before proceeding. + */ + while (count--) { + ret = power_supply_get_property(pd->usb_psy, + POWER_SUPPLY_PROP_VOLTAGE_NOW, &val); + if (ret || val.intval >= 4750000) /*vsafe5Vmin*/ + break; + usleep_range(10000, 12000); /* Delay between two reads */ + } + + if (ret) + msleep(100); /* Delay to wait for VBUS ramp up if read fails */ + return ret; } @@ -2747,7 +2763,6 @@ static void usbpd_sm(struct work_struct *w) case PE_PRS_SNK_SRC_SOURCE_ON: enable_vbus(pd); - msleep(200); /* allow time VBUS ramp-up, must be < tNewSrc */ ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG); if (ret) { diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c index 3ffb20c4a207..d1b6f1df860d 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -473,11 +473,13 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend) } if (suspend) { - if (phy->cable_connected) - msm_ssusb_qmp_enable_autonomous(phy, 1); - else + if (phy->cable_connected) { + if (phy->vls_clamp_reg) + msm_ssusb_qmp_enable_autonomous(phy, 1); + } else { writel_relaxed(0x00, phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); + } /* Make sure above write completed with PHY */ wmb(); @@ -509,7 +511,8 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend) writel_relaxed(0x01, phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); } else { - msm_ssusb_qmp_enable_autonomous(phy, 0); + if (phy->vls_clamp_reg) + msm_ssusb_qmp_enable_autonomous(phy, 0); } /* Make sure that above write completed with PHY */ @@ -648,13 +651,13 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vls_clamp_reg"); if (!res) { - dev_err(dev, "failed getting vls_clamp_reg\n"); - return -ENODEV; - } - phy->vls_clamp_reg = devm_ioremap_resource(dev, res); - if (IS_ERR(phy->vls_clamp_reg)) { - dev_err(dev, "couldn't find vls_clamp_reg address.\n"); - return PTR_ERR(phy->vls_clamp_reg); + dev_dbg(dev, "vls_clamp_reg not passed\n"); + } else { + phy->vls_clamp_reg = devm_ioremap_resource(dev, res); + 
if (IS_ERR(phy->vls_clamp_reg)) { + dev_err(dev, "couldn't find vls_clamp_reg address.\n"); + return PTR_ERR(phy->vls_clamp_reg); + } } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 71133d96f97d..f73ea14e8173 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -118,7 +118,7 @@ static int ch341_control_in(struct usb_device *dev, r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, index, buf, bufsize, DEFAULT_TIMEOUT); - if (r < bufsize) { + if (r < (int)bufsize) { if (r >= 0) { dev_err(&dev->dev, "short control message received (%d < %u)\n", diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 73835027a7cc..97382301c393 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -145,6 +145,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ + { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 6b0942428917..8a4047de43dc 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -373,8 +373,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial, 3, /* get pins */ USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN, 0, 0, data, 1, 2000); - if (rc >= 0) + if (rc == 1) *value = *data; + else if (rc >= 0) + rc = -EIO; kfree(data); return rc; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index ed883a7ad533..58ba6904a087 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -482,6 +482,9 @@ static void mos7840_control_callback(struct urb *urb) } dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); + if (urb->actual_length < 1) + goto out; + dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__, mos7840_port->MsrLsr, mos7840_port->port_num); data = urb->transfer_buffer; diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c index 2f5aad8ed801..8c28fbf8fbc2 100644 --- a/drivers/video/fbdev/msm/mdss_compat_utils.c +++ b/drivers/video/fbdev/msm/mdss_compat_utils.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
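/*
 * The policy-engine change above swaps a fixed 200 ms sleep for a
 * bounded poll: read the VBUS voltage up to 10 times, roughly 10 ms
 * apart, stop as soon as it reaches Vsafe5Vmin (4.75 V), and fall back
 * to a 100 ms delay only when the read itself fails. A self-contained
 * sketch of that poll-until-threshold pattern, hypothetical names:
 */
#include <unistd.h>

#define VSAFE5V_MIN_UV  4750000     /* 4.75 V in microvolts */
#define VBUS_POLL_TRIES 10
#define VBUS_POLL_US    10000       /* ~10 ms between reads */

/* Stand-in for reading POWER_SUPPLY_PROP_VOLTAGE_NOW: fills *uv,
 * returns 0 on success or a negative value on failure. */
static int read_vbus_uv(int *uv)
{
    *uv = 5000000;                  /* pretend VBUS already ramped up */
    return 0;
}

/* Wait (bounded) for VBUS to reach Vsafe5Vmin before moving on. */
static int wait_for_vsafe5v(void)
{
    int uv = 0, ret = 0, tries = VBUS_POLL_TRIES;

    while (tries--) {
        ret = read_vbus_uv(&uv);
        if (ret || uv >= VSAFE5V_MIN_UV)
            break;
        usleep(VBUS_POLL_US);
    }

    if (ret)
        usleep(100 * 1000);         /* read failed: plain delay instead */

    return ret;
}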
* Copyright (C) 1994 Martin Schaller * * 2001 - Documented with DocBook @@ -2853,26 +2853,28 @@ static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32, *pp = compat_alloc_user_space(alloc_size); if (NULL == *pp) return -ENOMEM; - memset(*pp, 0, alloc_size); - - (*pp)->data.lut_cfg_data.data.pgc_lut_data.r_data = - (struct mdp_ar_gc_lut_data *) - ((unsigned long) *pp + - sizeof(struct msmfb_mdp_pp)); - (*pp)->data.lut_cfg_data.data.pgc_lut_data.g_data = - (struct mdp_ar_gc_lut_data *) + if (clear_user(*pp, alloc_size)) + return -EFAULT; + if (put_user((struct mdp_ar_gc_lut_data *) + ((unsigned long) *pp + + sizeof(struct msmfb_mdp_pp)), + &(*pp)->data.lut_cfg_data.data.pgc_lut_data.r_data) || + put_user((struct mdp_ar_gc_lut_data *) ((unsigned long) *pp + sizeof(struct msmfb_mdp_pp) + - pgc_size); - (*pp)->data.lut_cfg_data.data.pgc_lut_data.b_data = - (struct mdp_ar_gc_lut_data *) + pgc_size), + &(*pp)->data.lut_cfg_data.data.pgc_lut_data.g_data) || + put_user((struct mdp_ar_gc_lut_data *) ((unsigned long) *pp + sizeof(struct msmfb_mdp_pp) + - (2 * pgc_size)); - (*pp)->data.lut_cfg_data.data.pgc_lut_data.cfg_payload - = (void *)((unsigned long) *pp + + (2 * pgc_size)), + &(*pp)->data.lut_cfg_data.data.pgc_lut_data.b_data) || + put_user((void *)((unsigned long) *pp + sizeof(struct msmfb_mdp_pp) + - (3 * pgc_size)); + (3 * pgc_size)), + &(*pp)->data.lut_cfg_data.data. + pgc_lut_data.cfg_payload)) + return -EFAULT; break; case mdp_lut_igc: alloc_size += __pp_compat_size_igc(); @@ -2882,10 +2884,13 @@ static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32, alloc_size); return -ENOMEM; } - memset(*pp, 0, alloc_size); - (*pp)->data.lut_cfg_data.data.igc_lut_data.cfg_payload - = (void *)((unsigned long)(*pp) + - sizeof(struct msmfb_mdp_pp)); + if (clear_user(*pp, alloc_size)) + return -EFAULT; + if (put_user((void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)), + &(*pp)->data.lut_cfg_data.data. + igc_lut_data.cfg_payload)) + return -EFAULT; break; case mdp_lut_hist: alloc_size += __pp_compat_size_hist_lut(); @@ -2895,10 +2900,13 @@ static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32, alloc_size); return -ENOMEM; } - memset(*pp, 0, alloc_size); - (*pp)->data.lut_cfg_data.data.hist_lut_data.cfg_payload - = (void *)((unsigned long)(*pp) + - sizeof(struct msmfb_mdp_pp)); + if (clear_user(*pp, alloc_size)) + return -EFAULT; + if (put_user((void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)), + &(*pp)->data.lut_cfg_data.data. 
+ hist_lut_data.cfg_payload)) + return -EFAULT; break; default: *pp = compat_alloc_user_space(alloc_size); @@ -2907,7 +2915,8 @@ static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32, alloc_size, lut_type); return -ENOMEM; } - memset(*pp, 0, alloc_size); + if (clear_user(*pp, alloc_size)) + return -EFAULT; break; } break; @@ -2919,10 +2928,12 @@ static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32, alloc_size); return -ENOMEM; } - memset(*pp, 0, alloc_size); - (*pp)->data.pcc_cfg_data.cfg_payload = - (void *)((unsigned long)(*pp) + - sizeof(struct msmfb_mdp_pp)); + if (clear_user(*pp, alloc_size)) + return -EFAULT; + if (put_user((void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)), + &(*pp)->data.pcc_cfg_data.cfg_payload)) + return -EFAULT; break; case mdp_op_gamut_cfg: alloc_size += __pp_compat_size_gamut(); @@ -2932,10 +2943,12 @@ static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32, alloc_size); return -ENOMEM; } - memset(*pp, 0, alloc_size); - (*pp)->data.gamut_cfg_data.cfg_payload = - (void *)((unsigned long)(*pp) + - sizeof(struct msmfb_mdp_pp)); + if (clear_user(*pp, alloc_size)) + return -EFAULT; + if (put_user((void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)), + &(*pp)->data.gamut_cfg_data.cfg_payload)) + return -EFAULT; break; case mdp_op_pa_v2_cfg: alloc_size += __pp_compat_size_pa(); @@ -2945,16 +2958,19 @@ static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32, alloc_size); return -ENOMEM; } - memset(*pp, 0, alloc_size); - (*pp)->data.pa_v2_cfg_data.cfg_payload = - (void *)((unsigned long)(*pp) + - sizeof(struct msmfb_mdp_pp)); + if (clear_user(*pp, alloc_size)) + return -EFAULT; + if (put_user((void *)((unsigned long)(*pp) + + sizeof(struct msmfb_mdp_pp)), + &(*pp)->data.pa_v2_cfg_data.cfg_payload)) + return -EFAULT; break; default: *pp = compat_alloc_user_space(alloc_size); if (NULL == *pp) return -ENOMEM; - memset(*pp, 0, alloc_size); + if (clear_user(*pp, alloc_size)) + return -EFAULT; break; } return 0; @@ -3372,7 +3388,9 @@ static int mdss_histo_compat_ioctl(struct fb_info *info, unsigned int cmd, sizeof(struct mdp_histogram_start_req)); return -EINVAL; } - memset(hist_req, 0, sizeof(struct mdp_histogram_start_req)); + if (clear_user(hist_req, + sizeof(struct mdp_histogram_start_req))) + return -EFAULT; ret = __from_user_hist_start_req(hist_req32, hist_req); if (ret) goto histo_compat_err; @@ -3392,7 +3410,8 @@ static int mdss_histo_compat_ioctl(struct fb_info *info, unsigned int cmd, sizeof(struct mdp_histogram_data)); return -EINVAL; } - memset(hist, 0, sizeof(struct mdp_histogram_data)); + if (clear_user(hist, sizeof(struct mdp_histogram_data))) + return -EFAULT; ret = __from_user_hist_data(hist32, hist); if (ret) goto histo_compat_err; @@ -3895,7 +3914,7 @@ static int __to_user_mdp_overlay(struct mdp_overlay32 __user *ov32, } -static int __from_user_mdp_overlay(struct mdp_overlay *ov, +static int __from_user_mdp_overlay(struct mdp_overlay __user *ov, struct mdp_overlay32 __user *ov32) { __u32 data; @@ -3954,12 +3973,12 @@ static int __from_user_mdp_overlay(struct mdp_overlay *ov, return 0; } -static int __from_user_mdp_overlaylist(struct mdp_overlay_list *ovlist, - struct mdp_overlay_list32 *ovlist32, +static int __from_user_mdp_overlaylist(struct mdp_overlay_list __user *ovlist, + struct mdp_overlay_list32 __user *ovlist32, struct mdp_overlay **to_list_head) { __u32 i, ret; - unsigned long data, from_list_head; + unsigned long data, from_list_head, num_overlays; struct mdp_overlay32 *iter; if 
(!to_list_head || !ovlist32 || !ovlist) { @@ -3980,11 +3999,13 @@ static int __from_user_mdp_overlaylist(struct mdp_overlay_list *ovlist, sizeof(ovlist32->processed_overlays))) return -EFAULT; - if (get_user(data, &ovlist32->overlay_list)) { + if (get_user(data, &ovlist32->overlay_list) || + get_user(num_overlays, &ovlist32->num_overlays)) { ret = -EFAULT; goto validate_exit; } - for (i = 0; i < ovlist32->num_overlays; i++) { + + for (i = 0; i < num_overlays; i++) { if (get_user(from_list_head, (__u32 *)data + i)) { ret = -EFAULT; goto validate_exit; @@ -3997,7 +4018,8 @@ static int __from_user_mdp_overlaylist(struct mdp_overlay_list *ovlist, goto validate_exit; } } - ovlist->overlay_list = to_list_head; + if (put_user(to_list_head, &ovlist->overlay_list)) + return -EFAULT; return 0; @@ -4006,8 +4028,8 @@ validate_exit: return -EFAULT; } -static int __to_user_mdp_overlaylist(struct mdp_overlay_list32 *ovlist32, - struct mdp_overlay_list *ovlist, +static int __to_user_mdp_overlaylist(struct mdp_overlay_list32 __user *ovlist32, + struct mdp_overlay_list __user *ovlist, struct mdp_overlay **l_ptr) { __u32 i, ret; @@ -4080,31 +4102,33 @@ static u32 __pp_sspp_size(void) return size; } -static int __pp_sspp_set_offsets(struct mdp_overlay *ov) +static int __pp_sspp_set_offsets(struct mdp_overlay __user *ov) { if (!ov) { pr_err("invalid overlay pointer\n"); return -EFAULT; } - ov->overlay_pp_cfg.igc_cfg.cfg_payload = (void *)((unsigned long)ov + - sizeof(struct mdp_overlay)); - ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload = - ov->overlay_pp_cfg.igc_cfg.cfg_payload + - sizeof(struct mdp_igc_lut_data_v1_7); - ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload = - ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload + - sizeof(struct mdp_pa_data_v1_7); - ov->overlay_pp_cfg.hist_lut_cfg.cfg_payload = - ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload + - sizeof(struct mdp_pcc_data_v1_7); + if (put_user((void *)((unsigned long)ov + sizeof(struct mdp_overlay)), + &(ov->overlay_pp_cfg.igc_cfg.cfg_payload)) || + put_user(ov->overlay_pp_cfg.igc_cfg.cfg_payload + + sizeof(struct mdp_igc_lut_data_v1_7), + &(ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload)) || + put_user(ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload + + sizeof(struct mdp_pa_data_v1_7), + &(ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload)) || + put_user(ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload + + sizeof(struct mdp_pcc_data_v1_7), + &(ov->overlay_pp_cfg.hist_lut_cfg.cfg_payload))) + return -EFAULT; return 0; } int mdss_compat_overlay_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg, struct file *file) { - struct mdp_overlay *ov, **layers_head; - struct mdp_overlay32 *ov32; + struct mdp_overlay **layers_head; + struct mdp_overlay __user *ov; + struct mdp_overlay32 __user *ov32; struct mdp_overlay_list __user *ovlist; struct mdp_overlay_list32 __user *ovlist32; size_t layers_refs_sz, layers_sz, prepare_sz; diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h index 54b792305eb5..43fc5eafb047 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.h +++ b/drivers/video/fbdev/msm/mdss_mdp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1762,7 +1762,7 @@ int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl, bool handoff); int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl, int panel_power_mode); int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg, u32 flags); -int mdss_mdp_get_prefetch_lines(struct mdss_panel_info *pinfo); +int mdss_mdp_get_prefetch_lines(struct mdss_panel_info *pinfo, bool is_fixed); int mdss_mdp_perf_bw_check(struct mdss_mdp_ctl *ctl, struct mdss_mdp_pipe **left_plist, int left_cnt, struct mdss_mdp_pipe **right_plist, int right_cnt); diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c index 632d73e909a3..ec56bcf6e64e 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c +++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c @@ -1525,7 +1525,7 @@ static bool is_mdp_prefetch_needed(struct mdss_panel_info *pinfo) * the mdp fetch lines as the last (25 - vbp - vpw) lines of vertical * front porch. */ -int mdss_mdp_get_prefetch_lines(struct mdss_panel_info *pinfo) +int mdss_mdp_get_prefetch_lines(struct mdss_panel_info *pinfo, bool is_fixed) { int prefetch_avail = 0; int v_total, vfp_start; @@ -1534,7 +1534,11 @@ int mdss_mdp_get_prefetch_lines(struct mdss_panel_info *pinfo) if (!is_mdp_prefetch_needed(pinfo)) return 0; - v_total = mdss_panel_get_vtotal(pinfo); + if (is_fixed) + v_total = mdss_panel_get_vtotal_fixed(pinfo); + else + v_total = mdss_panel_get_vtotal(pinfo); + vfp_start = (pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width + pinfo->yres); diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c index 607c0647b505..e27f6bc49892 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c +++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c @@ -1120,6 +1120,7 @@ static void mdss_mdp_video_vsync_intr_done(void *arg) struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX]; struct mdss_mdp_vsync_handler *tmp; ktime_t vsync_time; + u32 ctl_flush_bits = 0; if (!ctx) { pr_err("invalid ctx\n"); @@ -1129,10 +1130,13 @@ static void mdss_mdp_video_vsync_intr_done(void *arg) vsync_time = ktime_get(); ctl->vsync_cnt++; - MDSS_XLOG(ctl->num, ctl->vsync_cnt, ctl->vsync_cnt); + ctl_flush_bits = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH); - pr_debug("intr ctl=%d vsync cnt=%u vsync_time=%d\n", - ctl->num, ctl->vsync_cnt, (int)ktime_to_ms(vsync_time)); + MDSS_XLOG(ctl->num, ctl->vsync_cnt, ctl_flush_bits); + + pr_debug("intr ctl=%d vsync cnt=%u vsync_time=%d ctl_flush=%d\n", + ctl->num, ctl->vsync_cnt, (int)ktime_to_ms(vsync_time), + ctl_flush_bits); ctx->polling_en = false; complete_all(&ctx->vsync_comp); @@ -1901,6 +1905,7 @@ static void mdss_mdp_fetch_start_config(struct mdss_mdp_video_ctx *ctx, mdata = ctl->mdata; + pinfo->prg_fet = mdss_mdp_get_prefetch_lines(pinfo, true); if (!pinfo->prg_fet) { pr_debug("programmable fetch is not needed/supported\n"); @@ -1919,7 +1924,7 @@ static void mdss_mdp_fetch_start_config(struct mdss_mdp_video_ctx *ctx, * Fetch should always be outside the active lines. If the fetching * is programmed within active region, hardware behavior is unknown. 
*/ - v_total = mdss_panel_get_vtotal(pinfo); + v_total = mdss_panel_get_vtotal_fixed(pinfo); h_total = mdss_panel_get_htotal(pinfo, true); fetch_start = (v_total - pinfo->prg_fet) * h_total + 1; @@ -2206,8 +2211,6 @@ static int mdss_mdp_video_ctx_setup(struct mdss_mdp_ctl *ctl, ctx->intf_num); return -EINVAL; } - - pinfo->prg_fet = mdss_mdp_get_prefetch_lines(pinfo); mdss_mdp_fetch_start_config(ctx, ctl); if (test_bit(MDSS_QOS_VBLANK_PANIC_CTRL, mdata->mdss_qos_map)) diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c index 2f5b45638cdb..34001ab35e55 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c +++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c @@ -3367,9 +3367,6 @@ static void cache_initial_timings(struct mdss_panel_data *pdata) { if (!pdata->panel_info.default_fps) { - pdata->panel_info.default_prg_fet = - mdss_mdp_get_prefetch_lines(&pdata->panel_info); - /* * This value will change dynamically once the * actual dfps update happen in hw. @@ -3442,13 +3439,8 @@ static void dfps_update_panel_params(struct mdss_panel_data *pdata, dfps_update_fps(&pdata->panel_info, new_fps); - /* - * Fetch start is pinned to default fps. - * Adjust programmable fetch accordingly. - */ pdata->panel_info.prg_fet = - (pdata->panel_info.default_prg_fet) ? - (pdata->panel_info.default_prg_fet + add_v_lines) : 0; + mdss_mdp_get_prefetch_lines(&pdata->panel_info, false); } else if (pdata->panel_info.dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP) { @@ -5815,6 +5807,7 @@ static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd) int rc; struct mdss_overlay_private *mdp5_data; struct mdss_mdp_mixer *mixer; + struct mdss_mdp_pipe *pipe, *tmp; int need_cleanup; int retire_cnt; bool destroy_ctl = false; @@ -5870,6 +5863,13 @@ static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd) mixer->cursor_enabled = 0; mutex_lock(&mdp5_data->list_lock); + if (!list_empty(&mdp5_data->pipes_used)) { + list_for_each_entry_safe( + pipe, tmp, &mdp5_data->pipes_used, list) { + pipe->file = NULL; + list_move(&pipe->list, &mdp5_data->pipes_cleanup); + } + } need_cleanup = !list_empty(&mdp5_data->pipes_cleanup); mutex_unlock(&mdp5_data->list_lock); mutex_unlock(&mdp5_data->ov_lock); diff --git a/drivers/video/fbdev/msm/mdss_panel.c b/drivers/video/fbdev/msm/mdss_panel.c index 31cf74274131..ffe9b19c3859 100644 --- a/drivers/video/fbdev/msm/mdss_panel.c +++ b/drivers/video/fbdev/msm/mdss_panel.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2016, 2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -630,6 +630,7 @@ void mdss_panel_info_from_timing(struct mdss_panel_timing *pt, pinfo->yres = pt->yres; pinfo->lcdc.v_front_porch = pt->v_front_porch; + pinfo->lcdc.v_front_porch_fixed = pt->v_front_porch; pinfo->lcdc.v_back_porch = pt->v_back_porch; pinfo->lcdc.v_pulse_width = pt->v_pulse_width; diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h index 7085a9fd7200..acac672662c1 100644 --- a/drivers/video/fbdev/msm/mdss_panel.h +++ b/drivers/video/fbdev/msm/mdss_panel.h @@ -392,6 +392,7 @@ struct lcd_panel_info { u32 h_active_low; u32 v_back_porch; u32 v_front_porch; + u32 v_front_porch_fixed; u32 v_pulse_width; u32 v_active_low; u32 border_clr; @@ -814,8 +815,6 @@ struct mdss_panel_info { int new_fps; /* stores initial fps after boot */ u32 default_fps; - /* store programmable fetch corresponding to default fps */ - u32 default_prg_fet; /* stores initial vtotal (vfp-method) or htotal (hfp-method) */ u32 saved_total; /* stores initial vfp (vfp-method) or hfp (hfp-method) */ @@ -1063,6 +1062,23 @@ static inline u32 mdss_panel_get_framerate(struct mdss_panel_info *panel_info) } /* + * mdss_panel_get_vtotal_fixed() - return panel device tree vertical height + * @pinfo: Pointer to panel info containing all panel information + * + * Returns the total height as defined in panel device tree including any + * blanking regions which are not visible to user but used to calculate + * panel clock. + */ +static inline int mdss_panel_get_vtotal_fixed(struct mdss_panel_info *pinfo) +{ + return pinfo->yres + pinfo->lcdc.v_back_porch + + pinfo->lcdc.v_front_porch_fixed + + pinfo->lcdc.v_pulse_width+ + pinfo->lcdc.border_top + + pinfo->lcdc.border_bottom; +} + +/* * mdss_panel_get_vtotal() - return panel vertical height * @pinfo: Pointer to panel info containing all panel information * diff --git a/drivers/video/fbdev/msm/msm_dba/adv7533.c b/drivers/video/fbdev/msm/msm_dba/adv7533.c index 15fe77d05091..a21b6db85ed8 100644 --- a/drivers/video/fbdev/msm/msm_dba/adv7533.c +++ b/drivers/video/fbdev/msm/msm_dba/adv7533.c @@ -885,9 +885,13 @@ static void adv7533_handle_hdcp_intr(struct adv7533 *pdata, u8 hdcp_status) break; case 4: pr_err("%s: DDC: I2C ERROR\n", __func__); + adv7533_notify_clients(&pdata->dev_info, + MSM_DBA_CB_DDC_I2C_ERROR); break; case 5: pr_err("%s: DDC: TIMED OUT DS DONE\n", __func__); + adv7533_notify_clients(&pdata->dev_info, + MSM_DBA_CB_DDC_TIMEOUT); break; case 6: pr_err("%s: DDC: MAX CAS EXC\n", __func__); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7efd70bfeaf7..d106b981d86f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -923,7 +923,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags) if (bio_flags & EXTENT_BIO_TREE_LOG) return 0; #ifdef CONFIG_X86 - if (static_cpu_has_safe(X86_FEATURE_XMM4_2)) + if (static_cpu_has(X86_FEATURE_XMM4_2)) return 0; #endif return 1; diff --git a/fs/inode.c b/fs/inode.c index bd16497b3bba..48185da625ce 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1943,8 +1943,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir, inode->i_uid = current_fsuid(); if (dir && dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; + + /* Directories are special, and always inherit S_ISGID */ if (S_ISDIR(mode)) mode |= S_ISGID; + else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) && + !in_group_p(inode->i_gid) && + 
!capable_wrt_inode_uidgid(dir, CAP_FSETID)) + mode &= ~S_ISGID; } else inode->i_gid = current_fsgid(); inode->i_mode = mode; diff --git a/include/linux/ipa.h b/include/linux/ipa.h index a4b817c5e4fc..623d0f08cdf9 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1226,11 +1226,13 @@ int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); */ int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs); +int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only); + int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls); int ipa_commit_hdr(void); -int ipa_reset_hdr(void); +int ipa_reset_hdr(bool user_only); int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup); @@ -1241,7 +1243,8 @@ int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy); /* * Header Processing Context */ -int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); +int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); @@ -1250,11 +1253,13 @@ int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); */ int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); +int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only); + int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); int ipa_commit_rt(enum ipa_ip_type ip); -int ipa_reset_rt(enum ipa_ip_type ip); +int ipa_reset_rt(enum ipa_ip_type ip, bool user_only); int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); @@ -1269,13 +1274,15 @@ int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); */ int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); +int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only); + int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); int ipa_commit_flt(enum ipa_ip_type ip); -int ipa_reset_flt(enum ipa_ip_type ip); +int ipa_reset_flt(enum ipa_ip_type ip, bool user_only); /* * NAT @@ -1648,6 +1655,12 @@ static inline int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) return -EPERM; } +static inline int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, + bool user_only) +{ + return -EPERM; +} + static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls) { return -EPERM; @@ -1658,7 +1671,7 @@ static inline int ipa_commit_hdr(void) return -EPERM; } -static inline int ipa_reset_hdr(void) +static inline int ipa_reset_hdr(bool user_only) { return -EPERM; } @@ -1682,7 +1695,8 @@ static inline int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy) * Header Processing Context */ static inline int ipa_add_hdr_proc_ctx( - struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) + struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) { return -EPERM; } @@ -1699,6 +1713,12 @@ static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) return -EPERM; } +static inline int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, + bool user_only) +{ + return -EPERM; +} + static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) { return -EPERM; @@ -1709,7 +1729,7 @@ static inline int ipa_commit_rt(enum ipa_ip_type ip) return -EPERM; } -static inline int ipa_reset_rt(enum ipa_ip_type ip) +static inline int ipa_reset_rt(enum ipa_ip_type ip, bool user_only) { 
return -EPERM; } @@ -1742,6 +1762,12 @@ static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) return -EPERM; } +static inline int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, + bool user_only) +{ + return -EPERM; +} + static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) { return -EPERM; @@ -1757,7 +1783,7 @@ static inline int ipa_commit_flt(enum ipa_ip_type ip) return -EPERM; } -static inline int ipa_reset_flt(enum ipa_ip_type ip) +static inline int ipa_reset_flt(enum ipa_ip_type ip, bool user_only) { return -EPERM; } diff --git a/include/linux/libata.h b/include/linux/libata.h index b20a2752f934..6428ac4746de 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -210,6 +210,7 @@ enum { ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ /* (doesn't imply presence) */ ATA_FLAG_SATA = (1 << 1), + ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */ ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ diff --git a/include/net/cnss2.h b/include/net/cnss2.h index f80d87533a99..53f73d436752 100644 --- a/include/net/cnss2.h +++ b/include/net/cnss2.h @@ -171,6 +171,9 @@ extern int cnss_get_fw_files_for_target(struct device *dev, u32 target_type, u32 target_version); extern int cnss_get_platform_cap(struct device *dev, struct cnss_platform_cap *cap); +extern struct dma_iommu_mapping *cnss_smmu_get_mapping(struct device *dev); +extern int cnss_smmu_map(struct device *dev, + phys_addr_t paddr, uint32_t *iova_addr, size_t size); extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info); extern int cnss_request_bus_bandwidth(struct device *dev, int bandwidth); extern int cnss_power_up(struct device *dev); diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h index 4fff429dc0b2..ae8834d3fe54 100644 --- a/include/soc/qcom/icnss.h +++ b/include/soc/qcom/icnss.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -23,7 +23,6 @@ #endif enum icnss_uevent { - ICNSS_UEVENT_FW_READY, ICNSS_UEVENT_FW_CRASHED, ICNSS_UEVENT_FW_DOWN, }; diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 6a471c955d91..33090dd4489f 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -95,7 +95,9 @@ #define IPA_IOCTL_DEL_VLAN_IFACE 53 #define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 54 #define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 55 -#define IPA_IOCTL_MAX 56 +#define IPA_IOCTL_CLEANUP 56 +#define IPA_IOCTL_QUERY_WLAN_CLIENT 57 +#define IPA_IOCTL_MAX 58 /** * max size of the header to be inserted @@ -261,6 +263,11 @@ enum ipa_client_type { ((client) == IPA_CLIENT_APPS_LAN_CONS || \ (client) == IPA_CLIENT_APPS_WAN_CONS) +#define IPA_CLIENT_IS_APPS_PROD(client) \ + ((client) == IPA_CLIENT_APPS_LAN_PROD || \ + (client) == IPA_CLIENT_APPS_WAN_PROD || \ + (client) == IPA_CLIENT_APPS_CMD_PROD) + #define IPA_CLIENT_IS_USB_CONS(client) \ ((client) == IPA_CLIENT_USB_CONS || \ (client) == IPA_CLIENT_USB2_CONS || \ @@ -1907,6 +1914,10 @@ struct ipa_tether_device_info { #define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \ IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \ struct ipa_ioc_l2tp_vlan_mapping_info *) +#define IPA_IOC_CLEANUP _IO(IPA_IOC_MAGIC,\ + IPA_IOCTL_CLEANUP) +#define IPA_IOC_QUERY_WLAN_CLIENT _IO(IPA_IOC_MAGIC,\ + IPA_IOCTL_QUERY_WLAN_CLIENT) /* * unique magic number of the Tethering bridge ioctls */ diff --git a/include/uapi/media/msm_camera.h b/include/uapi/media/msm_camera.h index fd0937ffb1e5..81e350ede6eb 100644 --- a/include/uapi/media/msm_camera.h +++ b/include/uapi/media/msm_camera.h @@ -263,7 +263,7 @@ struct msm_mctl_post_proc_cmd { #define MSM_CAMERA_STROBE_FLASH_NONE 0 #define MSM_CAMERA_STROBE_FLASH_XENON 1 -#define MSM_MAX_CAMERA_SENSORS 5 +#define MSM_MAX_CAMERA_SENSORS 6 #define MAX_SENSOR_NAME 32 #define MAX_CAM_NAME_SIZE 32 #define MAX_ACT_MOD_NAME_SIZE 32 diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h index ac454ca9a7fc..40731a927ce5 100644 --- a/include/uapi/media/msm_camsensor_sdk.h +++ b/include/uapi/media/msm_camsensor_sdk.h @@ -50,6 +50,8 @@ #define MSM_SENSOR_BYPASS_VIDEO_NODE 1 +#define FRONT_AUX_SENSOR_SUPPORT + enum msm_sensor_camera_id_t { CAMERA_0, CAMERA_1, @@ -70,6 +72,7 @@ enum camb_position_t { BACK_CAMERA_B, FRONT_CAMERA_B, AUX_CAMERA_B = 0x100, + FRONT_AUX_CAMERA_B, INVALID_CAMERA_B, }; diff --git a/include/uapi/media/msmb_camera.h b/include/uapi/media/msmb_camera.h index 47d5a998e139..0a2dd446ccc4 100644 --- a/include/uapi/media/msmb_camera.h +++ b/include/uapi/media/msmb_camera.h @@ -53,7 +53,7 @@ #define MSM_CAMERA_SUBDEV_EXT 19 #define MSM_CAMERA_SUBDEV_TOF 20 #define MSM_CAMERA_SUBDEV_LASER_LED 21 -#define MSM_MAX_CAMERA_SENSORS 5 +#define MSM_MAX_CAMERA_SENSORS 6 /* The below macro is defined to put an upper limit on maximum * number of buffer requested per stream. In case of extremely diff --git a/include/video/msm_dba.h b/include/video/msm_dba.h index 3d20fd8d65eb..8ce2138044c3 100644 --- a/include/video/msm_dba.h +++ b/include/video/msm_dba.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2015,2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -53,6 +53,9 @@ * @MSM_DBA_CB_POST_RESET: This callback is called after device reset is * complete and the driver has applied back all the * properties. + * @MSM_DBA_CB_DDC_I2C_ERROR: Detected a failure in DDC block for i2c error. + * @MSM_DBA_CB_DDC_TIMEOUT: Detected a failure in DDC block for timed out + * waiting for downstream receiver. * * Clients for this driver can register for receiving callbacks for specific * events. This enum defines the type of events supported by the driver. An @@ -71,6 +74,8 @@ enum msm_dba_callback_event { MSM_DBA_CB_CEC_READ_PENDING = BIT(9), MSM_DBA_CB_PRE_RESET = BIT(10), MSM_DBA_CB_POST_RESET = BIT(11), + MSM_DBA_CB_DDC_I2C_ERROR = BIT(12), + MSM_DBA_CB_DDC_TIMEOUT = BIT(13), }; /** diff --git a/kernel/power/user.c b/kernel/power/user.c index 35310b627388..bc6dde1f1567 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -186,6 +186,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf, res = PAGE_SIZE - pg_offp; } + if (!data_of(data->handle)) { + res = -EINVAL; + goto unlock; + } + res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp, buf, count); if (res > 0) diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c index 252611fad2fe..54697e28ba38 100644 --- a/kernel/power/wakeup_reason.c +++ b/kernel/power/wakeup_reason.c @@ -3,8 +3,14 @@ * * Logs the reasons which caused the kernel to resume from * the suspend mode. + * Sends uevent to user space when enter or out of suspend, + * the modules of user space can use it to do some necessary + * operation. for example, sending a special signal to modem + * or controling the brightness of a lamp before or after suspend. * * Copyright (C) 2014 Google, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. @@ -26,7 +32,8 @@ #include <linux/spinlock.h> #include <linux/notifier.h> #include <linux/suspend.h> - +#include <linux/kobject.h> +#include <linux/suspend.h> #define MAX_WAKEUP_REASON_IRQS 32 static int irq_list[MAX_WAKEUP_REASON_IRQS]; @@ -41,6 +48,9 @@ static ktime_t curr_monotime; /* monotonic time after last suspend */ static ktime_t last_stime; /* monotonic boottime offset before last suspend */ static ktime_t curr_stime; /* monotonic boottime offset after last suspend */ +static struct class *wake_uevent_class; +static struct device *wake_uevent_device; + static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -168,12 +178,22 @@ void log_suspend_abort_reason(const char *fmt, ...) 
static int wakeup_reason_pm_event(struct notifier_block *notifier, unsigned long pm_event, void *unused) { + int ret = 0; + static char envp[32] = {0}; + static const char * const evp[] = {envp, NULL}; + switch (pm_event) { case PM_SUSPEND_PREPARE: spin_lock(&resume_reason_lock); irqcount = 0; suspend_abort = false; spin_unlock(&resume_reason_lock); + /* send the uevent to userspace */ + snprintf(envp, 32, "STATE=%s", "suspend start"); + ret = kobject_uevent_env(&wake_uevent_device->kobj, + KOBJ_CHANGE, (char **)evp); + if (ret) + pr_warn("Send uevent failed"); /* monotonic time since boot */ last_monotime = ktime_get(); /* monotonic time since boot including the time spent in suspend */ @@ -184,6 +204,12 @@ static int wakeup_reason_pm_event(struct notifier_block *notifier, curr_monotime = ktime_get(); /* monotonic time since boot including the time spent in suspend */ curr_stime = ktime_get_boottime(); + /* send the uevent to userspace */ + snprintf(envp, 32, "STATE=%s", "resume complete"); + ret = kobject_uevent_env(&wake_uevent_device->kobj, + KOBJ_CHANGE, (char **)evp); + if (ret) + pr_warn("Send uevent failed"); break; default: break; @@ -195,12 +221,18 @@ static struct notifier_block wakeup_reason_pm_notifier_block = { .notifier_call = wakeup_reason_pm_event, }; +static const struct file_operations wakeup_uevent = { + .owner = THIS_MODULE, +}; + /* Initializes the sysfs parameter * registers the pm_event notifier + * register the wake_uevent device */ int __init wakeup_reason_init(void) { int retval; + int major; retval = register_pm_notifier(&wakeup_reason_pm_notifier_block); if (retval) @@ -218,8 +250,36 @@ int __init wakeup_reason_init(void) kobject_put(wakeup_reason); printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n", __func__, retval); + return retval; + } + major = register_chrdev(0, "wake_uevent", &wakeup_uevent); + if (major < 0) { + sysfs_remove_group(wakeup_reason, &attr_group); + kobject_put(wakeup_reason); + return major; + } + wake_uevent_class = class_create(THIS_MODULE, "wake_uevent"); + if (IS_ERR(wake_uevent_class)) { + retval = PTR_ERR(wake_uevent_class); + goto fail_class; } + wake_uevent_device = device_create(wake_uevent_class, NULL, + MKDEV(major, 0), + NULL, "wake_uevent"); + if (IS_ERR(wake_uevent_device)) { + retval = PTR_ERR(wake_uevent_device); + goto fail_device; + } + return 0; + +fail_device: + class_destroy(wake_uevent_class); +fail_class: + unregister_chrdev(major, "wake_uevent"); + sysfs_remove_group(wakeup_reason, &attr_group); + kobject_put(wakeup_reason); + return retval; } late_initcall(wakeup_reason_init); diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c index 83c33a5bcffb..de67fea3cf46 100644 --- a/lib/atomic64_test.c +++ b/lib/atomic64_test.c @@ -16,6 +16,10 @@ #include <linux/kernel.h> #include <linux/atomic.h> +#ifdef CONFIG_X86 +#include <asm/cpufeature.h> /* for boot_cpu_has below */ +#endif + #define TEST(bit, op, c_op, val) \ do { \ atomic##bit##_set(&v, v0); \ diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index f64de569175a..a295b5ec9d4a 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session) del_timer(&session->timer); } -static void hidp_process_report(struct hidp_session *session, - int type, const u8 *data, int len, int intr) +static void hidp_process_report(struct hidp_session *session, int type, + const u8 *data, unsigned int len, int intr) { if (len > HID_MAX_BUFFER_SIZE) len = HID_MAX_BUFFER_SIZE; 
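
Annotation (not part of the patch): the kernel/power/wakeup_reason.c hunks above register a "wake_uevent" class device and emit KOBJ_CHANGE uevents carrying STATE="suspend start" before suspend and STATE="resume complete" after resume. The sketch below is one hedged way a userspace daemon might consume those events over a NETLINK_KOBJECT_UEVENT socket; in practice it would also match the subsystem/devpath of the wake_uevent device, which is assumed rather than shown here.

/* build: gcc -o wake_uevent_listen wake_uevent_listen.c (binding usually needs root) */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_pid    = getpid(),
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("uevent socket");
		return 1;
	}

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
		ssize_t off = 0;

		if (len <= 0)
			continue;
		buf[len] = '\0';
		/* a uevent is a sequence of NUL-terminated "KEY=value" strings */
		while (off < len) {
			if (strncmp(buf + off, "STATE=", 6) == 0)
				printf("wake_uevent: %s\n", buf + off + 6);
			off += strlen(buf + off) + 1;
		}
	}
}
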
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 9f70c267a7a5..665fd87cc105 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -701,6 +701,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net, } i = 0; + memset(&mtpar, 0, sizeof(mtpar)); + memset(&tgpar, 0, sizeof(tgpar)); mtpar.net = tgpar.net = net; mtpar.table = tgpar.table = name; mtpar.entryinfo = tgpar.entryinfo = e; diff --git a/net/ipc_router/ipc_router_socket.c b/net/ipc_router/ipc_router_socket.c index 23e4443fc1b2..7c82f7ff8874 100644 --- a/net/ipc_router/ipc_router_socket.c +++ b/net/ipc_router/ipc_router_socket.c @@ -143,6 +143,7 @@ static int msm_ipc_router_extract_msg(struct msghdr *m, return -EINVAL; } ctl_msg = (union rr_control_msg *)(temp->data); + memset(addr, 0x0, sizeof(*addr)); addr->family = AF_MSM_IPC; addr->address.addrtype = MSM_IPC_ADDR_ID; addr->address.addr.port_addr.node_id = ctl_msg->cli.node_id; @@ -151,6 +152,7 @@ static int msm_ipc_router_extract_msg(struct msghdr *m, return offset; } if (addr && (hdr->type == IPC_ROUTER_CTRL_CMD_DATA)) { + memset(addr, 0x0, sizeof(*addr)); addr->family = AF_MSM_IPC; addr->address.addrtype = MSM_IPC_ADDR_ID; addr->address.addr.port_addr.node_id = hdr->src_node_id; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index dac62b5e7fe3..9363c1a70f16 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -663,6 +663,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, return -ENOMEM; j = 0; + memset(&mtpar, 0, sizeof(mtpar)); mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index f424dad89054..7f6fabdc7ad2 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -680,6 +680,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, return -ENOMEM; j = 0; + memset(&mtpar, 0, sizeof(mtpar)); mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 7edcfda288c4..54cde78c2718 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -1106,6 +1106,9 @@ nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, + [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 }, + [NFQA_CFG_MASK] = { .type = NLA_U32 }, + [NFQA_CFG_FLAGS] = { .type = NLA_U32 }, }; static const struct nf_queue_handler nfqh = { diff --git a/tools/build/Build.include b/tools/build/Build.include index 4d000bc959b4..1c570528baf7 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -62,8 +62,8 @@ dep-cmd = $(if $(wildcard $(fixdep)), $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \ rm -f $(depfile); \ mv -f $(dot-target).tmp $(dot-target).cmd, \ - printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \ - printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \ + printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \ + printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd; \ cat $(depfile) >> $(dot-target).cmd; \ printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd) |
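
Annotation (not part of the patch): a recurring pattern in the mdss_compat_utils.c hunks earlier in this diff is that the buffer returned by compat_alloc_user_space() lives in the user address space, so it must be initialized with clear_user()/put_user() rather than memset() and direct member stores. The kernel-context sketch below restates that pattern in isolation; struct pp_buf, its payload field, and the helper name are hypothetical, only the uaccess calls mirror the patch.

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct pp_buf {				/* hypothetical layout */
	void __user *payload;		/* points just past the struct */
};

static int pp_buf_compat_alloc(struct pp_buf __user **out, size_t payload_sz)
{
	size_t total = sizeof(struct pp_buf) + payload_sz;
	struct pp_buf __user *p = compat_alloc_user_space(total);

	if (p == NULL)
		return -ENOMEM;
	if (clear_user(p, total))	/* was: memset(p, 0, total) */
		return -EFAULT;
	/* was: p->payload = (void *)((unsigned long)p + sizeof(*p)); */
	if (put_user((void __user *)((unsigned long)p + sizeof(*p)),
		     &p->payload))
		return -EFAULT;
	*out = p;
	return 0;
}
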
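
Annotation (not part of the patch): the fs/inode.c hunk above keeps S_ISGID on a new directory created inside a setgid directory, but strips it from a would-be group-executable non-directory when the creator is neither a member of the owning group nor privileged. The standalone illustration below approximates that rule; the helper name is made up and the capable_wrt_inode_uidgid(dir, CAP_FSETID) check is reduced to a plain flag.

#include <stdio.h>
#include <stdbool.h>
#include <sys/stat.h>

/* hypothetical helper mirroring the inode_init_owner() mode logic shown above */
static mode_t new_inode_mode(mode_t requested, bool parent_sgid,
			     bool creator_in_group, bool privileged)
{
	mode_t mode = requested;

	if (!parent_sgid)
		return mode;

	if (S_ISDIR(mode))
		mode |= S_ISGID;	/* directories always inherit S_ISGID */
	else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
		 !creator_in_group && !privileged)
		mode &= ~S_ISGID;	/* strip setgid from the new non-directory */

	return mode;
}

int main(void)
{
	/* group-executable file requested with setgid by a non-member */
	mode_t m = new_inode_mode(S_IFREG | 02770, true, false, false);

	printf("mode %04o: setgid %s\n", (unsigned int)(m & 07777),
	       (m & S_ISGID) ? "kept" : "stripped");
	return 0;
}
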
