776 files changed, 16771 insertions, 5548 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
index 6708c5e264aa..33e96f740639 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
@@ -1,4 +1,4 @@
-What		/sys/bus/iio/devices/iio:deviceX/in_proximity_raw
+What		/sys/bus/iio/devices/iio:deviceX/in_proximity_input
 Date:		March 2014
 KernelVersion:	3.15
 Contact:	Matt Ranostay <mranostay@gmail.com>
diff --git a/Documentation/ABI/testing/sysfs-class-stm b/Documentation/ABI/testing/sysfs-class-stm
index c9aa4f3fc9a7..77ed3da0f68e 100644
--- a/Documentation/ABI/testing/sysfs-class-stm
+++ b/Documentation/ABI/testing/sysfs-class-stm
@@ -12,3 +12,13 @@ KernelVersion:	4.3
 Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
 Description:
 		Shows the number of channels per master on this STM device.
+
+What:		/sys/class/stm/<stm>/hw_override
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:
+		Reads as 0 if master numbers in the STP stream produced by
+		this stm device will match the master numbers assigned by
+		the software or 1 if the stm hardware overrides software
+		assigned masters.
diff --git a/Documentation/device-mapper/boot.txt b/Documentation/device-mapper/boot.txt
new file mode 100644
index 000000000000..adcaad5e5e32
--- /dev/null
+++ b/Documentation/device-mapper/boot.txt
@@ -0,0 +1,42 @@
+Boot time creation of mapped devices
+===================================
+
+It is possible to configure a device mapper device to act as the root
+device for your system in two ways.
+
+The first is to build an initial ramdisk which boots to a minimal
+userspace which configures the device, then pivot_root(8) into it.
+
+For simple device mapper configurations, it is possible to boot directly
+using the following kernel command line:
+
+dm="<name> <uuid> <ro>,table line 1,...,table line n"
+
+name = the name to associate with the device
+	after boot, udev, if used, will use that name to label
+	the device node.
+uuid = may be 'none' or the UUID desired for the device.
+ro = may be "ro" or "rw". If "ro", the device and device table will be
+	marked read-only.
+
+Each table line may be as normal when using the dmsetup tool except for
+two variations:
+1. Any use of commas will be interpreted as a newline
+2. Quotation marks cannot be escaped and cannot be used without
+	terminating the dm= argument.
+
+Unless renamed by udev, the device node created will be dm-0 as the
+first minor number for the device-mapper is used during early creation.
+
+Example
+=======
+
+- Booting to a linear array made up of user-mode linux block devices:
+
+  dm="lroot none 0, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" \
+  root=/dev/dm-0
+
+Will boot to a rw dm-linear target of 8192 sectors split across two
+block devices identified by their major:minor numbers. After boot, udev
+will rename this target to /dev/mapper/lroot (depending on the rules).
+No uuid was assigned.
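For reference, the dm= example above can also be reproduced from a minimal initramfs with dmsetup(8) before pivoting to the new root. This is a sketch, not part of the patch: it assumes dmsetup reads the table from standard input when no table argument is given, and it reuses the illustrative 98:16/98:32 major:minor numbers from the example.

  # Build the same two-segment linear target ("lroot") that dm= creates at boot.
  # Each input line is one device-mapper table line; no comma encoding is needed
  # here because dmsetup accepts real newlines.
  printf '0 4096 linear 98:16 0\n4096 4096 linear 98:32 0\n' | dmsetup create lroot

  # The node then appears as /dev/mapper/lroot (and /dev/dm-0), ready to be
  # mounted and pivot_root'ed into.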
diff --git a/Documentation/devicetree/bindings/arm/cache.txt b/Documentation/devicetree/bindings/arm/cache.txt new file mode 100644 index 000000000000..a9594f026506 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/cache.txt @@ -0,0 +1,195 @@ +========================================== +ARM processors cache binding description +========================================== + +Device tree bindings for ARM processor caches adhere to the cache bindings +described in [3], in section 3.8 for multi-level and shared caches. +On ARM based systems most of the cache properties related to cache +geometry are probeable in HW, hence, unless otherwise stated, the properties +defined in ePAPR for multi-level and shared caches are to be considered +optional by default. + +On ARM, caches are either architected (directly controlled by the processor +through coprocessor instructions and tightly coupled with the processor +implementation) or unarchitected (controlled through a memory mapped +interface, implemented as a stand-alone IP external to the processor +implementation). + +This document provides the device tree bindings for ARM architected caches. + +- ARM architected cache node + + Description: must be a direct child of the cpu node. A system + can contain multiple architected cache nodes per cpu node, + linked through the next-level-cache phandle. The + next-level-cache property in the cpu node points to + the first level of architected cache for the CPU. + The next-level-cache property in architected cache nodes + points to the respective next level of caching in the + hierarchy. An architected cache node with an empty or + missing next-level-cache property represents the last + architected cache level for the CPU. + On ARM v7 and v8 architectures, the order in which cache + nodes are linked through the next-level-cache phandle must + follow the ordering specified in the processors CLIDR (v7) + and CLIDR_EL1 (v8) registers, as described in [1][2], + implying that a cache node pointed at by a + next-level-cache phandle must correspond to a level + defined in CLIDR (v7) and CLIDR_EL1 (v8) greater than the + one the cache node containing the next-level-cache + phandle corresponds to. + + Since on ARM most of the cache properties are probeable in HW the + properties described in [3] - section 3.8 multi-level and shared + caches - shall be considered optional, with the following properties + updates, specific for the ARM architected cache node. + + - compatible + Usage: Required + Value type: <string> + Definition: value shall be "arm,arch-cache". + + - interrupts + Usage: Optional + Value type: See definition + Definition: standard device tree property [3] that defines + the interrupt line associated with the cache. + The property can be accompanied by an + interrupt-names property, as described in [4]. + + - power-domain + Usage: Optional + Value type: phandle + Definition: A phandle and power domain specifier as defined by + bindings of power controller specified by the + phandle [5]. + + - qcom,dump-size + Usage: Optional + Value type: <integer> + Definition: The memory size needed to contain a copy of the + cache data and associated tag ram. 
+ size = nways * nsets * (bytes per cache line + + bytes tag ram per line) + +Example(dual-cluster big.LITTLE system 32-bit) + + cpus { + #size-cells = <0>; + #address-cells = <1>; + + cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x0>; + next-level-cache = <&L1_0>; + + L1_0: l1-cache { + compatible = "arm,arch-cache"; + next-level-cache = <&L2_0>; + }; + + L2_0: l2-cache { + compatible = "arm,arch-cache"; + }; + }; + + cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x1>; + next-level-cache = <&L1_1>; + + L1_1: l1-cache { + compatible = "arm,arch-cache"; + next-level-cache = <&L2_0>; + }; + }; + + cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x2>; + next-level-cache = <&L1_2>; + + L1_2: l1-cache { + compatible = "arm,arch-cache"; + next-level-cache = <&L2_0>; + }; + }; + + cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x3>; + next-level-cache = <&L1_3>; + + L1_3: l1-cache { + compatible = "arm,arch-cache"; + next-level-cache = <&L2_0>; + }; + }; + + cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x100>; + next-level-cache = <&L1_4>; + + L1_4: l1-cache { + compatible = "arm,arch-cache"; + next-level-cache = <&L2_1>; + }; + + L2_1: l2-cache { + compatible = "arm,arch-cache"; + }; + }; + + cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x101>; + next-level-cache = <&L1_5>; + + L1_5: l1-cache { + compatible = "arm,arch-cache"; + next-level-cache = <&L2_1>; + }; + }; + + cpu@102 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x102>; + next-level-cache = <&L1_6>; + + L1_6: l1-cache { + compatible = "arm,arch-cache"; + next-level-cache = <&L2_1>; + }; + }; + + cpu@103 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x103>; + next-level-cache = <&L1_7>; + + L1_7: l1-cache { + compatible = "arm,arch-cache"; + next-level-cache = <&L2_1>; + }; + }; + }; + +[1] ARMv7-AR Reference Manual + http://infocenter.arm.com/help/index.jsp +[2] ARMv8-A Reference Manual + http://infocenter.arm.com/help/index.jsp +[3] ePAPR standard + https://www.power.org/documentation/epapr-version-1-1/ +[4] Kernel documentation - resource property bindings + Documentation/devicetree/bindings/resource-names.txt +[5] Kernel documentation - power domain bindings + Documentation/devicetree/bindings/power/power_domain.txt diff --git a/Documentation/devicetree/bindings/clock/imx35-clock.txt b/Documentation/devicetree/bindings/clock/imx35-clock.txt index a70356452a82..f49783213c56 100644 --- a/Documentation/devicetree/bindings/clock/imx35-clock.txt +++ b/Documentation/devicetree/bindings/clock/imx35-clock.txt @@ -94,6 +94,7 @@ clocks and IDs. csi_sel 79 iim_gate 80 gpu2d_gate 81 + ckli_gate 82 Examples: diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt index 1ca2b6dd6d5c..ebbcfe5b2fd0 100644 --- a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt +++ b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt @@ -33,7 +33,15 @@ Optional properties for WLED: - 31100, 29600, 19600, 18100 for pmicobalt/pm2falcon. Should only be used if qcom,disp-type-amoled is not specified. -- qcom,ilim-ma : maximum current limiter in ma. default is 980. +- qcom,ilim-ma : Current limit threshold in mA. + For pmi8994/8952/8996, default value for LCD is 980mA + and AMOLED is 385mA. + Supported values are: + - 105, 385, 660, 980, 1150, 1420, 1700, 1980. 
+ For pmicobalt/pm2falcon, default value for LCD is + 970mA and AMOLED is 620mA. + Supported values are: + - 105, 280, 450, 620, 970, 1150, 1300, 1500. - qcom,boost-duty-ns : maximum boost duty cycle in ns. default is 104. - qcom,mod-freq-khz : modulation frequency in khz. default is 9600. - qcom,dim-mode : dimming mode. supporting dimming modes are "analog", @@ -61,10 +69,13 @@ Optional properties if 'qcom,disp-type-amoled' is mentioned in DT: - qcom,loop-ea-gm : control the gm for gm stage in control loop. default is 3. - qcom,loop-comp-res-kohm : control to select the compensation resistor in kohm. default is 320. - qcom,vref-psm-mv : reference psm voltage in mv. default for amoled is 450. -- qcom,avdd-target-voltage-mv: The target voltage desired for the AVDD module in mV. - The supported values are: - 7900, 7600, 7300, 6400, 6100, 5800. - If not specified, default value used is 7600. +- qcom,avdd-mode-spmi: Boolean property to enable AMOLED_VOUT programming via SPMI. If not specified, + AMOLED_VOUT is programmed via S-wire. This can be specified only for newer + PMICs like pmicobalt/pm2falcon. +- qcom,avdd-target-voltage-mv: The voltage required for AMOLED_VOUT. Accepted values are in the range + of 5650 to 7900 in steps of 150. Default value is 7600. Unit is in mV. + For old revisions, accepted values are: 7900, 7600, 7300, 6400, 6100, + 5800. Example: qcom,leds@d800 { diff --git a/Documentation/devicetree/bindings/power/reset/reboot-mode.txt b/Documentation/devicetree/bindings/power/reset/reboot-mode.txt new file mode 100644 index 000000000000..de34f27d509e --- /dev/null +++ b/Documentation/devicetree/bindings/power/reset/reboot-mode.txt @@ -0,0 +1,25 @@ +Generic reboot mode core map driver + +This driver get reboot mode arguments and call the write +interface to store the magic value in special register +or ram. Then the bootloader can read it and take different +action according to the argument stored. + +All mode properties are vendor specific, it is a indication to tell +the bootloader what to do when the system reboots, and should be named +as mode-xxx = <magic> (xxx is mode name, magic should be a none-zero value). + +For example modes common on Android platform: +- mode-normal: Normal reboot mode, system reboot with command "reboot". +- mode-recovery: Android Recovery mode, it is a mode to format the device or update a new image. +- mode-bootloader: Android fastboot mode, it's a mode to re-flash partitions on the Android based device. +- mode-loader: A bootloader mode, it's a mode used to download image on Rockchip platform, + usually used in development. + +Example: + reboot-mode { + mode-normal = <BOOT_NORMAL>; + mode-recovery = <BOOT_RECOVERY>; + mode-bootloader = <BOOT_FASTBOOT>; + mode-loader = <BOOT_BL_DOWNLOAD>; + } diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt new file mode 100644 index 000000000000..f7ce1d8af04a --- /dev/null +++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt @@ -0,0 +1,35 @@ +SYSCON reboot mode driver + +This driver gets reboot mode magic value form reboot-mode driver +and stores it in a SYSCON mapped register. Then the bootloader +can read it and take different action according to the magic +value stored. + +This DT node should be represented as a sub-node of a "syscon", "simple-mfd" +node. 
+ +Required properties: +- compatible: should be "syscon-reboot-mode" +- offset: offset in the register map for the storage register (in bytes) + +Optional property: +- mask: bits mask of the bits in the register to store the reboot mode magic value, + default set to 0xffffffff if missing. + +The rest of the properties should follow the generic reboot-mode description +found in reboot-mode.txt + +Example: + pmu: pmu@20004000 { + compatible = "rockchip,rk3066-pmu", "syscon", "simple-mfd"; + reg = <0x20004000 0x100>; + + reboot-mode { + compatible = "syscon-reboot-mode"; + offset = <0x40>; + mode-normal = <BOOT_NORMAL>; + mode-recovery = <BOOT_RECOVERY>; + mode-bootloader = <BOOT_FASTBOOT>; + mode-loader = <BOOT_BL_DOWNLOAD>; + }; + }; diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index d00ead0b3012..04c02786f6bd 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -43,6 +43,7 @@ Table of Contents 3.7 /proc/<pid>/task/<tid>/children - Information about task children 3.8 /proc/<pid>/fdinfo/<fd> - Information about opened file 3.9 /proc/<pid>/map_files - Information about memory mapped files + 3.10 /proc/<pid>/timerslack_ns - Task timerslack value 4 Configuring procfs 4.1 Mount options @@ -1876,6 +1877,23 @@ time one can open(2) mappings from the listings of two processes and comparing their inode numbers to figure out which anonymous memory areas are actually shared. +3.10 /proc/<pid>/timerslack_ns - Task timerslack value +--------------------------------------------------------- +This file provides the value of the task's timerslack value in nanoseconds. +This value specifies a amount of time that normal timers may be deferred +in order to coalesce timers and avoid unnecessary wakeups. + +This allows a task's interactivity vs power consumption trade off to be +adjusted. + +Writing 0 to the file will set the tasks timerslack to the default value. + +Valid values are from 0 - ULLONG_MAX + +An application setting the value must have PTRACE_MODE_ATTACH_FSCREDS level +permissions on the task specified to change its timerslack_ns value. + + ------------------------------------------------------------------------------ Configuring procfs ------------------------------------------------------------------------------ diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 024bdaf9af46..276d3f68e08d 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -56,6 +56,7 @@ parameter is applicable: BLACKFIN Blackfin architecture is enabled. CLK Common clock infrastructure is enabled. CMA Contiguous Memory Area support is enabled. + DM Device mapper support is enabled. DRM Direct Rendering Management support is enabled. DYNAMIC_DEBUG Build in debug messages and enable them at runtime EDD BIOS Enhanced Disk Drive Services (EDD) is enabled @@ -919,6 +920,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted. dis_ucode_ldr [X86] Disable the microcode loader. + dm= [DM] Allows early creation of a device-mapper device. + See Documentation/device-mapper/boot.txt. + dma_debug=off If the kernel is compiled with DMA_API_DEBUG support, this option disables the debugging code at boot. diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt index 8638f61c8c9d..37eca00796ee 100644 --- a/Documentation/scsi/scsi_eh.txt +++ b/Documentation/scsi/scsi_eh.txt @@ -263,19 +263,23 @@ scmd->allowed. 3. 
scmd recovered ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd - - shost->host_failed-- - clear scmd->eh_eflags - scsi_setup_cmd_retry() - move from local eh_work_q to local eh_done_q LOCKING: none + CONCURRENCY: at most one thread per separate eh_work_q to + keep queue manipulation lockless 4. EH completes ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper - layer of failure. + layer of failure. May be called concurrently but must have + a no more than one thread per separate eh_work_q to + manipulate the queue locklessly - scmd is removed from eh_done_q and scmd->eh_entry is cleared - if retry is necessary, scmd is requeued using scsi_queue_insert() - otherwise, scsi_finish_command() is invoked for scmd + - zero shost->host_failed LOCKING: queue or finish function performs appropriate locking diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt index bc3842dc323a..e2dea3dc4307 100644 --- a/Documentation/serial/tty.txt +++ b/Documentation/serial/tty.txt @@ -213,9 +213,6 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write TTY_OTHER_CLOSED Device is a pty and the other side has closed. -TTY_OTHER_DONE Device is a pty and the other side has closed and - all pending input processing has been completed. - TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into smaller chunks. diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt index 88152f214f48..302b5ed616a6 100644 --- a/Documentation/sysctl/fs.txt +++ b/Documentation/sysctl/fs.txt @@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs: - nr_open - overflowuid - overflowgid +- pipe-user-pages-hard +- pipe-user-pages-soft - protected_hardlinks - protected_symlinks - suid_dumpable @@ -159,6 +161,27 @@ The default is 65534. ============================================================== +pipe-user-pages-hard: + +Maximum total number of pages a non-privileged user may allocate for pipes. +Once this limit is reached, no new pipes may be allocated until usage goes +below the limit again. When set to 0, no limit is applied, which is the default +setting. + +============================================================== + +pipe-user-pages-soft: + +Maximum total number of pages a non-privileged user may allocate for pipes +before the pipe size gets limited to a single page. Once this limit is reached, +new pipes will be limited to a single page in size for this user in order to +limit total memory usage, and trying to increase them using fcntl() will be +denied until usage goes below the limit again. The default value allows to +allocate up to 1024 pipes at their default size. When set to 0, no limit is +applied. 
+ +============================================================== + protected_hardlinks: A long-standing class of security issues is the hardlink-based diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 03e6aafd5b94..2c5137a6fef6 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -60,6 +60,7 @@ show up in /proc/sys/kernel: - panic_on_stackoverflow - panic_on_unrecovered_nmi - panic_on_warn +- perf_cpu_time_max_percent - perf_event_paranoid - pid_max - powersave-nap [ PPC only ] diff --git a/MAINTAINERS b/MAINTAINERS index ab65bbecb159..7875f7b71546 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1007,6 +1007,10 @@ F: drivers/hwtracing/coresight/* F: Documentation/trace/coresight.txt F: Documentation/devicetree/bindings/arm/coresight.txt F: Documentation/ABI/testing/sysfs-bus-coresight-devices-* +F: tools/perf/arch/arm/util/pmu.c +F: tools/perf/arch/arm/util/auxtrace.c +F: tools/perf/arch/arm/util/cs_etm.c +F: tools/perf/arch/arm/util/cs_etm.h ARM/CORGI MACHINE SUPPORT M: Richard Purdie <rpurdie@rpsys.net> @@ -9356,6 +9360,7 @@ F: drivers/mmc/host/dw_mmc* SYSTEM TRACE MODULE CLASS M: Alexander Shishkin <alexander.shishkin@linux.intel.com> S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ash/stm.git F: Documentation/trace/stm.txt F: drivers/hwtracing/stm/ F: include/linux/stm.h @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 11 +SUBLEVEL = 16 EXTRAVERSION = NAME = Blurry Fish Butt @@ -368,7 +368,7 @@ AFLAGS_MODULE = LDFLAGS_MODULE = CFLAGS_KERNEL = AFLAGS_KERNEL = -CFLAGS_GCOV = -fprofile-arcs -ftest-coverage +CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im # Use USERINCLUDE when you must reference the UAPI directories only. @@ -686,9 +686,10 @@ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,) KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior) else -# This warning generated too much noise in a regular build. -# Use make W=1 to enable this warning (see scripts/Makefile.build) +# These warnings generated too much noise in a regular build. 
+# Use make W=1 to enable them (see scripts/Makefile.build) KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) +KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) endif ifdef CONFIG_FRAME_POINTER diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg index fa53af0c37ad..6496bb3961a2 100644 --- a/android/configs/android-base.cfg +++ b/android/configs/android-base.cfg @@ -37,7 +37,6 @@ CONFIG_IP6_NF_IPTABLES=y CONFIG_IP6_NF_MANGLE=y CONFIG_IP6_NF_RAW=y CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_TARGET_REJECT_SKERR=y CONFIG_IPV6=y CONFIG_IPV6_MIP6=y CONFIG_IPV6_MULTIPLE_TABLES=y @@ -57,13 +56,13 @@ CONFIG_IP_NF_MANGLE=y CONFIG_IP_NF_MATCH_AH=y CONFIG_IP_NF_MATCH_ECN=y CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_NAT=y CONFIG_IP_NF_RAW=y CONFIG_IP_NF_SECURITY=y CONFIG_IP_NF_TARGET_MASQUERADE=y CONFIG_IP_NF_TARGET_NETMAP=y CONFIG_IP_NF_TARGET_REDIRECT=y CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_TARGET_REJECT_SKERR=y CONFIG_NET=y CONFIG_NETDEVICES=y CONFIG_NETFILTER=y @@ -138,18 +137,20 @@ CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y CONFIG_PPP_MPPE=y CONFIG_PREEMPT=y -CONFIG_RESOURCE_COUNTERS=y +CONFIG_PROFILING=y +CONFIG_QUOTA=y CONFIG_RTC_CLASS=y CONFIG_RT_GROUP_SCHED=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY_SELINUX=y CONFIG_SETEND_EMULATION=y CONFIG_STAGING=y -CONFIG_SWITCH=y CONFIG_SWP_EMULATION=y CONFIG_SYNC=y CONFIG_TUN=y +CONFIG_UID_CPUTIME=y CONFIG_UNIX=y CONFIG_USB_GADGET=y CONFIG_USB_CONFIGFS=y diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg index e4c8aaade197..c3222a77ba24 100644 --- a/android/configs/android-recommended.cfg +++ b/android/configs/android-recommended.cfg @@ -110,7 +110,6 @@ CONFIG_TABLET_USB_AIPTEK=y CONFIG_TABLET_USB_GTCO=y CONFIG_TABLET_USB_HANWANG=y CONFIG_TABLET_USB_KBTAB=y -CONFIG_TABLET_USB_WACOM=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_IO_ACCOUNTING=y @@ -119,7 +118,6 @@ CONFIG_TIMER_STATS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_UHID=y -CONFIG_UID_STAT=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_HIDDEV=y diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 6312f607932f..2d785f5a3041 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -387,7 +387,7 @@ config ARC_HAS_LLSC config ARC_STAR_9000923308 bool "Workaround for llock/scond livelock" - default y + default n depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC config ARC_HAS_SWAPE diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index e1b87444ea9a..05131805aa33 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -332,10 +332,6 @@ static void arc_chk_core_config(void) pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); else if (!cpu->extn.fpu_dp && fpu_enabled) panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); - - if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic && - !IS_ENABLED(CONFIG_ARC_STAR_9000923308)) - panic("llock/scond livelock workaround missing\n"); } /* diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi index 85d2c377c332..22f7a13e20b4 100644 --- a/arch/arm/boot/dts/armada-385-linksys.dtsi +++ b/arch/arm/boot/dts/armada-385-linksys.dtsi @@ -58,8 +58,8 @@ soc { ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000 MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000 - MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000 - MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>; + MBUS_ID(0x09, 0x19) 0 
0xf1100000 0x10000 + MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>; internal-regs { @@ -245,7 +245,7 @@ button@2 { label = "Factory Reset Button"; linux,code = <KEY_RESTART>; - gpios = <&gpio1 15 GPIO_ACTIVE_LOW>; + gpios = <&gpio0 29 GPIO_ACTIVE_LOW>; }; }; @@ -260,7 +260,7 @@ }; sata { - gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>; + gpios = <&gpio1 22 GPIO_ACTIVE_LOW>; default-state = "off"; }; }; @@ -313,7 +313,7 @@ &pinctrl { keys_pin: keys-pin { - marvell,pins = "mpp24", "mpp47"; + marvell,pins = "mpp24", "mpp29"; marvell,function = "gpio"; }; diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts index b89e6cf1271a..7a461541ce50 100644 --- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts +++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts @@ -304,13 +304,13 @@ button@1 { label = "WPS"; linux,code = <KEY_WPS_BUTTON>; - gpios = <&gpio1 0 GPIO_ACTIVE_HIGH>; + gpios = <&gpio1 0 GPIO_ACTIVE_LOW>; }; button@2 { label = "Factory Reset Button"; linux,code = <KEY_RESTART>; - gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>; + gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts index a50be640f1b0..59411e447fa0 100644 --- a/arch/arm/boot/dts/exynos4210-trats.dts +++ b/arch/arm/boot/dts/exynos4210-trats.dts @@ -298,6 +298,8 @@ compatible = "maxim,max8997-pmic"; reg = <0x66>; + interrupt-parent = <&gpx0>; + interrupts = <7 0>; max8997,pmic-buck1-uses-gpio-dvs; max8997,pmic-buck2-uses-gpio-dvs; diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile index bffa21a06462..9fa8763fb19d 100644 --- a/arch/arm/boot/dts/qcom/Makefile +++ b/arch/arm/boot/dts/qcom/Makefile @@ -113,7 +113,9 @@ dtb-$(CONFIG_ARCH_MSMCOBALT) += msmcobalt-sim.dtb \ msmcobalt-v2-cdp.dtb \ msmcobalt-v2-qrd.dtb \ msmcobalt-qrd-skuk.dtb \ + msmcobalt-v2-qrd-skuk.dtb \ msmcobalt-qrd-vr1.dtb \ + msmcobalt-v2-qrd-vr1.dtb \ apqcobalt-mtp.dtb \ apqcobalt-cdp.dtb \ apqcobalt-v2-mtp.dtb \ diff --git a/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi b/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi index 91b4cc351010..41589d02f6fc 100644 --- a/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pm2falcon.dtsi @@ -237,7 +237,7 @@ qcom,vref-mv = <350>; qcom,switch-freq-khz = <800>; qcom,ovp-mv = <29600>; - qcom,ilim-ma = <980>; + qcom,ilim-ma = <970>; qcom,boost-duty-ns = <26>; qcom,mod-freq-khz = <9600>; qcom,dim-mode = "hybrid"; diff --git a/arch/arm/boot/dts/qcom/msm-pmcobalt-rpm-regulator.dtsi b/arch/arm/boot/dts/qcom/msm-pmcobalt-rpm-regulator.dtsi index 7a8e71d14291..7243a6b1d6d4 100644 --- a/arch/arm/boot/dts/qcom/msm-pmcobalt-rpm-regulator.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pmcobalt-rpm-regulator.dtsi @@ -592,7 +592,7 @@ regulator-bob { compatible = "qcom,rpm-smd-regulator"; - regulator-name = "pmcobalt_bob"; + regulator-name = "pmicobalt_bob"; qcom,set = <3>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/qcom/msm-pmfalcon.dtsi b/arch/arm/boot/dts/qcom/msm-pmfalcon.dtsi index dec37881249c..b8fac8a183a2 100644 --- a/arch/arm/boot/dts/qcom/msm-pmfalcon.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pmfalcon.dtsi @@ -158,6 +158,185 @@ interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>; }; }; + + pmfalcon_vadc: vadc@3100 { + compatible = "qcom,qpnp-vadc-hc"; + reg = <0x3100 0x100>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "eoc-int-en-set"; + qcom,adc-bit-resolution = <15>; + qcom,adc-vdd-reference = <1875>; + + chan@6 { + label 
= "die_temp"; + reg = <6>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <3>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + qcom,cal-val = <0>; + }; + + chan@0 { + label = "ref_gnd"; + reg = <0>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + qcom,cal-val = <0>; + }; + + chan@1 { + label = "ref_1250v"; + reg = <1>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + qcom,cal-val = <0>; + }; + + chan@83 { + label = "vph_pwr"; + reg = <0x83>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + }; + + chan@85 { + label = "vcoin"; + reg = <0x85>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + }; + + chan@4c { + label = "xo_therm"; + reg = <0x4c>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <4>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; + + chan@4d { + label = "msm_therm"; + reg = <0x4d>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; + + chan@51 { + label = "quiet_therm"; + reg = <0x51>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; + + chan@1d { + label = "drax_temp"; + reg = <0x1d>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <3>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + qcom,cal-val = <0>; + }; + }; + + pmfalcon_adc_tm: vadc@3400 { + compatible = "qcom,qpnp-adc-tm-hc"; + reg = <0x3400 0x100>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "eoc-int-en-set"; + qcom,adc-bit-resolution = <15>; + qcom,adc-vdd-reference = <1875>; + qcom,adc_tm-vadc = <&pmfalcon_vadc>; + qcom,decimation = <0>; + qcom,fast-avg-setup = <0>; + + chan@83 { + label = "vph_pwr"; + reg = <0x83>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,btm-channel-number = <0x60>; + }; + + chan@4d { + label = "msm_therm"; + reg = <0x4d>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x68>; + qcom,thermal-node; + }; + + chan@51 { + label = "quiet_therm"; + reg = <0x51>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x70>; + qcom,thermal-node; + }; + + chan@4c { + label = "xo_therm"; + reg = <0x4c>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = 
<4>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x78>; + qcom,thermal-node; + }; + }; + + pmfalcon_rradc: rradc@4500 { + compatible = "qcom,rradc"; + reg = <0x4500 0x100>; + #address-cells = <1>; + #size-cells = <0>; + #io-channel-cells = <1>; + }; }; qcom,pmfalcon@1 { diff --git a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi index ad56f1d3dd74..a5243aff4282 100644 --- a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi @@ -597,7 +597,7 @@ qcom,vref-mv = <350>; qcom,switch-freq-khz = <800>; qcom,ovp-mv = <29600>; - qcom,ilim-ma = <980>; + qcom,ilim-ma = <970>; qcom,boost-duty-ns = <26>; qcom,mod-freq-khz = <9600>; qcom,dim-mode = "hybrid"; @@ -741,7 +741,7 @@ qcom,led-mask = <3>; qcom,default-led-trigger = "switch0_trigger"; reg0 { - regulator-name = "pmcobalt_bob"; + regulator-name = "pmicobalt_bob"; max-voltage-uv = <3600000>; }; }; @@ -752,7 +752,7 @@ qcom,led-mask = <4>; qcom,default-led-trigger = "switch1_trigger"; reg0 { - regulator-name = "pmcobalt_bob"; + regulator-name = "pmicobalt_bob"; max-voltage-uv = <3600000>; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-ion.dtsi b/arch/arm/boot/dts/qcom/msm8996-ion.dtsi index 6b64e9b8976a..9b287e3f23c5 100644 --- a/arch/arm/boot/dts/qcom/msm8996-ion.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-ion.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,11 +21,6 @@ qcom,ion-heap-type = "SYSTEM"; }; - system_contig_heap: qcom,ion-heap@21 { - reg = <21>; - qcom,ion-heap-type = "SYSTEM_CONTIG"; - }; - qcom,ion-heap@22 { /* ADSP HEAP */ reg = <22>; memory-region = <&adsp_mem>; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-ion.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-ion.dtsi index dfa3dae8e371..7b15fd81c710 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-ion.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-ion.dtsi @@ -21,11 +21,6 @@ qcom,ion-heap-type = "SYSTEM"; }; - system_contig_heap: qcom,ion-heap@21 { - reg = <21>; - qcom,ion-heap-type = "SYSTEM_CONTIG"; - }; - qcom,ion-heap@22 { /* ADSP HEAP */ reg = <22>; memory-region = <&adsp_mem>; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi index b77bab712ecf..81f53f1512fd 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi @@ -100,6 +100,16 @@ }; &pmcobalt_gpios { + /* GPIO 2 for Home Key */ + gpio@c100 { + status = "okay"; + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + qcom,out-strength = <1>; + }; + /* GPIO 6 for Vol+ Key */ gpio@c500 { status = "okay"; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts index ee6a58a41b4f..e53912071502 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts +++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts @@ -21,32 +21,3 @@ compatible = "qcom,msmcobalt-qrd", "qcom,msmcobalt", "qcom,qrd"; qcom,board-id = <0x02000b 0x80>; }; - -&soc { - sound-tavil { - qcom,model = "msmcobalt-qvr-tavil-snd-card"; - qcom,audio-routing = - "RX_BIAS", "MCLK", - "MADINPUT", "MCLK", - "AMIC2", "MIC BIAS2", - "MIC BIAS2", "Headset Mic", - "DMIC0", "MIC BIAS1", - "MIC BIAS1", "Digital Mic0", - "DMIC1", "MIC BIAS1", - "MIC BIAS1", "Digital Mic1", - 
"DMIC2", "MIC BIAS3", - "MIC BIAS3", "Digital Mic2", - "DMIC4", "MIC BIAS4", - "MIC BIAS4", "Digital Mic4", - "SpkrLeft IN", "SPK1 OUT"; - - qcom,msm-mbhc-hphl-swh = <1>; - /delete-property/ qcom,us-euro-gpios; - /delete-property/ qcom,hph-en0-gpio; - /delete-property/ qcom,hph-en0-gpio; - - qcom,wsa-max-devs = <1>; - qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>; - qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft"; - }; -}; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi index f8069856f3d8..f0607ac3a34a 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi @@ -99,4 +99,31 @@ debounce-interval = <15>; }; }; + + sound-tavil { + qcom,model = "msmcobalt-qvr-tavil-snd-card"; + qcom,audio-routing = + "RX_BIAS", "MCLK", + "MADINPUT", "MCLK", + "AMIC2", "MIC BIAS2", + "MIC BIAS2", "Headset Mic", + "DMIC0", "MIC BIAS1", + "MIC BIAS1", "Digital Mic0", + "DMIC1", "MIC BIAS1", + "MIC BIAS1", "Digital Mic1", + "DMIC2", "MIC BIAS3", + "MIC BIAS3", "Digital Mic2", + "DMIC4", "MIC BIAS4", + "MIC BIAS4", "Digital Mic4", + "SpkrLeft IN", "SPK1 OUT"; + + qcom,msm-mbhc-hphl-swh = <1>; + /delete-property/ qcom,us-euro-gpios; + /delete-property/ qcom,hph-en0-gpio; + /delete-property/ qcom,hph-en0-gpio; + + qcom,wsa-max-devs = <1>; + qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>; + qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft"; + }; }; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi index 7d5509f0016c..682ea8a260ef 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi @@ -264,7 +264,7 @@ }; &pmicobalt_wled { - qcom,led-strings-list = [00 01]; + qcom,led-strings-list = [01 02]; }; &dsi_dual_nt35597_video { diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi index 2a61cccad273..bb72cf3a0d2c 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi @@ -501,7 +501,7 @@ }; pmicobalt_bob_pin1: regulator-bob-pin1 { compatible = "qcom,rpm-smd-regulator"; - regulator-name = "pmcobalt_bob_pin1"; + regulator-name = "pmicobalt_bob_pin1"; qcom,set = <3>; regulator-min-microvolt = <3312000>; regulator-max-microvolt = <3600000>; @@ -509,7 +509,7 @@ }; pmicobalt_bob_pin2: regulator-bob-pin2 { compatible = "qcom,rpm-smd-regulator"; - regulator-name = "pmcobalt_bob_pin2"; + regulator-name = "pmicobalt_bob_pin2"; qcom,set = <3>; regulator-min-microvolt = <3312000>; regulator-max-microvolt = <3600000>; @@ -517,7 +517,7 @@ }; pmicobalt_bob_pin3: regulator-bob-pin3 { compatible = "qcom,rpm-smd-regulator"; - regulator-name = "pmcobalt_bob_pin3"; + regulator-name = "pmicobalt_bob_pin3"; qcom,set = <3>; regulator-min-microvolt = <3312000>; regulator-max-microvolt = <3600000>; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2-qrd-skuk.dts b/arch/arm/boot/dts/qcom/msmcobalt-v2-qrd-skuk.dts new file mode 100644 index 000000000000..78e810b816c9 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msmcobalt-v2-qrd-skuk.dts @@ -0,0 +1,23 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + + +/dts-v1/; + +#include "msmcobalt-v2.dtsi" +#include "msmcobalt-qrd-skuk.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. MSM COBALT V2 SKUK"; + compatible = "qcom,msmcobalt-qrd", "qcom,msmcobalt", "qcom,qrd"; + qcom,board-id = <0x01000b 0x80>; +}; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2-qrd-vr1.dts b/arch/arm/boot/dts/qcom/msmcobalt-v2-qrd-vr1.dts new file mode 100644 index 000000000000..15dd2d550b31 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msmcobalt-v2-qrd-vr1.dts @@ -0,0 +1,23 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + + +/dts-v1/; + +#include "msmcobalt-v2.dtsi" +#include "msmcobalt-qrd-vr1.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. MSM COBALT V2 VR1 Board"; + compatible = "qcom,msmcobalt-qrd", "qcom,msmcobalt", "qcom,qrd"; + qcom,board-id = <0x02000b 0x80>; +}; diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi index 60b514c7ca20..b58e2d2c7cc6 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi @@ -2222,6 +2222,16 @@ qcom,sensors = <8>; }; + qcom,qbt1000 { + compatible = "qcom,qbt1000"; + clock-names = "core", "iface"; + clocks = <&clock_gcc clk_gcc_blsp2_qup6_spi_apps_clk>, + <&clock_gcc clk_gcc_blsp2_ahb_clk>; + clock-frequency = <15000000>; + qcom,ipc-gpio = <&tlmm 121 0>; + qcom,finger-detect-gpio = <&pmcobalt_gpios 2 0>; + }; + qcom,sensor-information { compatible = "qcom,sensor-information"; sensor_information0: qcom,sensor-information-0 { diff --git a/arch/arm/boot/dts/qcom/msmfalcon-blsp.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-blsp.dtsi new file mode 100644 index 000000000000..61764a095a29 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msmfalcon-blsp.dtsi @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "msmfalcon-pinctrl.dtsi" + +/ { + aliases { + spi1 = &spi_1; + spi2 = &spi_2; + spi3 = &spi_3; + spi4 = &spi_4; + spi5 = &spi_5; + spi6 = &spi_6; + spi7 = &spi_7; + spi8 = &spi_8; + }; +}; + + +&soc { + spi_1: spi@c175000 { /* BLSP1 QUP1 */ + compatible = "qcom,spi-qup-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg-names = "spi_physical", "spi_bam_physical"; + reg = <0xc175000 0x600>, + <0xc144000 0x1f000>; + interrupt-names = "spi_irq", "spi_bam_irq"; + interrupts = <0 95 0>, <0 238 0>; + spi-max-frequency = <50000000>; + qcom,use-bam; + qcom,ver-reg-exists; + qcom,bam-consumer-pipe-index = <4>; + qcom,bam-producer-pipe-index = <5>; + qcom,master-id = <86>; + qcom,use-pinctrl; + pinctrl-names = "spi_default", "spi_sleep"; + pinctrl-0 = <&spi_1_active>; + pinctrl-1 = <&spi_1_sleep>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc GCC_BLSP1_AHB_CLK>, + <&clock_gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>; + status = "disabled"; + }; + + spi_2: spi@c176000 { /* BLSP1 QUP2 */ + compatible = "qcom,spi-qup-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg-names = "spi_physical", "spi_bam_physical"; + reg = <0xc176000 0x600>, + <0xc144000 0x1f000>; + interrupt-names = "spi_irq", "spi_bam_irq"; + interrupts = <0 96 0>, <0 238 0>; + spi-max-frequency = <50000000>; + qcom,use-bam; + qcom,ver-reg-exists; + qcom,bam-consumer-pipe-index = <6>; + qcom,bam-producer-pipe-index = <7>; + qcom,master-id = <86>; + qcom,use-pinctrl; + pinctrl-names = "spi_default", "spi_sleep"; + pinctrl-0 = <&spi_2_active>; + pinctrl-1 = <&spi_2_sleep>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc GCC_BLSP1_AHB_CLK>, + <&clock_gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>; + status = "disabled"; + }; + + spi_3: spi@c177000 { /* BLSP1 QUP3 */ + compatible = "qcom,spi-qup-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg-names = "spi_physical", "spi_bam_physical"; + reg = <0xc177000 0x600>, + <0xc144000 0x1f000>; + interrupt-names = "spi_irq", "spi_bam_irq"; + interrupts = <0 97 0>, <0 238 0>; + spi-max-frequency = <50000000>; + qcom,use-bam; + qcom,ver-reg-exists; + qcom,bam-consumer-pipe-index = <8>; + qcom,bam-producer-pipe-index = <9>; + qcom,master-id = <86>; + qcom,use-pinctrl; + pinctrl-names = "spi_default", "spi_sleep"; + pinctrl-0 = <&spi_3_active>; + pinctrl-1 = <&spi_3_sleep>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc GCC_BLSP1_AHB_CLK>, + <&clock_gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>; + status = "disabled"; + }; + + spi_4: spi@c178000 { /* BLSP1 QUP4 */ + compatible = "qcom,spi-qup-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg-names = "spi_physical", "spi_bam_physical"; + reg = <0xc178000 0x600>, + <0xc144000 0x1f000>; + interrupt-names = "spi_irq", "spi_bam_irq"; + interrupts = <0 98 0>, <0 238 0>; + spi-max-frequency = <50000000>; + qcom,use-bam; + qcom,ver-reg-exists; + qcom,bam-consumer-pipe-index = <10>; + qcom,bam-producer-pipe-index = <11>; + qcom,master-id = <86>; + qcom,use-pinctrl; + pinctrl-names = "spi_default", "spi_sleep"; + pinctrl-0 = <&spi_4_active>; + pinctrl-1 = <&spi_4_sleep>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc GCC_BLSP1_AHB_CLK>, + <&clock_gcc GCC_BLSP1_QUP4_SPI_APPS_CLK>; + status = "disabled"; + }; + + spi_5: spi@c1b5000 { /* BLSP2 QUP1 */ + compatible = "qcom,spi-qup-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg-names = "spi_physical", "spi_bam_physical"; + reg = <0xc1b5000 0x600>, + <0xc184000 0x1f000>; + interrupt-names = "spi_irq", "spi_bam_irq"; + interrupts = <0 101 0>, <0 
239 0>; + spi-max-frequency = <50000000>; + qcom,use-bam; + qcom,ver-reg-exists; + qcom,bam-consumer-pipe-index = <4>; + qcom,bam-producer-pipe-index = <5>; + qcom,master-id = <84>; + qcom,use-pinctrl; + pinctrl-names = "spi_default", "spi_sleep"; + pinctrl-0 = <&spi_5_active>; + pinctrl-1 = <&spi_5_sleep>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc GCC_BLSP2_AHB_CLK>, + <&clock_gcc GCC_BLSP2_QUP1_SPI_APPS_CLK>; + status = "disabled"; + }; + + spi_6: spi@c1b6000 { /* BLSP2 QUP2 */ + compatible = "qcom,spi-qup-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg-names = "spi_physical", "spi_bam_physical"; + reg = <0xc1b6000 0x600>, + <0xc184000 0x1f000>; + interrupt-names = "spi_irq", "spi_bam_irq"; + interrupts = <0 102 0>, <0 239 0>; + spi-max-frequency = <50000000>; + qcom,use-bam; + qcom,ver-reg-exists; + qcom,bam-consumer-pipe-index = <6>; + qcom,bam-producer-pipe-index = <7>; + qcom,master-id = <84>; + qcom,use-pinctrl; + pinctrl-names = "spi_default", "spi_sleep"; + pinctrl-0 = <&spi_6_active>; + pinctrl-1 = <&spi_6_sleep>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc GCC_BLSP2_AHB_CLK>, + <&clock_gcc GCC_BLSP2_QUP2_SPI_APPS_CLK>; + status = "disabled"; + }; + + spi_7: spi@c1b7000 { /* BLSP2 QUP3 */ + compatible = "qcom,spi-qup-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg-names = "spi_physical", "spi_bam_physical"; + reg = <0xc1b7000 0x600>, + <0xc184000 0x1f000>; + interrupt-names = "spi_irq", "spi_bam_irq"; + interrupts = <0 103 0>, <0 239 0>; + spi-max-frequency = <50000000>; + qcom,use-bam; + qcom,ver-reg-exists; + qcom,bam-consumer-pipe-index = <8>; + qcom,bam-producer-pipe-index = <9>; + qcom,master-id = <84>; + qcom,use-pinctrl; + pinctrl-names = "spi_default", "spi_sleep"; + pinctrl-0 = <&spi_7_active>; + pinctrl-1 = <&spi_7_sleep>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc GCC_BLSP2_AHB_CLK>, + <&clock_gcc GCC_BLSP2_QUP3_SPI_APPS_CLK>; + status = "disabled"; + }; + + spi_8: spi@c1b8000 { /* BLSP2 QUP4 */ + compatible = "qcom,spi-qup-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg-names = "spi_physical", "spi_bam_physical"; + reg = <0xc1b8000 0x600>, + <0xc184000 0x1f000>; + interrupt-names = "spi_irq", "spi_bam_irq"; + interrupts = <0 104 0>, <0 239 0>; + spi-max-frequency = <50000000>; + qcom,use-bam; + qcom,ver-reg-exists; + qcom,bam-consumer-pipe-index = <10>; + qcom,bam-producer-pipe-index = <11>; + qcom,master-id = <84>; + qcom,use-pinctrl; + pinctrl-names = "spi_default", "spi_sleep"; + pinctrl-0 = <&spi_8_active>; + pinctrl-1 = <&spi_8_sleep>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc GCC_BLSP2_AHB_CLK>, + <&clock_gcc GCC_BLSP2_QUP4_SPI_APPS_CLK>; + status = "disabled"; + }; +}; diff --git a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi index 3826b00bf09e..2f1ef974811e 100644 --- a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi +++ b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi @@ -138,6 +138,14 @@ <&funnel_in0_out_funnel_merg>; }; }; + port@2 { + reg = <1>; + funnel_merg_in_funnel_in1:endpoint { + slave-mode; + remote-endpoint = + <&funnel_in1_out_funnel_merg>; + }; + }; }; }; @@ -183,6 +191,167 @@ }; }; + funnel_in1: funnel@6042000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6042000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-in1"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = 
"apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_in1_out_funnel_merg: endpoint { + remote-endpoint = + <&funnel_merg_in_funnel_in1>; + }; + }; + port@5 { + reg = <6>; + funnel_in1_in_funnel_apss_merg: endpoint { + slave-mode; + remote-endpoint = + <&funnel_apss_merg_out_funnel_in1>; + }; + }; + }; + }; + + funnel_apss_merg: funnel@7b70000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x7b70000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-apss-merg"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_apss_merg_out_funnel_in1: endpoint { + remote-endpoint = + <&funnel_in1_in_funnel_apss_merg>; + }; + }; + port@1 { + reg = <0>; + funnel_apss_merg_in_funnel_apss: endpoint { + slave-mode; + remote-endpoint = + <&funnel_apss_out_funnel_apss_merg>; + }; + }; + }; + }; + + funnel_apss: funnel@7b60000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x7b60000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-apss"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_apss_out_funnel_apss_merg: endpoint { + remote-endpoint = + <&funnel_apss_merg_in_funnel_apss>; + }; + }; + port@1 { + reg = <0>; + funnel_apss_in_etm0: endpoint { + slave-mode; + remote-endpoint = + <&etm0_out_funnel_apss>; + }; + }; + port@2 { + reg = <1>; + funnel_apss_in_etm1: endpoint { + slave-mode; + remote-endpoint = + <&etm1_out_funnel_apss>; + }; + }; + port@3 { + reg = <2>; + funnel_apss_in_etm2: endpoint { + slave-mode; + remote-endpoint = + <&etm2_out_funnel_apss>; + }; + }; + port@4 { + reg = <3>; + funnel_apss_in_etm3: endpoint { + slave-mode; + remote-endpoint = + <&etm3_out_funnel_apss>; + }; + }; + port@5 { + reg = <4>; + funnel_apss_in_etm4: endpoint { + slave-mode; + remote-endpoint = + <&etm4_out_funnel_apss>; + }; + }; + port@6 { + reg = <5>; + funnel_apss_in_etm5: endpoint { + slave-mode; + remote-endpoint = + <&etm5_out_funnel_apss>; + }; + }; + port@7 { + reg = <6>; + funnel_apss_in_etm6: endpoint { + slave-mode; + remote-endpoint = + <&etm6_out_funnel_apss>; + }; + }; + port@8 { + reg = <7>; + funnel_apss_in_etm7: endpoint { + slave-mode; + remote-endpoint = + <&etm7_out_funnel_apss>; + }; + }; + }; + }; + stm: stm@6002000 { compatible = "arm,primecell"; arm,primecell-periphid = <0x0003b962>; @@ -204,6 +373,166 @@ }; }; + etm0: etm@7840000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b95d>; + + reg = <0x7840000 0x1000>; + cpu = <&CPU0>; + + coresight-name = "coresight-etm0"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port{ + etm0_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm0>; + }; + }; + }; + + etm1: etm@7940000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b95d>; + + reg = <0x7940000 0x1000>; + cpu = <&CPU1>; + + coresight-name = "coresight-etm1"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port{ + etm1_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm1>; + }; 
+ }; + }; + + etm2: etm@7a40000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b95d>; + + reg = <0x7a40000 0x1000>; + cpu = <&CPU2>; + + coresight-name = "coresight-etm2"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port{ + etm2_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm2>; + }; + }; + }; + + etm3: etm@7b40000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b95d>; + + reg = <0x7b40000 0x1000>; + cpu = <&CPU3>; + + coresight-name = "coresight-etm3"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port{ + etm3_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm3>; + }; + }; + }; + + etm4: etm@7c40000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b95d>; + + reg = <0x7c40000 0x1000>; + cpu = <&CPU4>; + + coresight-name = "coresight-etm4"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port{ + etm4_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm4>; + }; + }; + }; + + etm5: etm@7d40000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b95d>; + + reg = <0x7d40000 0x1000>; + cpu = <&CPU5>; + + coresight-name = "coresight-etm5"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port{ + etm5_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm5>; + }; + }; + }; + + etm6: etm@7e40000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b95d>; + + reg = <0x7e40000 0x1000>; + cpu = <&CPU6>; + + coresight-name = "coresight-etm6"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port{ + etm6_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm6>; + }; + }; + }; + + etm7: etm@7f40000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b95d>; + + reg = <0x7f40000 0x1000>; + cpu = <&CPU7>; + + coresight-name = "coresight-etm7"; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "core_a_clk"; + + port{ + etm7_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm7>; + }; + }; + }; + cti0: cti@6010000 { compatible = "arm,coresight-cti"; reg = <0x6010000 0x1000>; @@ -396,6 +725,110 @@ clock-names = "core_clk", "core_a_clk"; }; + cti_cpu0: cti@7820000 { + compatible = "arm,coresight-cti"; + reg = <0x7820000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu0"; + cpu = <&CPU0>; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "core_clk", "core_a_clk"; + }; + + cti_cpu1: cti@7920000 { + compatible = "arm,coresight-cti"; + reg = <0x7920000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu1"; + cpu = <&CPU1>; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "core_clk", "core_a_clk"; + }; + + cti_cpu2: cti@7a20000 { + compatible = "arm,coresight-cti"; + reg = <0x7a20000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu2"; + cpu = <&CPU2>; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "core_clk", "core_a_clk"; + }; + + cti_cpu3: cti@7b20000 { + compatible = "arm,coresight-cti"; + reg = 
<0x7b20000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu3"; + cpu = <&CPU3>; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "core_clk", "core_a_clk"; + }; + + cti_cpu4: cti@7c20000 { + compatible = "arm,coresight-cti"; + reg = <0x7c20000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu4"; + cpu = <&CPU4>; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "core_clk", "core_a_clk"; + }; + + cti_cpu5: cti@7d20000 { + compatible = "arm,coresight-cti"; + reg = <0x7d20000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu5"; + cpu = <&CPU5>; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "core_clk", "core_a_clk"; + }; + + cti_cpu6: cti@7e20000 { + compatible = "arm,coresight-cti"; + reg = <0x7e20000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu6"; + cpu = <&CPU6>; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "core_clk", "core_a_clk"; + }; + + cti_cpu7: cti@7f20000 { + compatible = "arm,coresight-cti"; + reg = <0x7f20000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu7"; + cpu = <&CPU7>; + + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; + clock-names = "core_clk", "core_a_clk"; + }; + funnel_qatb: funnel@6005000 { compatible = "arm,primecell"; arm,primecell-periphid = <0x0003b908>; diff --git a/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi index f6deef335844..00b9e61d01b8 100644 --- a/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi +++ b/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi @@ -21,11 +21,6 @@ qcom,ion-heap-type = "SYSTEM"; }; - system_contig_heap: qcom,ion-heap@21 { - reg = <21>; - qcom,ion-heap-type = "SYSTEM_CONTIG"; - }; - qcom,ion-heap@22 { /* ADSP HEAP */ reg = <22>; memory-region = <&adsp_mem>; diff --git a/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi index e8c66871425d..f13e34f8296b 100644 --- a/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi +++ b/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi @@ -112,5 +112,262 @@ bias-pull-down; /* pull down */ }; }; + + /* SPI CONFIGURATION */ + spi_1 { + spi_1_active: spi_1_active { + mux { + pins = "gpio0", "gpio1", + "gpio2", "gpio3"; + function = "blsp_spi1"; + }; + + config { + pins = "gpio0", "gpio1", + "gpio2", "gpio3"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_1_sleep: spi_1_sleep { + mux { + pins = "gpio0", "gpio1", + "gpio2", "gpio3"; + function = "blsp_spi1"; + }; + + config { + pins = "gpio0", "gpio1", + "gpio2", "gpio3"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_2 { + spi_2_active: spi_2_active { + mux { + pins = "gpio4", "gpio5", + "gpio6", "gpio7"; + function = "blsp_spi2"; + }; + + config { + pins = "gpio4", "gpio5", + "gpio6", "gpio7"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_2_sleep: spi_2_sleep { + mux { + pins = "gpio4", "gpio5", + "gpio6", "gpio7"; + function = "blsp_spi2"; + }; + + config { + pins = "gpio4", "gpio5", + "gpio6", "gpio7"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_3 { + spi_3_active: spi_3_active { + mux { + pins = "gpio8", "gpio9", + "gpio10", "gpio11"; + function = "blsp_spi3"; + }; + + config { + pins = "gpio8", "gpio9", + "gpio10", "gpio11"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_3_sleep: spi_3_sleep { + mux 
{ + pins = "gpio8", "gpio9", + "gpio10", "gpio11"; + function = "blsp_spi3"; + }; + + config { + pins = "gpio8", "gpio9", + "gpio10", "gpio11"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_4 { + spi_4_active: spi_4_active { + mux { + pins = "gpio12", "gpio13", + "gpio14", "gpio15"; + function = "blsp_spi4"; + }; + + config { + pins = "gpio12", "gpio13", + "gpio14", "gpio15"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_4_sleep: spi_4_sleep { + mux { + pins = "gpio12", "gpio13", + "gpio14", "gpio15"; + function = "blsp_spi4"; + }; + + config { + pins = "gpio12", "gpio13", + "gpio14", "gpio15"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_5 { + spi_5_active: spi_5_active { + mux { + pins = "gpio16", "gpio17", + "gpio18", "gpio19"; + function = "blsp_spi5"; + }; + + config { + pins = "gpio16", "gpio17", + "gpio18", "gpio19"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_5_sleep: spi_5_sleep { + mux { + pins = "gpio16", "gpio17", + "gpio18", "gpio19"; + function = "blsp_spi5"; + }; + + config { + pins = "gpio16", "gpio17", + "gpio18", "gpio19"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_6 { + spi_6_active: spi_6_active { + mux { + pins = "gpio49", "gpio52", + "gpio22", "gpio23"; + function = "blsp_spi6"; + }; + + config { + pins = "gpio49", "gpio52", + "gpio22", "gpio23"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_6_sleep: spi_6_sleep { + mux { + pins = "gpio49", "gpio52", + "gpio22", "gpio23"; + function = "blsp_spi6"; + }; + + config { + pins = "gpio49", "gpio52", + "gpio22", "gpio23"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_7 { + spi_7_active: spi_7_active { + mux { + pins = "gpio24", "gpio25", + "gpio26", "gpio27"; + function = "blsp_spi7"; + }; + + config { + pins = "gpio24", "gpio25", + "gpio26", "gpio27"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_7_sleep: spi_7_sleep { + mux { + pins = "gpio24", "gpio25", + "gpio26", "gpio27"; + function = "blsp_spi7"; + }; + + config { + pins = "gpio24", "gpio25", + "gpio26", "gpio27"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + spi_8 { + spi_8_active: spi_8_active { + mux { + pins = "gpio28", "gpio29", + "gpio30", "gpio31"; + function = "blsp_spi8"; + }; + + config { + pins = "gpio28", "gpio29", + "gpio30", "gpio31"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_8_sleep: spi_8_sleep { + mux { + pins = "gpio28", "gpio29", + "gpio30", "gpio31"; + function = "blsp_spi8"; + }; + + config { + pins = "gpio28", "gpio29", + "gpio30", "gpio31"; + drive-strength = <6>; + bias-disable; + }; + }; + }; }; }; diff --git a/arch/arm/boot/dts/qcom/msmfalcon.dtsi b/arch/arm/boot/dts/qcom/msmfalcon.dtsi index fffad9374a69..f2adc32fb732 100644 --- a/arch/arm/boot/dts/qcom/msmfalcon.dtsi +++ b/arch/arm/boot/dts/qcom/msmfalcon.dtsi @@ -447,7 +447,7 @@ <0x10b4000 0x800>; reg-names = "dcc-base", "dcc-ram-base"; - clocks = <&clock_rpmcc RPM_QDSS_CLK>; + clocks = <&clock_rpmcc GCC_DCC_AHB_CLK>; clock-names = "dcc_clk"; }; @@ -899,3 +899,4 @@ #include "msm-pm2falcon.dtsi" #include "msm-arm-smmu-falcon.dtsi" #include "msm-arm-smmu-impl-defs-falcon.dtsi" +#include "msmfalcon-blsp.dtsi" diff --git a/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi b/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi index f6deef335844..00b9e61d01b8 100644 --- a/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi +++ b/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi @@ -21,11 +21,6 @@ qcom,ion-heap-type = "SYSTEM"; }; - system_contig_heap: qcom,ion-heap@21 { - reg = 
<21>; - qcom,ion-heap-type = "SYSTEM_CONTIG"; - }; - qcom,ion-heap@22 { /* ADSP HEAP */ reg = <22>; memory-region = <&adsp_mem>; diff --git a/arch/arm/boot/dts/qcom/msmtriton.dtsi b/arch/arm/boot/dts/qcom/msmtriton.dtsi index 807c40fcc46e..8374d27e5b56 100644 --- a/arch/arm/boot/dts/qcom/msmtriton.dtsi +++ b/arch/arm/boot/dts/qcom/msmtriton.dtsi @@ -41,59 +41,59 @@ #address-cells = <2>; #size-cells = <0>; - CPU0: cpu@0 { + CPU0: cpu@100 { device_type = "cpu"; compatible = "arm,armv8"; - reg = <0x0 0x0>; + reg = <0x0 0x100>; enable-method = "psci"; }; - CPU1: cpu@1 { + CPU1: cpu@101 { device_type = "cpu"; compatible = "arm,armv8"; - reg = <0x0 0x1>; + reg = <0x0 0x101>; enable-method = "psci"; }; - CPU2: cpu@2 { + CPU2: cpu@102 { device_type = "cpu"; compatible = "arm,armv8"; - reg = <0x0 0x2>; + reg = <0x0 0x102>; enable-method = "psci"; }; - CPU3: cpu@3 { + CPU3: cpu@103 { device_type = "cpu"; compatible = "arm,armv8"; - reg = <0x0 0x3>; + reg = <0x0 0x103>; enable-method = "psci"; }; - CPU4: cpu@100 { + CPU4: cpu@0 { device_type = "cpu"; compatible = "arm,armv8"; - reg = <0x0 0x100>; + reg = <0x0 0x0>; enable-method = "psci"; }; - CPU5: cpu@101 { + CPU5: cpu@1 { device_type = "cpu"; compatible = "arm,armv8"; - reg = <0x0 0x101>; + reg = <0x0 0x1>; enable-method = "psci"; }; - CPU6: cpu@102 { + CPU6: cpu@2 { device_type = "cpu"; compatible = "arm,armv8"; - reg = <0x0 0x102>; + reg = <0x0 0x2>; enable-method = "psci"; }; - CPU7: cpu@103 { + CPU7: cpu@3 { device_type = "cpu"; compatible = "arm,armv8"; - reg = <0x0 0x103>; + reg = <0x0 0x3>; enable-method = "psci"; }; diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h index b0c912feaa2f..8a394f336003 100644 --- a/arch/arm/boot/dts/sama5d2-pinfunc.h +++ b/arch/arm/boot/dts/sama5d2-pinfunc.h @@ -837,8 +837,8 @@ #define PIN_PD23__ISC_FIELD PINMUX_PIN(PIN_PD23, 6, 4) #define PIN_PD24 120 #define PIN_PD24__GPIO PINMUX_PIN(PIN_PD24, 0, 0) -#define PIN_PD24__UTXD2 PINMUX_PIN(PIN_PD23, 1, 2) -#define PIN_PD24__FLEXCOM4_IO3 PINMUX_PIN(PIN_PD23, 3, 3) +#define PIN_PD24__UTXD2 PINMUX_PIN(PIN_PD24, 1, 2) +#define PIN_PD24__FLEXCOM4_IO3 PINMUX_PIN(PIN_PD24, 3, 3) #define PIN_PD25 121 #define PIN_PD25__GPIO PINMUX_PIN(PIN_PD25, 0, 0) #define PIN_PD25__SPI1_SPCK PINMUX_PIN(PIN_PD25, 1, 3) diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts index 530ab28e9ca2..d21f50ba3172 100644 --- a/arch/arm/boot/dts/sun5i-r8-chip.dts +++ b/arch/arm/boot/dts/sun5i-r8-chip.dts @@ -52,7 +52,7 @@ / { model = "NextThing C.H.I.P."; - compatible = "nextthing,chip", "allwinner,sun5i-r8"; + compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13"; aliases { i2c0 = &i2c0; diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h deleted file mode 100644 index bca864ac945f..000000000000 --- a/arch/arm/include/asm/mach/mmc.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * arch/arm/include/asm/mach/mmc.h - */ -#ifndef ASMARM_MACH_MMC_H -#define ASMARM_MACH_MMC_H - -#include <linux/mmc/host.h> -#include <linux/mmc/card.h> -#include <linux/mmc/sdio_func.h> - -struct embedded_sdio_data { - struct sdio_cis cis; - struct sdio_cccr cccr; - struct sdio_embedded_func *funcs; - int num_funcs; -}; - -struct mmc_platform_data { - unsigned int ocr_mask; /* available voltages */ - int built_in; /* built-in device flag */ - int card_present; /* card detect state */ - u32 (*translate_vdd)(struct device *, unsigned int); - unsigned int (*status)(struct device *); - struct embedded_sdio_data 
*embedded_sdio; - int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id); -}; - -#endif diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index aeddd28b3595..92fd2c8a9af0 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) #define pmd_large(pmd) (pmd_val(pmd) & 2) #define pmd_bad(pmd) (pmd_val(pmd) & 2) +#define pmd_present(pmd) (pmd_val(pmd)) #define copy_pmd(pmdpd,pmdps) \ do { \ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index a745a2a53853..fd929b5ded9e 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -212,6 +212,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) : !!(pmd_val(pmd) & (val))) #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val))) +#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID)) #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF)) #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL)) static inline pte_t pte_mkspecial(pte_t pte) @@ -257,10 +258,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) -/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */ +/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */ static inline pmd_t pmd_mknotpresent(pmd_t pmd) { - return __pmd(0); + return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID); } static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 348caabb7625..d62204060cbe 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) #define pmd_none(pmd) (!pmd_val(pmd)) -#define pmd_present(pmd) (pmd_val(pmd)) static inline pte_t *pmd_page_vaddr(pmd_t pmd) { diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index ef9119f7462e..4d9375814b53 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target, if (ret) return ret; - vfp_flush_hwstate(thread); thread->vfpstate.hard = new_vfp; + vfp_flush_hwstate(thread); return 0; } diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 61d96a645ff3..12d727fae0a7 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -886,11 +886,14 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); old_pmd = *pmd; - kvm_set_pmd(pmd, *new_pmd); - if (pmd_present(old_pmd)) + if (pmd_present(old_pmd)) { + pmd_clear(pmd); kvm_tlb_flush_vmid_ipa(kvm, addr); - else + } else { get_page(virt_to_page(pmd)); + } + + kvm_set_pmd(pmd, *new_pmd); return 0; } @@ -939,12 +942,14 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, /* Create 2nd stage page table mapping - Level 3 */ old_pte = *pte; - kvm_set_pte(pte, *new_pte); - if (pte_present(old_pte)) + if (pte_present(old_pte)) { + kvm_set_pte(pte, __pte(0)); kvm_tlb_flush_vmid_ipa(kvm, addr); - else + } else { get_page(virt_to_page(pte)); + } + kvm_set_pte(pte, *new_pte); return 0; } diff --git 
a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c index acaf7056efa5..e08d02667c81 100644 --- a/arch/arm/mach-imx/mach-imx6ul.c +++ b/arch/arm/mach-imx/mach-imx6ul.c @@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev) static void __init imx6ul_enet_phy_init(void) { if (IS_BUILTIN(CONFIG_PHYLIB)) - phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff, + phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK, ksz8081_phy_fixup); } diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index 55348ee5a352..feed36b32ff6 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -162,22 +162,16 @@ exit: } /* - * This ioremap hook is used on Armada 375/38x to ensure that PCIe - * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This - * is needed as a workaround for a deadlock issue between the PCIe - * interface and the cache controller. + * This ioremap hook is used on Armada 375/38x to ensure that all MMIO + * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is + * needed for the HW I/O coherency mechanism to work properly without + * deadlock. */ static void __iomem * -armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size, - unsigned int mtype, void *caller) +armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size, + unsigned int mtype, void *caller) { - struct resource pcie_mem; - - mvebu_mbus_get_pcie_mem_aperture(&pcie_mem); - - if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end) - mtype = MT_UNCACHED; - + mtype = MT_UNCACHED; return __arm_ioremap_caller(phys_addr, size, mtype, caller); } @@ -186,7 +180,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np) struct device_node *cache_dn; coherency_cpu_base = of_iomap(np, 0); - arch_ioremap_caller = armada_pcie_wa_ioremap_caller; + arch_ioremap_caller = armada_wa_ioremap_caller; /* * We should switch the PL310 to I/O coherency mode only if diff --git a/arch/arm/mach-qcom/board-falcon.c b/arch/arm/mach-qcom/board-falcon.c index e9374050b2cb..aec16886308d 100644 --- a/arch/arm/mach-qcom/board-falcon.c +++ b/arch/arm/mach-qcom/board-falcon.c @@ -31,3 +31,20 @@ DT_MACHINE_START(MSMFALCON_DT, .init_machine = msmfalcon_init, .dt_compat = msmfalcon_dt_match, MACHINE_END + +static const char *msmtriton_dt_match[] __initconst = { + "qcom,msmtriton", + "qcom,apqtriton", + NULL +}; + +static void __init msmtriton_init(void) +{ + board_dt_populate(NULL); +} + +DT_MACHINE_START(MSMTRITON_DT, + "Qualcomm Technologies, Inc. 
MSM TRITON (Flattened Device Tree)") + .init_machine = msmtriton_init, + .dt_compat = msmtriton_dt_match, +MACHINE_END diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 4cdb28c92e5f..6236b3e1297c 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -73,7 +73,7 @@ config DEBUG_RODATA If in doubt, say Y config DEBUG_ALIGN_RODATA - depends on DEBUG_RODATA && ARM64_4K_PAGES + depends on DEBUG_RODATA bool "Align linker sections up to SECTION_SIZE" help If this option is enabled, sections that may potentially be marked as diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 14c74a66c58e..4c2298924cc3 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -116,7 +116,7 @@ dtbs: prepare scripts dtbs_install: $(Q)$(MAKE) $(dtbinst)=$(boot)/dts -Image.gz-dtb: vmlinux scripts dtbs +Image-dtb Image.gz-dtb: vmlinux scripts dtbs $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ PHONY += vdso_install diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore index eb3551131b1e..34e35209fc2e 100644 --- a/arch/arm64/boot/.gitignore +++ b/arch/arm64/boot/.gitignore @@ -1,3 +1,4 @@ Image +Image-dtb Image.gz Image.gz-dtb diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile index a0cef8edcbad..6fee388eb386 100644 --- a/arch/arm64/boot/Makefile +++ b/arch/arm64/boot/Makefile @@ -30,6 +30,9 @@ $(obj)/Image: vmlinux FORCE $(obj)/Image.bz2: $(obj)/Image FORCE $(call if_changed,bzip2) +$(obj)/Image-dtb: $(obj)/Image $(DTB_OBJS) FORCE + $(call if_changed,cat) + $(obj)/Image.gz: $(obj)/Image FORCE $(call if_changed,gzip) diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index 53442b5ee4ff..3e1a84b01b50 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts @@ -143,5 +143,310 @@ <&A53_3>; }; + etr@20070000 { + compatible = "arm,coresight-tmc", "arm,primecell"; + reg = <0 0x20070000 0 0x1000>; + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + port { + etr_in_port: endpoint { + slave-mode; + remote-endpoint = <&replicator_out_port1>; + }; + }; + }; + + tpiu@20030000 { + compatible = "arm,coresight-tpiu", "arm,primecell"; + reg = <0 0x20030000 0 0x1000>; + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + port { + tpiu_in_port: endpoint { + slave-mode; + remote-endpoint = <&replicator_out_port0>; + }; + }; + }; + + replicator@20020000 { + /* non-configurable replicators don't show up on the + * AMBA bus. As such no need to add "arm,primecell". 
+ */ + compatible = "arm,coresight-replicator"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* replicator output ports */ + port@0 { + reg = <0>; + replicator_out_port0: endpoint { + remote-endpoint = <&tpiu_in_port>; + }; + }; + + port@1 { + reg = <1>; + replicator_out_port1: endpoint { + remote-endpoint = <&etr_in_port>; + }; + }; + + /* replicator input port */ + port@2 { + reg = <0>; + replicator_in_port0: endpoint { + slave-mode; + remote-endpoint = <&etf_out_port>; + }; + }; + }; + }; + + etf@20010000 { + compatible = "arm,coresight-tmc", "arm,primecell"; + reg = <0 0x20010000 0 0x1000>; + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* input port */ + port@0 { + reg = <0>; + etf_in_port: endpoint { + slave-mode; + remote-endpoint = + <&main_funnel_out_port>; + }; + }; + + /* output port */ + port@1 { + reg = <0>; + etf_out_port: endpoint { + remote-endpoint = + <&replicator_in_port0>; + }; + }; + }; + }; + + main_funnel@20040000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0 0x20040000 0 0x1000>; + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + main_funnel_out_port: endpoint { + remote-endpoint = + <&etf_in_port>; + }; + }; + + port@1 { + reg = <0>; + main_funnel_in_port0: endpoint { + slave-mode; + remote-endpoint = + <&A72_57_funnel_out_port>; + }; + }; + + port@2 { + reg = <1>; + main_funnel_in_port1: endpoint { + slave-mode; + remote-endpoint = <&A53_funnel_out_port>; + }; + }; + + }; + }; + + A72_57_funnel@220c0000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0 0x220c0000 0 0x1000>; + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + A72_57_funnel_out_port: endpoint { + remote-endpoint = + <&main_funnel_in_port0>; + }; + }; + + port@1 { + reg = <0>; + A72_57_funnel_in_port0: endpoint { + slave-mode; + remote-endpoint = + <&A72_57_etm0_out_port>; + }; + }; + + port@2 { + reg = <1>; + A72_57_funnel_in_port1: endpoint { + slave-mode; + remote-endpoint = + <&A72_57_etm1_out_port>; + }; + }; + }; + }; + + A53_funnel@220c0000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0 0x230c0000 0 0x1000>; + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + A53_funnel_out_port: endpoint { + remote-endpoint = + <&main_funnel_in_port1>; + }; + }; + + port@1 { + reg = <0>; + A53_funnel_in_port0: endpoint { + slave-mode; + remote-endpoint = <&A53_etm0_out_port>; + }; + }; + + port@2 { + reg = <1>; + A53_funnel_in_port1: endpoint { + slave-mode; + remote-endpoint = <&A53_etm1_out_port>; + }; + }; + port@3 { + reg = <2>; + A53_funnel_in_port2: endpoint { + slave-mode; + remote-endpoint = <&A53_etm2_out_port>; + }; + }; + port@4 { + reg = <3>; + A53_funnel_in_port3: endpoint { + slave-mode; + remote-endpoint = <&A53_etm3_out_port>; + }; + }; + }; + }; + + etm@22040000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x22040000 0 0x1000>; + + cpu = <&A57_0>; + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + port { + A72_57_etm0_out_port: endpoint { + remote-endpoint = <&A72_57_funnel_in_port0>; + }; + }; + }; + + etm@22140000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x22140000 0 0x1000>; + + cpu = <&A57_1>; + clocks = <&soc_smc50mhz>; + 
clock-names = "apb_pclk"; + port { + A72_57_etm1_out_port: endpoint { + remote-endpoint = <&A72_57_funnel_in_port1>; + }; + }; + }; + + etm@23040000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x23040000 0 0x1000>; + + cpu = <&A53_0>; + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + port { + A53_etm0_out_port: endpoint { + remote-endpoint = <&A53_funnel_in_port0>; + }; + }; + }; + + etm@23140000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x23140000 0 0x1000>; + + cpu = <&A53_1>; + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + port { + A53_etm1_out_port: endpoint { + remote-endpoint = <&A53_funnel_in_port1>; + }; + }; + }; + + etm@23240000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x23240000 0 0x1000>; + + cpu = <&A53_2>; + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + port { + A53_etm2_out_port: endpoint { + remote-endpoint = <&A53_funnel_in_port2>; + }; + }; + }; + + etm@23340000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0 0x23340000 0 0x1000>; + + cpu = <&A53_3>; + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + port { + A53_etm3_out_port: endpoint { + remote-endpoint = <&A53_funnel_in_port3>; + }; + }; + }; + #include "juno-base.dtsi" }; diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 861ed8acdc3f..d49e164867e1 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += bug.h generic-y += bugs.h -generic-y += checksum.h generic-y += clkdev.h generic-y += cputime.h generic-y += current.h diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index fcaf3cce639a..f8856a450852 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -245,4 +245,24 @@ lr .req x30 // link register .long \sym\()_hi32 .endm + /* + * mov_q - move an immediate constant into a 64-bit register using + * between 2 and 4 movz/movk instructions (depending on the + * magnitude and sign of the operand) + */ + .macro mov_q, reg, val + .if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff) + movz \reg, :abs_g1_s:\val + .else + .if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff) + movz \reg, :abs_g2_s:\val + .else + movz \reg, :abs_g3:\val + movk \reg, :abs_g2_nc:\val + .endif + movk \reg, :abs_g1_nc:\val + .endif + movk \reg, :abs_g0_nc:\val + .endm + #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h new file mode 100644 index 000000000000..09f65339d66d --- /dev/null +++ b/arch/arm64/include/asm/checksum.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef __ASM_CHECKSUM_H +#define __ASM_CHECKSUM_H + +#include <linux/types.h> + +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + sum += (sum >> 16) | (sum << 16); + return ~(__force __sum16)(sum >> 16); +} +#define csum_fold csum_fold + +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + __uint128_t tmp; + u64 sum; + + tmp = *(const __uint128_t *)iph; + iph += 16; + ihl -= 4; + tmp += ((tmp >> 64) | (tmp << 64)); + sum = tmp >> 64; + do { + sum += *(const u32 *)iph; + iph += 4; + } while (--ihl); + + sum += ((sum >> 32) | (sum << 32)); + return csum_fold(sum >> 32); +} +#define ip_fast_csum ip_fast_csum + +#include <asm-generic/checksum.h> + +#endif /* __ASM_CHECKSUM_H */ diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 24ed037f09fd..83d48a599f69 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) #endif -#ifdef CONFIG_COMPAT - #ifdef __AARCH64EB__ #define COMPAT_ELF_PLATFORM ("v8b") #else #define COMPAT_ELF_PLATFORM ("v8l") #endif +#ifdef CONFIG_COMPAT + #define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) /* AArch32 registers. */ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 5c25b831273d..9786f770088d 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -133,7 +133,6 @@ * Section */ #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) -#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58) #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index c3c2518eecfe..9a09ccf7122d 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -348,6 +348,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#define pmd_present(pmd) pte_present(pmd_pte(pmd)) #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) #define pmd_young(pmd) pte_young(pmd_pte(pmd)) #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) @@ -357,7 +358,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) -#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK)) +#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID)) #define __HAVE_ARCH_PMD_WRITE #define pmd_write(pmd) pte_write(pmd_pte(pmd)) @@ -396,7 +397,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot); #define pmd_none(pmd) (!pmd_val(pmd)) -#define pmd_present(pmd) (pmd_val(pmd)) #define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) @@ -595,6 +595,21 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) } #ifdef CONFIG_ARM64_HW_AFDBM +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty); + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +static inline int 
pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty); +} +#endif + /* * Atomic pte/pmd modifications. */ @@ -647,9 +662,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, } #ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define __HAVE_ARCH_PMDP_GET_AND_CLEAR -static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, - unsigned long address, pmd_t *pmdp) +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) { return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp)); } diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index e9e5467e0bf4..a307eb6e7fa8 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -58,6 +58,7 @@ #define COMPAT_PSR_Z_BIT 0x40000000 #define COMPAT_PSR_N_BIT 0x80000000 #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ +#define COMPAT_PSR_GE_MASK 0x000f0000 #ifdef CONFIG_CPU_BIG_ENDIAN #define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT @@ -151,35 +152,9 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) return regs->regs[0]; } -/* - * Are the current registers suitable for user mode? (used to maintain - * security in signal handlers) - */ -static inline int valid_user_regs(struct user_pt_regs *regs) -{ - if (user_mode(regs) && (regs->pstate & PSR_I_BIT) == 0) { - regs->pstate &= ~(PSR_F_BIT | PSR_A_BIT); - - /* The T bit is reserved for AArch64 */ - if (!(regs->pstate & PSR_MODE32_BIT)) - regs->pstate &= ~COMPAT_PSR_T_BIT; - - return 1; - } - - /* - * Force PSR to something logical... - */ - regs->pstate &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | \ - COMPAT_PSR_T_BIT | PSR_MODE32_BIT; - - if (!(regs->pstate & PSR_MODE32_BIT)) { - regs->pstate &= ~COMPAT_PSR_T_BIT; - regs->pstate |= PSR_MODE_EL0t; - } - - return 0; -} +/* We must avoid circular header include via sched.h */ +struct task_struct; +int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task); #define instruction_pointer(regs) ((unsigned long)(regs)->pc) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 17cf73e70752..4b2caefd3a8f 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -23,6 +23,8 @@ #include <linux/bitops.h> #include <linux/bug.h> +#include <linux/compat.h> +#include <linux/elf.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/personality.h> @@ -107,6 +109,7 @@ static const char *const compat_hwcap2_str[] = { static int c_show(struct seq_file *m, void *v) { int i, j; + bool compat = personality(current->personality) == PER_LINUX32; seq_printf(m, "Processor\t: AArch64 Processor rev %d (%s)\n", read_cpuid_id() & 15, ELF_PLATFORM); @@ -120,6 +123,9 @@ static int c_show(struct seq_file *m, void *v) * "processor". Give glibc what it expects. */ seq_printf(m, "processor\t: %d\n", i); + if (compat) + seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", + MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", loops_per_jiffy / (500000UL/HZ), @@ -132,7 +138,7 @@ static int c_show(struct seq_file *m, void *v) * software which does already (at least for 32-bit). 
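The csum_fold()/ip_fast_csum() helpers added in arch/arm64/include/asm/checksum.h above rely on the usual end-around-carry fold. A minimal stand-alone C sketch of the same arithmetic, for reference only: ipv4_header_checksum() and the sample header bytes are illustrative, not kernel code, and the kernel's 128-bit fast path is not reproduced.

#include <stdint.h>
#include <stdio.h>

/* Same folding trick as the new csum_fold(): add the two 16-bit halves
 * with the end-around carry, then take the one's complement. */
static uint16_t csum_fold(uint32_t sum)
{
	sum += (sum >> 16) | (sum << 16);
	return (uint16_t)~(sum >> 16);
}

/* Reference computation: sum the header as big-endian 16-bit words
 * (checksum field zeroed), then fold. ihl is in 32-bit words, as in the
 * kernel interface. */
static uint16_t ipv4_header_checksum(const unsigned char *hdr, unsigned int ihl)
{
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < ihl * 2; i++)
		sum += ((uint32_t)hdr[2 * i] << 8) | hdr[2 * i + 1];
	return csum_fold(sum);
}

int main(void)
{
	/* Textbook 20-byte IPv4 header with the checksum field zeroed. */
	unsigned char hdr[20] = {
		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
		0xac, 0x10, 0x0a, 0x0c,
	};

	printf("checksum: 0x%04x\n", ipv4_header_checksum(hdr, 5)); /* 0xb1e6 */
	return 0;
}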
*/ seq_puts(m, "Features\t:"); - if (personality(current->personality) == PER_LINUX32) { + if (compat) { #ifdef CONFIG_COMPAT for (j = 0; compat_hwcap_str[j]; j++) if (compat_elf_hwcap & (1 << j)) diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index f82036e02485..936022f0655e 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -61,7 +61,7 @@ ENTRY(entry) */ mov x20, x0 // DTB address ldr x0, [sp, #16] // relocated _text address - movz x21, #:abs_g0:stext_offset + ldr w21, =stext_offset add x21, x0, x21 /* diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a88a15447c3b..491ad4124615 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -25,6 +25,7 @@ #include <linux/irqchip/arm-gic-v3.h> #include <asm/assembler.h> +#include <asm/boot.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> #include <asm/cache.h> @@ -100,8 +101,6 @@ _head: #endif #ifdef CONFIG_EFI - .globl __efistub_stext_offset - .set __efistub_stext_offset, stext - _head .align 3 pe_header: .ascii "PE" @@ -121,11 +120,11 @@ optional_header: .short 0x20b // PE32+ format .byte 0x02 // MajorLinkerVersion .byte 0x14 // MinorLinkerVersion - .long _end - stext // SizeOfCode + .long _end - efi_header_end // SizeOfCode .long 0 // SizeOfInitializedData .long 0 // SizeOfUninitializedData .long __efistub_entry - _head // AddressOfEntryPoint - .long __efistub_stext_offset // BaseOfCode + .long efi_header_end - _head // BaseOfCode extra_header_fields: .quad 0 // ImageBase @@ -142,7 +141,7 @@ extra_header_fields: .long _end - _head // SizeOfImage // Everything before the kernel image is considered part of the header - .long __efistub_stext_offset // SizeOfHeaders + .long efi_header_end - _head // SizeOfHeaders .long 0 // CheckSum .short 0xa // Subsystem (EFI application) .short 0 // DllCharacteristics @@ -186,10 +185,10 @@ section_table: .byte 0 .byte 0 .byte 0 // end of 0 padding of section name - .long _end - stext // VirtualSize - .long __efistub_stext_offset // VirtualAddress - .long _edata - stext // SizeOfRawData - .long __efistub_stext_offset // PointerToRawData + .long _end - efi_header_end // VirtualSize + .long efi_header_end - _head // VirtualAddress + .long _edata - efi_header_end // SizeOfRawData + .long efi_header_end - _head // PointerToRawData .long 0 // PointerToRelocations (0 for executables) .long 0 // PointerToLineNumbers (0 for executables) @@ -198,20 +197,23 @@ section_table: .long 0xe0500020 // Characteristics (section flags) /* - * EFI will load stext onwards at the 4k section alignment + * EFI will load .text onwards at the 4k section alignment * described in the PE/COFF header. To ensure that instruction * sequences using an adrp and a :lo12: immediate will function - * correctly at this alignment, we must ensure that stext is + * correctly at this alignment, we must ensure that .text is * placed at a 4k boundary in the Image to begin with. */ .align 12 +efi_header_end: #endif + __INIT + ENTRY(stext) bl preserve_boot_args bl el2_setup // Drop to EL1, w20=cpu_boot_mode - mov x23, xzr // KASLR offset, defaults to 0 adrp x24, __PHYS_OFFSET + and x23, x24, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0 bl set_cpu_boot_mode_flag bl __create_page_tables // x25=TTBR0, x26=TTBR1 /* @@ -220,13 +222,11 @@ ENTRY(stext) * On return, the CPU will be ready for the MMU to be turned on and * the TCR will have been set. 
*/ - ldr x27, 0f // address to jump to after + bl __cpu_setup // initialise processor + adr_l x27, __primary_switch // address to jump to after // MMU has been enabled - adr_l lr, __enable_mmu // return (PIC) address - b __cpu_setup // initialise processor + b __enable_mmu ENDPROC(stext) - .align 3 -0: .quad __mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR /* * Preserve the arguments passed by the bootloader in x0 .. x3 @@ -336,7 +336,7 @@ __create_page_tables: cmp x0, x6 b.lo 1b - ldr x7, =SWAPPER_MM_MMUFLAGS + mov x7, SWAPPER_MM_MMUFLAGS /* * Create the identity mapping. @@ -392,12 +392,13 @@ __create_page_tables: * Map the kernel image (starting with PHYS_OFFSET). */ mov x0, x26 // swapper_pg_dir - ldr x5, =KIMAGE_VADDR + mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text) add x5, x5, x23 // add KASLR displacement create_pgd_entry x0, x5, x3, x6 - ldr w6, kernel_img_size - add x6, x6, x5 - mov x3, x24 // phys offset + adrp x6, _end // runtime __pa(_end) + adrp x3, _text // runtime __pa(_text) + sub x6, x6, x3 // _end - _text + add x6, x6, x5 // runtime __va(_end) create_block_map x0, x7, x3, x5, x6 /* @@ -412,16 +413,13 @@ __create_page_tables: ret x28 ENDPROC(__create_page_tables) - -kernel_img_size: - .long _end - (_head - TEXT_OFFSET) .ltorg /* * The following fragment of code is executed with the MMU enabled. */ .set initial_sp, init_thread_union + THREAD_START_SP -__mmap_switched: +__primary_switched: mov x28, lr // preserve LR adr_l x8, vectors // load VBAR_EL1 with virtual msr vbar_el1, x8 // vector table address @@ -435,44 +433,6 @@ __mmap_switched: bl __pi_memset dsb ishst // Make zero page visible to PTW -#ifdef CONFIG_RELOCATABLE - - /* - * Iterate over each entry in the relocation table, and apply the - * relocations in place. - */ - adr_l x8, __dynsym_start // start of symbol table - adr_l x9, __reloc_start // start of reloc table - adr_l x10, __reloc_end // end of reloc table - -0: cmp x9, x10 - b.hs 2f - ldp x11, x12, [x9], #24 - ldr x13, [x9, #-8] - cmp w12, #R_AARCH64_RELATIVE - b.ne 1f - add x13, x13, x23 // relocate - str x13, [x11, x23] - b 0b - -1: cmp w12, #R_AARCH64_ABS64 - b.ne 0b - add x12, x12, x12, lsl #1 // symtab offset: 24x top word - add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word - ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx - ldr x15, [x12, #8] // Elf64_Sym::st_value - cmp w14, #-0xf // SHN_ABS (0xfff1) ? - add x14, x15, x23 // relocate - csel x15, x14, x15, ne - add x15, x13, x15 - str x15, [x11, x23] - b 0b - -2: adr_l x8, kimage_vaddr // make relocated kimage_vaddr - dc cvac, x8 // value visible to secondaries - dsb sy // with MMU off -#endif - adr_l sp, initial_sp, x4 mov x4, sp and x4, x4, #~(THREAD_SIZE - 1) @@ -488,17 +448,19 @@ __mmap_switched: bl kasan_early_init #endif #ifdef CONFIG_RANDOMIZE_BASE - cbnz x23, 0f // already running randomized? + tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized? + b.ne 0f mov x0, x21 // pass FDT address in x0 + mov x1, x23 // pass modulo offset in x1 bl kaslr_early_init // parse FDT for KASLR options cbz x0, 0f // KASLR disabled? just proceed - mov x23, x0 // record KASLR offset + orr x23, x23, x0 // record KASLR offset ret x28 // we must enable KASLR, return // to __enable_mmu() 0: #endif b start_kernel -ENDPROC(__mmap_switched) +ENDPROC(__primary_switched) /* * end early head section, begin head code that is also used for @@ -613,7 +575,7 @@ ENDPROC(el2_setup) * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed * in x20. 
See arch/arm64/include/asm/virt.h for more info. */ -ENTRY(set_cpu_boot_mode_flag) +set_cpu_boot_mode_flag: adr_l x1, __boot_cpu_mode cmp w20, #BOOT_CPU_MODE_EL2 b.ne 1f @@ -646,7 +608,7 @@ ENTRY(secondary_holding_pen) bl el2_setup // Drop to EL1, w20=cpu_boot_mode bl set_cpu_boot_mode_flag mrs x0, mpidr_el1 - ldr x1, =MPIDR_HWID_BITMASK + mov_q x1, MPIDR_HWID_BITMASK and x0, x0, x1 adr_l x3, secondary_holding_pen_release pen: ldr x4, [x3] @@ -666,7 +628,7 @@ ENTRY(secondary_entry) b secondary_startup ENDPROC(secondary_entry) -ENTRY(secondary_startup) +secondary_startup: /* * Common entry point for secondary CPUs. */ @@ -674,14 +636,11 @@ ENTRY(secondary_startup) adrp x26, swapper_pg_dir bl __cpu_setup // initialise processor - ldr x8, kimage_vaddr - ldr w9, 0f - sub x27, x8, w9, sxtw // address to jump to after enabling the MMU + adr_l x27, __secondary_switch // address to jump to after enabling the MMU b __enable_mmu ENDPROC(secondary_startup) -0: .long (_text - TEXT_OFFSET) - __secondary_switched -ENTRY(__secondary_switched) +__secondary_switched: adr_l x5, vectors msr vbar_el1, x5 isb @@ -743,7 +702,6 @@ __enable_mmu: ic iallu // flush instructions fetched dsb nsh // via old mapping isb - add x27, x27, x23 // relocated __mmap_switched #endif br x27 ENDPROC(__enable_mmu) @@ -752,3 +710,53 @@ __no_granule_support: wfe b __no_granule_support ENDPROC(__no_granule_support) + +__primary_switch: +#ifdef CONFIG_RELOCATABLE + /* + * Iterate over each entry in the relocation table, and apply the + * relocations in place. + */ + ldr w8, =__dynsym_offset // offset to symbol table + ldr w9, =__rela_offset // offset to reloc table + ldr w10, =__rela_size // size of reloc table + + mov_q x11, KIMAGE_VADDR // default virtual offset + add x11, x11, x23 // actual virtual offset + add x8, x8, x11 // __va(.dynsym) + add x9, x9, x11 // __va(.rela) + add x10, x9, x10 // __va(.rela) + sizeof(.rela) + +0: cmp x9, x10 + b.hs 2f + ldp x11, x12, [x9], #24 + ldr x13, [x9, #-8] + cmp w12, #R_AARCH64_RELATIVE + b.ne 1f + add x13, x13, x23 // relocate + str x13, [x11, x23] + b 0b + +1: cmp w12, #R_AARCH64_ABS64 + b.ne 0b + add x12, x12, x12, lsl #1 // symtab offset: 24x top word + add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word + ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx + ldr x15, [x12, #8] // Elf64_Sym::st_value + cmp w14, #-0xf // SHN_ABS (0xfff1) ? + add x14, x15, x23 // relocate + csel x15, x14, x15, ne + add x15, x13, x15 + str x15, [x11, x23] + b 0b + +2: +#endif + ldr x8, =__primary_switched + br x8 +ENDPROC(__primary_switch) + +__secondary_switch: + ldr x8, =__secondary_switched + br x8 +ENDPROC(__secondary_switch) diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index db1bf57948f1..f0be31f1dd45 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -73,6 +73,8 @@ #ifdef CONFIG_EFI +__efistub_stext_offset = stext - _text; + /* * Prevent the symbol aliases below from being emitted into the kallsyms * table, by forcing them to be absolute symbols (which are conveniently diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 582983920054..b05469173ba5 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -74,7 +74,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, * containing function pointers) to be reinitialized, and zero-initialized * .bss variables will be reset to 0. 
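The relocation loop moved into __primary_switch above applies each R_AARCH64_RELATIVE entry as "addend plus run-time displacement". A stand-alone sketch of that pass over a fake image: struct rela mimics an Elf64_Rela entry, and the offset, addend and displacement are made-up values, not anything taken from a real kernel build.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define R_AARCH64_RELATIVE	1027

/* Minimal stand-in for an Elf64_Rela entry. */
struct rela {
	uint64_t r_offset;	/* image offset to patch */
	uint64_t r_info;	/* relocation type in the low 32 bits */
	int64_t  r_addend;	/* link-time value */
};

/* Apply RELATIVE relocations: the patched word becomes addend plus the
 * run-time displacement, which is what the assembly loop does with x23. */
static void apply_relative(uint8_t *image, const struct rela *r, size_t n,
			   uint64_t displacement)
{
	size_t i;

	for (i = 0; i < n; i++) {
		uint64_t val;

		if ((uint32_t)r[i].r_info != R_AARCH64_RELATIVE)
			continue;
		val = (uint64_t)r[i].r_addend + displacement;
		memcpy(image + r[i].r_offset, &val, sizeof(val));
	}
}

int main(void)
{
	uint8_t image[32] = { 0 };
	struct rela r[] = {
		{ .r_offset = 8, .r_info = R_AARCH64_RELATIVE, .r_addend = 0x100 },
	};
	uint64_t patched;

	apply_relative(image, r, 1, 0x200000);	/* pretend KASLR displacement */
	memcpy(&patched, image + 8, sizeof(patched));
	printf("relocated value: 0x%llx\n", (unsigned long long)patched); /* 0x200100 */
	return 0;
}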
*/ -u64 __init kaslr_early_init(u64 dt_phys) +u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset) { void *fdt; u64 seed, offset, mask, module_range; @@ -132,8 +132,8 @@ u64 __init kaslr_early_init(u64 dt_phys) * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this * happens, increase the KASLR offset by the size of the kernel image. */ - if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) != - (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT)) + if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) != + (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) offset = (offset + (u64)(_end - _text)) & mask; if (IS_ENABLED(CONFIG_KASAN)) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index ff7f13239515..fc779ec6f051 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -39,6 +39,7 @@ #include <linux/elf.h> #include <asm/compat.h> +#include <asm/cpufeature.h> #include <asm/debug-monitors.h> #include <asm/pgtable.h> #include <asm/syscall.h> @@ -500,7 +501,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, if (ret) return ret; - if (!valid_user_regs(&newregs)) + if (!valid_user_regs(&newregs, target)) return -EINVAL; task_pt_regs(target)->user_regs = newregs; @@ -770,7 +771,7 @@ static int compat_gpr_set(struct task_struct *target, } - if (valid_user_regs(&newregs.user_regs)) + if (valid_user_regs(&newregs.user_regs, target)) *task_pt_regs(target) = newregs; else ret = -EINVAL; @@ -1272,3 +1273,79 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs) if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); } + +/* + * Bits which are always architecturally RES0 per ARM DDI 0487A.h + * Userspace cannot use these until they have an architectural meaning. + * We also reserve IL for the kernel; SS is handled dynamically. + */ +#define SPSR_EL1_AARCH64_RES0_BITS \ + (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \ + GENMASK_ULL(5, 5)) +#define SPSR_EL1_AARCH32_RES0_BITS \ + (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20)) + +static int valid_compat_regs(struct user_pt_regs *regs) +{ + regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS; + + if (!system_supports_mixed_endian_el0()) { + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + regs->pstate |= COMPAT_PSR_E_BIT; + else + regs->pstate &= ~COMPAT_PSR_E_BIT; + } + + if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) && + (regs->pstate & COMPAT_PSR_A_BIT) == 0 && + (regs->pstate & COMPAT_PSR_I_BIT) == 0 && + (regs->pstate & COMPAT_PSR_F_BIT) == 0) { + return 1; + } + + /* + * Force PSR to a valid 32-bit EL0t, preserving the same bits as + * arch/arm. + */ + regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT | + COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT | + COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK | + COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT | + COMPAT_PSR_T_BIT; + regs->pstate |= PSR_MODE32_BIT; + + return 0; +} + +static int valid_native_regs(struct user_pt_regs *regs) +{ + regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS; + + if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) && + (regs->pstate & PSR_D_BIT) == 0 && + (regs->pstate & PSR_A_BIT) == 0 && + (regs->pstate & PSR_I_BIT) == 0 && + (regs->pstate & PSR_F_BIT) == 0) { + return 1; + } + + /* Force PSR to a valid 64-bit EL0t */ + regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT; + + return 0; +} + +/* + * Are the current registers suitable for user mode? 
(used to maintain + * security in signal handlers) + */ +int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) +{ + if (!test_tsk_thread_flag(task, TIF_SINGLESTEP)) + regs->pstate &= ~DBG_SPSR_SS; + + if (is_compat_thread(task_thread_info(task))) + return valid_compat_regs(regs); + else + return valid_native_regs(regs); +} diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index e18c48cb6db1..a8eafdbc7cb8 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -115,7 +115,7 @@ static int restore_sigframe(struct pt_regs *regs, */ regs->syscallno = ~0UL; - err |= !valid_user_regs(®s->user_regs); + err |= !valid_user_regs(®s->user_regs, current); if (err == 0) { struct fpsimd_context *fpsimd_ctx = @@ -307,7 +307,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) /* * Check that the resulting registers are actually sane. */ - ret |= !valid_user_regs(®s->user_regs); + ret |= !valid_user_regs(®s->user_regs, current); /* * Fast forward the stepping logic so we step into the signal diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 71ef6dc89ae5..107335637390 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -356,7 +356,7 @@ static int compat_restore_sigframe(struct pt_regs *regs, */ regs->syscallno = ~0UL; - err |= !valid_user_regs(®s->user_regs); + err |= !valid_user_regs(®s->user_regs, current); aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; if (err == 0) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 6b562a318f84..8b4623eeb62d 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -64,14 +64,19 @@ PECOFF_FILE_ALIGNMENT = 0x200; #endif #if defined(CONFIG_DEBUG_ALIGN_RODATA) -#define ALIGN_DEBUG_RO . = ALIGN(1<<SECTION_SHIFT); -#define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO -#elif defined(CONFIG_DEBUG_RODATA) -#define ALIGN_DEBUG_RO . = ALIGN(1<<PAGE_SHIFT); -#define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO +/* + * 4 KB granule: 1 level 2 entry + * 16 KB granule: 128 level 3 entries, with contiguous bit + * 64 KB granule: 32 level 3 entries, with contiguous bit + */ +#define SEGMENT_ALIGN SZ_2M #else -#define ALIGN_DEBUG_RO -#define ALIGN_DEBUG_RO_MIN(min) . = ALIGN(min); +/* + * 4 KB granule: 16 level 3 entries, with contiguous bit + * 16 KB granule: 4 level 3 entries, without contiguous bit + * 64 KB granule: 1 level 3 entry + */ +#define SEGMENT_ALIGN SZ_64K #endif SECTIONS @@ -97,7 +102,6 @@ SECTIONS _text = .; HEAD_TEXT } - ALIGN_DEBUG_RO_MIN(PAGE_SIZE) .text : { /* Real text segment */ _stext = .; /* Text and read-only data */ __exception_text_start = .; @@ -115,11 +119,12 @@ SECTIONS *(.got) /* Global offset table */ } - RO_DATA(PAGE_SIZE) - EXCEPTION_TABLE(8) + . = ALIGN(SEGMENT_ALIGN); + RO_DATA(PAGE_SIZE) /* everything from this point to */ + EXCEPTION_TABLE(8) /* _etext will be marked RO NX */ NOTES - ALIGN_DEBUG_RO_MIN(PAGE_SIZE) + . = ALIGN(SEGMENT_ALIGN); _etext = .; /* End of text and rodata section */ __init_begin = .; @@ -152,12 +157,9 @@ SECTIONS *(.altinstr_replacement) } .rela : ALIGN(8) { - __reloc_start = .; *(.rela .rela*) - __reloc_end = .; } .dynsym : ALIGN(8) { - __dynsym_start = .; *(.dynsym) } .dynstr : { @@ -167,7 +169,11 @@ SECTIONS *(.hash) } - . = ALIGN(PAGE_SIZE); + __rela_offset = ADDR(.rela) - KIMAGE_VADDR; + __rela_size = SIZEOF(.rela); + __dynsym_offset = ADDR(.dynsym) - KIMAGE_VADDR; + + . 
= ALIGN(SEGMENT_ALIGN); __init_end = .; _data = .; diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 648112e90ed5..3972e65fbd5a 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -130,7 +130,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT); if (!is_iabt) - esr |= ESR_ELx_EC_DABT_LOW; + esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT; vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT; } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 69079e5bfc84..55b944a913cb 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -84,6 +84,56 @@ void show_pte(struct mm_struct *mm, unsigned long addr) printk("\n"); } +#ifdef CONFIG_ARM64_HW_AFDBM +/* + * This function sets the access flags (dirty, accessed), as well as write + * permission, and only to a more permissive setting. + * + * It needs to cope with hardware update of the accessed/dirty state by other + * agents in the system and can safely skip the __sync_icache_dcache() call as, + * like set_pte_at(), the PTE is never changed from no-exec to exec here. + * + * Returns whether or not the PTE actually changed. + */ +int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty) +{ + pteval_t old_pteval; + unsigned int tmp; + + if (pte_same(*ptep, entry)) + return 0; + + /* only preserve the access flags and write permission */ + pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY; + + /* + * PTE_RDONLY is cleared by default in the asm below, so set it in + * back if necessary (read-only or clean PTE). + */ + if (!pte_write(entry) || !pte_sw_dirty(entry)) + pte_val(entry) |= PTE_RDONLY; + + /* + * Setting the flags must be done atomically to avoid racing with the + * hardware update of the access/dirty state. + */ + asm volatile("// ptep_set_access_flags\n" + " prfm pstl1strm, %2\n" + "1: ldxr %0, %2\n" + " and %0, %0, %3 // clear PTE_RDONLY\n" + " orr %0, %0, %4 // set flags\n" + " stxr %w1, %0, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)) + : "L" (~PTE_RDONLY), "r" (pte_val(entry))); + + flush_tlb_fix_spurious_fault(vma, address); + return 1; +} +#endif + /* * The kernel tried to access some page that wasn't present. 
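The ldxr/stxr loop in the new ptep_set_access_flags() above exists so that a concurrent hardware update of the accessed/dirty state is never lost. A portable C11 sketch of the same retry pattern: set_access_flags() and the F_* bit positions are illustrative stand-ins, not the arm64 PTE layout, and compare-exchange plays the role of the exclusive load/store pair.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define F_RDONLY	(UINT64_C(1) << 7)	/* illustrative "read-only" bit */
#define F_AF		(UINT64_C(1) << 10)	/* illustrative "accessed" bit */
#define F_DIRTY		(UINT64_C(1) << 55)	/* illustrative "dirty" bit */

/* Clear the read-only bit and OR in the new flags, retrying if the entry
 * changed underneath us (e.g. a concurrent dirty-bit update). */
static void set_access_flags(_Atomic uint64_t *entry, uint64_t flags)
{
	uint64_t old = atomic_load(entry);
	uint64_t new;

	do {
		new = (old & ~F_RDONLY) | flags;
	} while (!atomic_compare_exchange_weak(entry, &old, new));
}

int main(void)
{
	_Atomic uint64_t pte = F_RDONLY;	/* clean, read-only entry */

	set_access_flags(&pte, F_AF | F_DIRTY);
	printf("entry: 0x%llx\n", (unsigned long long)atomic_load(&pte));
	return 0;
}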
*/ diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 0d5b0d0578b3..4a49093dddd7 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -372,6 +372,7 @@ void __init mem_init(void) " vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n" " .init : 0x%p" " - 0x%p" " (%6ld KB)\n" " .text : 0x%p" " - 0x%p" " (%6ld KB)\n" + " .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n" " .data : 0x%p" " - 0x%p" " (%6ld KB)\n" #ifdef CONFIG_SPARSEMEM_VMEMMAP " vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n" @@ -386,7 +387,8 @@ void __init mem_init(void) MLM(MODULES_VADDR, MODULES_END), MLG(VMALLOC_START, VMALLOC_END), MLK_ROUNDUP(__init_begin, __init_end), - MLK_ROUNDUP(_text, _etext), + MLK_ROUNDUP(_text, __start_rodata), + MLK_ROUNDUP(__start_rodata, _etext), MLK_ROUNDUP(_sdata, _edata), #ifdef CONFIG_SPARSEMEM_VMEMMAP MLG(VMEMMAP_START, diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 62096a7e047a..a8a6f91343cc 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -391,7 +391,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt, static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end) { - unsigned long kernel_start = __pa(_stext); + unsigned long kernel_start = __pa(_text); unsigned long kernel_end = __pa(_etext); /* @@ -423,7 +423,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end early_pgtable_alloc); /* - * Map the linear alias of the [_stext, _etext) interval as + * Map the linear alias of the [_text, _etext) interval as * read-only/non-executable. This makes the contents of the * region accessible to subsystems such as hibernate, but * protects it from inadvertent modification or execution. @@ -451,12 +451,18 @@ static void __init map_mem(pgd_t *pgd) void mark_rodata_ro(void) { - if (!IS_ENABLED(CONFIG_DEBUG_RODATA)) - return; + unsigned long section_size; - create_mapping_late(__pa(_stext), (unsigned long)_stext, - (unsigned long)_etext - (unsigned long)_stext, - PAGE_KERNEL_ROX); + section_size = (unsigned long)__start_rodata - (unsigned long)_text; + create_mapping_late(__pa(_text), (unsigned long)_text, + section_size, PAGE_KERNEL_ROX); + /* + * mark .rodata as read only. Use _etext rather than __end_rodata to + * cover NOTES and EXCEPTION_TABLE. 
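mark_rodata_ro() above tightens the kernel mappings after init: text stays executable and everything from __start_rodata to _etext becomes read-only. As a loose user-space analogy only (mprotect on an anonymous page, not the kernel's create_mapping_late() path):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p;

	/* Start writable, fill in the "rodata", then drop write permission. */
	p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "read-only after init");
	if (mprotect(p, psz, PROT_READ) != 0)
		return 1;
	printf("%s\n", p);	/* reads still work; a write would now fault */
	munmap(p, psz);
	return 0;
}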
+ */ + section_size = (unsigned long)_etext - (unsigned long)__start_rodata; + create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata, + section_size, PAGE_KERNEL_RO); } void fixup_init(void) @@ -469,8 +475,8 @@ void fixup_init(void) unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin)); } -static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end, - pgprot_t prot, struct vm_struct *vma) +static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, + pgprot_t prot, struct vm_struct *vma) { phys_addr_t pa_start = __pa(va_start); unsigned long size = va_end - va_start; @@ -495,12 +501,13 @@ static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end, */ static void __init map_kernel(pgd_t *pgd) { - static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data; + static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data; - map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text); - map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC, - &vmlinux_init); - map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data); + map_kernel_segment(pgd, _text, __start_rodata, PAGE_KERNEL_EXEC, &vmlinux_text); + map_kernel_segment(pgd, __start_rodata, _etext, PAGE_KERNEL, &vmlinux_rodata); + map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC, + &vmlinux_init); + map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data); if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) { /* diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c index b955fafc58ba..d1adc59af5bf 100644 --- a/arch/mips/ath79/early_printk.c +++ b/arch/mips/ath79/early_printk.c @@ -31,13 +31,15 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val) } while (1); } +#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) + static void prom_putchar_ar71xx(unsigned char ch) { void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE)); - prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE); + prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY); __raw_writel(ch, base + UART_TX * 4); - prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE); + prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY); } static void prom_putchar_ar933x(unsigned char ch) diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 867f924b05c7..e689b894353c 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -298,21 +298,21 @@ .set pop .endm - .macro copy_u_w ws, n + .macro copy_s_w ws, n .set push .set mips32r2 .set fp=64 .set msa - copy_u.w $1, $w\ws[\n] + copy_s.w $1, $w\ws[\n] .set pop .endm - .macro copy_u_d ws, n + .macro copy_s_d ws, n .set push .set mips64r2 .set fp=64 .set msa - copy_u.d $1, $w\ws[\n] + copy_s.d $1, $w\ws[\n] .set pop .endm @@ -346,8 +346,8 @@ #define STH_MSA_INSN 0x5800081f #define STW_MSA_INSN 0x5800082f #define STD_MSA_INSN 0x5800083f -#define COPY_UW_MSA_INSN 0x58f00056 -#define COPY_UD_MSA_INSN 0x58f80056 +#define COPY_SW_MSA_INSN 0x58b00056 +#define COPY_SD_MSA_INSN 0x58b80056 #define INSERT_W_MSA_INSN 0x59300816 #define INSERT_D_MSA_INSN 0x59380816 #else @@ -361,8 +361,8 @@ #define STH_MSA_INSN 0x78000825 #define STW_MSA_INSN 0x78000826 #define STD_MSA_INSN 0x78000827 -#define COPY_UW_MSA_INSN 0x78f00059 -#define COPY_UD_MSA_INSN 0x78f80059 +#define COPY_SW_MSA_INSN 0x78b00059 +#define COPY_SD_MSA_INSN 0x78b80059 #define 
INSERT_W_MSA_INSN 0x79300819 #define INSERT_D_MSA_INSN 0x79380819 #endif @@ -393,7 +393,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word LDB_MSA_INSN | (\wd << 6) .set pop .endm @@ -402,7 +402,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word LDH_MSA_INSN | (\wd << 6) .set pop .endm @@ -411,7 +411,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word LDW_MSA_INSN | (\wd << 6) .set pop .endm @@ -420,7 +420,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word LDD_MSA_INSN | (\wd << 6) .set pop .endm @@ -429,7 +429,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word STB_MSA_INSN | (\wd << 6) .set pop .endm @@ -438,7 +438,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word STH_MSA_INSN | (\wd << 6) .set pop .endm @@ -447,7 +447,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word STW_MSA_INSN | (\wd << 6) .set pop .endm @@ -456,26 +456,26 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word STD_MSA_INSN | (\wd << 6) .set pop .endm - .macro copy_u_w ws, n + .macro copy_s_w ws, n .set push .set noat SET_HARDFLOAT .insn - .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11) + .word COPY_SW_MSA_INSN | (\n << 16) | (\ws << 11) .set pop .endm - .macro copy_u_d ws, n + .macro copy_s_d ws, n .set push .set noat SET_HARDFLOAT .insn - .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11) + .word COPY_SD_MSA_INSN | (\n << 16) | (\ws << 11) .set pop .endm diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h index 723229f4cf27..176de586a71a 100644 --- a/arch/mips/include/asm/cacheflush.h +++ b/arch/mips/include/asm/cacheflush.h @@ -51,7 +51,6 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); extern void __flush_dcache_page(struct page *page); -extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 static inline void flush_dcache_page(struct page *page) @@ -77,11 +76,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma, static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) { - if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) && - Page_dcache_dirty(page)) { - __flush_icache_page(vma, page); - ClearPageDcacheDirty(page); - } } extern void (*flush_icache_range)(unsigned long start, unsigned long end); diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 6ded8d347af9..dd7cee795709 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -372,6 +372,7 @@ struct kvm_mips_tlb { #define KVM_MIPS_GUEST_TLB_SIZE 64 struct kvm_vcpu_arch { void *host_ebase, *guest_ebase; + int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); unsigned long host_stack; unsigned long host_gp; @@ -784,7 +785,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu); void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count); -void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare); +void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t 
compare, bool ack); void kvm_mips_init_count(struct kvm_vcpu *vcpu); int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl); int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume); diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h index bbb85fe21642..6e4effa6f626 100644 --- a/arch/mips/include/asm/msa.h +++ b/arch/mips/include/asm/msa.h @@ -147,6 +147,19 @@ static inline void restore_msa(struct task_struct *t) _restore_msa(t); } +static inline void init_msa_upper(void) +{ + /* + * Check cpu_has_msa only if it's a constant. This will allow the + * compiler to optimise out code for CPUs without MSA without adding + * an extra redundant check for CPUs with MSA. + */ + if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa) + return; + + _init_msa_upper(); +} + #ifdef TOOLCHAIN_SUPPORTS_MSA #define __BUILD_MSA_CTL_REG(name, cs) \ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 18826aa15a7c..4e68c644acc5 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -127,10 +127,14 @@ do { \ } \ } while(0) +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval); + #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) #define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL)) #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) +#define pte_no_exec(pte) ((pte).pte_low & _PAGE_NO_EXEC) static inline void set_pte(pte_t *ptep, pte_t pte) { @@ -148,7 +152,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte) buddy->pte_high |= _PAGE_GLOBAL; } } -#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -166,6 +169,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) +#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC) /* * Certain architectures need to do special things when pte's @@ -218,7 +222,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) } #endif } -#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -234,6 +237,22 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt } #endif +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + extern void __update_cache(unsigned long address, pte_t pte); + + if (!pte_present(pteval)) + goto cache_sync_done; + + if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval))) + goto cache_sync_done; + + __update_cache(addr, pteval); +cache_sync_done: + set_pte(ptep, pteval); +} + /* * (pmds are folded into puds so this doesn't get actually called, * but the define is needed for a generic inline function.) 
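The init_msa_upper() helper added above relies on __builtin_constant_p() so that the check disappears entirely when the feature test folds to a compile-time constant. The following is an illustrative user-space sketch of that pattern only, not kernel code; HAVE_WIDE_REGS and init_wide_regs() are made-up names for illustration.

/*
 * Sketch of the __builtin_constant_p pattern: if the predicate is a
 * compile-time constant 0, the early return lets the compiler drop the
 * call; if it is only known at run time, no extra branch is added.
 */
#include <stdio.h>

#define HAVE_WIDE_REGS 0	/* pretend the configuration says "absent" */

static void init_wide_regs(void)
{
	puts("initialising upper register halves");
}

static inline void maybe_init_wide_regs(void)
{
	if (__builtin_constant_p(HAVE_WIDE_REGS) && !HAVE_WIDE_REGS)
		return;
	init_wide_regs();
}

int main(void)
{
	maybe_init_wide_regs();	/* optimised away when HAVE_WIDE_REGS is 0 */
	return 0;
}

With an optimising GCC or Clang build the call above compiles to nothing, which is the effect the wrapper is after: no cost on configurations without the feature, no redundant double check on configurations with it.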
@@ -430,15 +449,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte); -extern void __update_cache(struct vm_area_struct *vma, unsigned long address, - pte_t pte); static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; __update_tlb(vma, address, pte); - __update_cache(vma, address, pte); } static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 3f832c3dd8f5..041153f5cf93 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count; * User space process size: 2GB. This is hardcoded into a few places, * so don't change it unless you know what you are doing. */ -#define TASK_SIZE 0x7fff8000UL +#define TASK_SIZE 0x80000000UL #endif #define STACK_TOP_MAX TASK_SIZE diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index 28b5d84a5022..ebb5c0f2f90d 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -105,7 +105,7 @@ do { \ __clear_software_ll_bit(); \ if (cpu_has_userlocal) \ write_c0_userlocal(task_thread_info(next)->tp_value); \ - __restore_watch(); \ + __restore_watch(next); \ (last) = resume(prev, next, task_thread_info(next)); \ } while (0) diff --git a/arch/mips/include/asm/watch.h b/arch/mips/include/asm/watch.h index 20126ec79359..6ffe3eadf105 100644 --- a/arch/mips/include/asm/watch.h +++ b/arch/mips/include/asm/watch.h @@ -12,21 +12,21 @@ #include <asm/mipsregs.h> -void mips_install_watch_registers(void); +void mips_install_watch_registers(struct task_struct *t); void mips_read_watch_registers(void); void mips_clear_watch_registers(void); void mips_probe_watch_registers(struct cpuinfo_mips *c); #ifdef CONFIG_HARDWARE_WATCHPOINTS -#define __restore_watch() do { \ +#define __restore_watch(task) do { \ if (unlikely(test_bit(TIF_LOAD_WATCH, \ - &current_thread_info()->flags))) { \ - mips_install_watch_registers(); \ + &task_thread_info(task)->flags))) { \ + mips_install_watch_registers(task); \ } \ } while (0) #else -#define __restore_watch() do {} while (0) +#define __restore_watch(task) do {} while (0) #endif #endif /* _ASM_WATCH_H */ diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index 2cb7fdead570..e2b5337e840f 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -28,7 +28,7 @@ #define __ARCH_SIGSYS -#include <uapi/asm-generic/siginfo.h> +#include <asm-generic/siginfo.h> /* We can't use generic siginfo_t, because our si_code and si_errno are swapped */ typedef struct siginfo { @@ -42,13 +42,13 @@ typedef struct siginfo { /* kill() */ struct { - pid_t _pid; /* sender's pid */ + __kernel_pid_t _pid; /* sender's pid */ __ARCH_SI_UID_T _uid; /* sender's uid */ } _kill; /* POSIX.1b timers */ struct { - timer_t _tid; /* timer id */ + __kernel_timer_t _tid; /* timer id */ int _overrun; /* overrun count */ char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)]; sigval_t _sigval; /* same as below */ @@ -57,26 +57,26 @@ typedef struct siginfo { /* POSIX.1b signals */ struct { - pid_t _pid; /* sender's pid */ + __kernel_pid_t _pid; /* sender's pid */ __ARCH_SI_UID_T _uid; /* sender's uid */ sigval_t _sigval; } _rt; /* SIGCHLD */ struct { - pid_t _pid; /* which child */ 
__ARCH_SI_UID_T _uid; /* sender's uid */ int _status; /* exit code */ - clock_t _utime; - clock_t _stime; + __kernel_clock_t _utime; + __kernel_clock_t _stime; } _sigchld; /* IRIX SIGCHLD */ struct { - pid_t _pid; /* which child */ - clock_t _utime; + __kernel_pid_t _pid; /* which child */ + __kernel_clock_t _utime; int _status; /* exit code */ - clock_t _stime; + __kernel_clock_t _stime; } _irix_sigchld; /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ @@ -118,6 +118,4 @@ typedef struct siginfo { #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ -#include <asm-generic/siginfo.h> - #endif /* _UAPI_ASM_SIGINFO_H */ diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index 1f5aac7f9ec3..4674a74a08b5 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -28,6 +28,7 @@ #include <asm/inst.h> #include <asm/mips-r2-to-r6-emul.h> #include <asm/local.h> +#include <asm/mipsregs.h> #include <asm/ptrace.h> #include <asm/uaccess.h> @@ -1251,10 +1252,10 @@ fpu_emul: " j 10b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1326,10 +1327,10 @@ fpu_emul: " j 10b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1397,10 +1398,10 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1467,10 +1468,10 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1582,14 +1583,14 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" - " .word 5b,8b\n" - " .word 6b,8b\n" - " .word 7b,8b\n" - " .word 0b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" + STR(PTR) " 5b,8b\n" + STR(PTR) " 6b,8b\n" + STR(PTR) " 7b,8b\n" + STR(PTR) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1701,14 +1702,14 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" - " .word 5b,8b\n" - " .word 6b,8b\n" - " .word 7b,8b\n" - " .word 0b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" + STR(PTR) " 5b,8b\n" + STR(PTR) " 6b,8b\n" + STR(PTR) " 7b,8b\n" + STR(PTR) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1820,14 +1821,14 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" - " .word 5b,8b\n" - " .word 6b,8b\n" - " .word 7b,8b\n" - " .word 0b,8b\n" + STR(PTR) " 1b,8b\n" + 
STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" + STR(PTR) " 5b,8b\n" + STR(PTR) " 6b,8b\n" + STR(PTR) " 7b,8b\n" + STR(PTR) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1938,14 +1939,14 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" - " .word 5b,8b\n" - " .word 6b,8b\n" - " .word 7b,8b\n" - " .word 0b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" + STR(PTR) " 5b,8b\n" + STR(PTR) " 6b,8b\n" + STR(PTR) " 7b,8b\n" + STR(PTR) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -2000,7 +2001,7 @@ fpu_emul: "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" - ".word 1b, 3b\n" + STR(PTR) " 1b,3b\n" ".previous\n" : "=&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV) @@ -2058,7 +2059,7 @@ fpu_emul: "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" - ".word 1b, 3b\n" + STR(PTR) " 1b,3b\n" ".previous\n" : "+&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV)); @@ -2119,7 +2120,7 @@ fpu_emul: "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" - ".word 1b, 3b\n" + STR(PTR) " 1b,3b\n" ".previous\n" : "=&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV) @@ -2182,7 +2183,7 @@ fpu_emul: "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" - ".word 1b, 3b\n" + STR(PTR) " 1b,3b\n" ".previous\n" : "+&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV)); diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c index fefdf39d3df3..dc814892133c 100644 --- a/arch/mips/kernel/pm.c +++ b/arch/mips/kernel/pm.c @@ -56,7 +56,7 @@ static void mips_cpu_restore(void) write_c0_userlocal(current_thread_info()->tp_value); /* Restore watch registers */ - __restore_watch(); + __restore_watch(current); } /** diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index f2975d4d1e44..89847bee2b53 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -457,7 +457,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { regs = (struct pt_regs *)*sp; pc = regs->cp0_epc; - if (__kernel_text_address(pc)) { + if (!user_mode(regs) && __kernel_text_address(pc)) { *sp = regs->regs[29]; *ra = regs->regs[31]; return pc; @@ -603,6 +603,9 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) return -EOPNOTSUPP; + /* Proceed with the mode switch */ + preempt_disable(); + /* Save FP & vector context, then disable FPU & MSA */ if (task->signal == current->signal) lose_fpu(1); @@ -661,6 +664,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) /* Allow threads to use FP again */ atomic_set(&task->mm->context.fp_mode_switching, 0); + preempt_enable(); return 0; } diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 4f0ac78d17f1..74d581569778 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -57,8 +57,7 @@ static void init_fp_ctx(struct task_struct *target) /* Begin with data registers set to all 1s... */ memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr)); - /* ...and FCSR zeroed */ - target->thread.fpu.fcr31 = 0; + /* FCSR has been preset by `mips_set_personality_nan'. */ /* * Record that the target has "used" math, such that the context @@ -80,6 +79,22 @@ void ptrace_disable(struct task_struct *child) } /* + * Poke at FCSR according to its mask. 
Don't set the cause bits as + * this is currently not handled correctly in FP context restoration + * and will cause an oops if a corresponding enable bit is set. + */ +static void ptrace_setfcr31(struct task_struct *child, u32 value) +{ + u32 fcr31; + u32 mask; + + value &= ~FPU_CSR_ALL_X; + fcr31 = child->thread.fpu.fcr31; + mask = boot_cpu_data.fpu_msk31; + child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); +} + +/* * Read a general register set. We always use the 64-bit format, even * for 32-bit kernels and for 32-bit processes on a 64-bit kernel. * Registers are sign extended to fill the available space. @@ -159,9 +174,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) { union fpureg *fregs; u64 fpr_val; - u32 fcr31; u32 value; - u32 mask; int i; if (!access_ok(VERIFY_READ, data, 33 * 8)) @@ -176,9 +189,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) } __get_user(value, data + 64); - fcr31 = child->thread.fpu.fcr31; - mask = boot_cpu_data.fpu_msk31; - child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); + ptrace_setfcr31(child, value); /* FIR may not be written. */ @@ -808,7 +819,7 @@ long arch_ptrace(struct task_struct *child, long request, break; #endif case FPC_CSR: - child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X; + ptrace_setfcr31(child, data); break; case DSP_BASE ... DSP_BASE + 5: { dspreg_t *dregs; diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index f09546ee2cdc..bc74485ec805 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -244,17 +244,17 @@ LEAF(\name) .set push .set noat #ifdef CONFIG_64BIT - copy_u_d \wr, 1 + copy_s_d \wr, 1 EX sd $1, \off(\base) #elif defined(CONFIG_CPU_LITTLE_ENDIAN) - copy_u_w \wr, 2 + copy_s_w \wr, 2 EX sw $1, \off(\base) - copy_u_w \wr, 3 + copy_s_w \wr, 3 EX sw $1, (\off+4)(\base) #else /* CONFIG_CPU_BIG_ENDIAN */ - copy_u_w \wr, 2 + copy_s_w \wr, 2 EX sw $1, (\off+4)(\base) - copy_u_w \wr, 3 + copy_s_w \wr, 3 EX sw $1, \off(\base) #endif .set pop diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 66aac55df349..8acae316f26b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -706,6 +706,9 @@ static void __init arch_mem_init(char **cmdline_p) for_each_memblock(reserved, reg) if (reg->size != 0) reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); + + reserve_bootmem_region(__pa_symbol(&__nosave_begin), + __pa_symbol(&__nosave_end)); /* Reserve for hibernation */ } static void __init resource_init(void) diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index bf792e2839a6..9e35b6b26aa8 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -195,6 +195,9 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size) unsigned int csr; int i, err; + if (!config_enabled(CONFIG_CPU_HAS_MSA)) + return SIGSYS; + if (size != sizeof(*msa)) return -EINVAL; @@ -398,8 +401,8 @@ int protected_restore_fp_context(void __user *sc) } fp_done: - if (used & USED_EXTCONTEXT) - err |= restore_extcontext(sc_to_extcontext(sc)); + if (!err && (used & USED_EXTCONTEXT)) + err = restore_extcontext(sc_to_extcontext(sc)); return err ?: sig; } @@ -767,15 +770,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) sigset_t *oldset = sigmask_to_save(); int ret; struct mips_abi *abi = current->thread.abi; -#ifdef CONFIG_CPU_MICROMIPS - void *vdso; - unsigned long tmp = (unsigned long)current->mm->context.vdso; - - set_isa16_mode(tmp); - vdso = (void *)tmp; -#else 
void *vdso = current->mm->context.vdso; -#endif if (regs->regs[0]) { switch(regs->regs[2]) { diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index ca9a81007489..99a402231f4d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -144,7 +144,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) if (!task) task = current; - if (raw_show_trace || !__kernel_text_address(pc)) { + if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) { show_raw_backtrace(sp); return; } @@ -1241,7 +1241,7 @@ static int enable_restore_fp_context(int msa) err = init_fpu(); if (msa && !err) { enable_msa(); - _init_msa_upper(); + init_msa_upper(); set_thread_flag(TIF_USEDMSA); set_thread_flag(TIF_MSA_CTX_LIVE); } @@ -1304,7 +1304,7 @@ static int enable_restore_fp_context(int msa) */ prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE); if (!prior_msa && was_fpu_owner) { - _init_msa_upper(); + init_msa_upper(); goto out; } @@ -1321,7 +1321,7 @@ static int enable_restore_fp_context(int msa) * of each vector register such that it cannot see data left * behind by another task. */ - _init_msa_upper(); + init_msa_upper(); } else { /* We need to restore the vector context. */ restore_msa(current); diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c index 2a03abb5bd2c..9b78e375118e 100644 --- a/arch/mips/kernel/watch.c +++ b/arch/mips/kernel/watch.c @@ -15,10 +15,9 @@ * Install the watch registers for the current thread. A maximum of * four registers are installed although the machine may have more. */ -void mips_install_watch_registers(void) +void mips_install_watch_registers(struct task_struct *t) { - struct mips3264_watch_reg_state *watches = - &current->thread.watch.mips3264; + struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264; switch (current_cpu_data.watch_reg_use_cnt) { default: BUG(); diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 41b1b090f56f..dc10c77b7500 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu) */ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) { - ktime_t expires; + struct mips_coproc *cop0 = vcpu->arch.cop0; + ktime_t expires, threshold; + uint32_t count, compare; int running; - /* Is the hrtimer pending? */ + /* Calculate the biased and scaled guest CP0_Count */ + count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); + compare = kvm_read_c0_guest_compare(cop0); + + /* + * Find whether CP0_Count has reached the closest timer interrupt. If + * not, we shouldn't inject it. + */ + if ((int32_t)(count - compare) < 0) + return count; + + /* + * The CP0_Count we're going to return has already reached the closest + * timer interrupt. Quickly check if it really is a new interrupt by + * looking at whether the interval until the hrtimer expiry time is + * less than 1/4 of the timer period. + */ expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); - if (ktime_compare(now, expires) >= 0) { + threshold = ktime_add_ns(now, vcpu->arch.count_period / 4); + if (ktime_before(expires, threshold)) { /* * Cancel it while we handle it so there's no chance of * interference with the timeout handler. 
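The new (int32_t)(count - compare) test in kvm_mips_read_count_running() above uses wrap-safe sequence comparison: interpreting the difference of two free-running 32-bit counter values as a signed number says whether "count" has reached "compare" even across the 2^32 wrap. Below is a minimal stand-alone sketch of that idiom only, not KVM code, with made-up sample values.

/*
 * Wrap-safe "has the counter reached the compare value?" check for a
 * free-running 32-bit counter.
 */
#include <stdint.h>
#include <stdio.h>

static int count_reached_compare(uint32_t count, uint32_t compare)
{
	return (int32_t)(count - compare) >= 0;
}

int main(void)
{
	/* Compare is still ahead of count: not reached yet. */
	printf("%d\n", count_reached_compare(0x00001000, 0x00002000));	/* 0 */

	/* Count has just passed compare. */
	printf("%d\n", count_reached_compare(0x00002001, 0x00002000));	/* 1 */

	/* Across the wrap: 0x00000010 is "after" 0xfffffff0. */
	printf("%d\n", count_reached_compare(0x00000010, 0xfffffff0));	/* 1 */

	return 0;
}

The same signed-difference trick works for any counter or sequence number as long as the two values are never more than half the counter range apart, which is why the guest CP0_Count and CP0_Compare can be compared without worrying about the counter wrapping between reads.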
@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) } } - /* Return the biased and scaled guest CP0_Count */ - return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); + return count; } /** @@ -420,32 +438,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, } /** - * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer. - * @vcpu: Virtual CPU. - * - * Recalculates and updates the expiry time of the hrtimer. This can be used - * after timer parameters have been altered which do not depend on the time that - * the change occurs (in those cases kvm_mips_freeze_hrtimer() and - * kvm_mips_resume_hrtimer() are used directly). - * - * It is guaranteed that no timer interrupts will be lost in the process. - * - * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). - */ -static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu) -{ - ktime_t now; - uint32_t count; - - /* - * freeze_hrtimer takes care of a timer interrupts <= count, and - * resume_hrtimer the hrtimer takes care of a timer interrupts > count. - */ - now = kvm_mips_freeze_hrtimer(vcpu, &count); - kvm_mips_resume_hrtimer(vcpu, now, count); -} - -/** * kvm_mips_write_count() - Modify the count and update timer. * @vcpu: Virtual CPU. * @count: Guest CP0_Count value to set. @@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) * kvm_mips_write_compare() - Modify compare and update timer. * @vcpu: Virtual CPU. * @compare: New CP0_Compare value. + * @ack: Whether to acknowledge timer interrupt. * * Update CP0_Compare to a new value and update the timeout. + * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure + * any pending timer interrupt is preserved. 
*/ -void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare) +void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack) { struct mips_coproc *cop0 = vcpu->arch.cop0; + int dc; + u32 old_compare = kvm_read_c0_guest_compare(cop0); + ktime_t now; + uint32_t count; /* if unchanged, must just be an ack */ - if (kvm_read_c0_guest_compare(cop0) == compare) + if (old_compare == compare) { + if (!ack) + return; + kvm_mips_callbacks->dequeue_timer_int(vcpu); + kvm_write_c0_guest_compare(cop0, compare); return; + } + + /* freeze_hrtimer() takes care of timer interrupts <= count */ + dc = kvm_mips_count_disabled(vcpu); + if (!dc) + now = kvm_mips_freeze_hrtimer(vcpu, &count); + + if (ack) + kvm_mips_callbacks->dequeue_timer_int(vcpu); - /* Update compare */ kvm_write_c0_guest_compare(cop0, compare); - /* Update timeout if count enabled */ - if (!kvm_mips_count_disabled(vcpu)) - kvm_mips_update_hrtimer(vcpu); + /* resume_hrtimer() takes care of timer interrupts > count */ + if (!dc) + kvm_mips_resume_hrtimer(vcpu, now, count); } /** @@ -1095,9 +1106,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, /* If we are writing to COMPARE */ /* Clear pending timer interrupt, if any */ - kvm_mips_callbacks->dequeue_timer_int(vcpu); kvm_mips_write_compare(vcpu, - vcpu->arch.gprs[rt]); + vcpu->arch.gprs[rt], + true); } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { unsigned int old_val, val, change; diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h index 4ab4bdfad703..2143884709e4 100644 --- a/arch/mips/kvm/interrupt.h +++ b/arch/mips/kvm/interrupt.h @@ -28,6 +28,7 @@ #define MIPS_EXC_MAX 12 /* XXXSL More to follow */ +extern char __kvm_mips_vcpu_run_end[]; extern char mips32_exception[], mips32_exceptionEnd[]; extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S index 7e2210846b8b..77706433651b 100644 --- a/arch/mips/kvm/locore.S +++ b/arch/mips/kvm/locore.S @@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1) /* Jump to guest */ eret +EXPORT(__kvm_mips_vcpu_run_end) VECTOR(MIPSX(exception), unknown) /* Find out what mode we came from and jump to the proper handler. 
*/ diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 2683d04fdda5..e86b7499921a 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -314,6 +314,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) memcpy(gebase + offset, mips32_GuestException, mips32_GuestExceptionEnd - mips32_GuestException); +#ifdef MODULE + offset += mips32_GuestExceptionEnd - mips32_GuestException; + memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run, + __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run); + vcpu->arch.vcpu_run = gebase + offset; +#else + vcpu->arch.vcpu_run = __kvm_mips_vcpu_run; +#endif + /* Invalidate the icache for these ranges */ local_flush_icache_range((unsigned long)gebase, (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); @@ -403,7 +412,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) /* Disable hardware page table walking while in guest */ htw_stop(); - r = __kvm_mips_vcpu_run(run, vcpu); + r = vcpu->arch.vcpu_run(run, vcpu); /* Re-enable HTW before enabling interrupts */ htw_start(); diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index d836ed5b0bc7..307cc4c98bdd 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -547,7 +547,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, kvm_mips_write_count(vcpu, v); break; case KVM_REG_MIPS_CP0_COMPARE: - kvm_mips_write_compare(vcpu, v); + kvm_mips_write_compare(vcpu, v, false); break; case KVM_REG_MIPS_CP0_CAUSE: /* diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c index beb80f316095..927dc94a030f 100644 --- a/arch/mips/lib/ashldi3.c +++ b/arch/mips/lib/ashldi3.c @@ -2,7 +2,7 @@ #include "libgcc.h" -long long __ashldi3(long long u, word_type b) +long long notrace __ashldi3(long long u, word_type b) { DWunion uu, w; word_type bm; diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c index c884a912b660..9fdf1a598428 100644 --- a/arch/mips/lib/ashrdi3.c +++ b/arch/mips/lib/ashrdi3.c @@ -2,7 +2,7 @@ #include "libgcc.h" -long long __ashrdi3(long long u, word_type b) +long long notrace __ashrdi3(long long u, word_type b) { DWunion uu, w; word_type bm; diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c index 77e5f9c1f005..e3e77aa52c95 100644 --- a/arch/mips/lib/bswapdi.c +++ b/arch/mips/lib/bswapdi.c @@ -1,6 +1,6 @@ #include <linux/module.h> -unsigned long long __bswapdi2(unsigned long long u) +unsigned long long notrace __bswapdi2(unsigned long long u) { return (((u) & 0xff00000000000000ull) >> 56) | (((u) & 0x00ff000000000000ull) >> 40) | diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c index 2b302ff121d2..530a8afe6fda 100644 --- a/arch/mips/lib/bswapsi.c +++ b/arch/mips/lib/bswapsi.c @@ -1,6 +1,6 @@ #include <linux/module.h> -unsigned int __bswapsi2(unsigned int u) +unsigned int notrace __bswapsi2(unsigned int u) { return (((u) & 0xff000000) >> 24) | (((u) & 0x00ff0000) >> 8) | diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c index 8c1306437ed1..06857da96993 100644 --- a/arch/mips/lib/cmpdi2.c +++ b/arch/mips/lib/cmpdi2.c @@ -2,7 +2,7 @@ #include "libgcc.h" -word_type __cmpdi2(long long a, long long b) +word_type notrace __cmpdi2(long long a, long long b) { const DWunion au = { .ll = a diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c index dcf8d6810b7c..364547449c65 100644 --- a/arch/mips/lib/lshrdi3.c +++ b/arch/mips/lib/lshrdi3.c @@ -2,7 +2,7 @@ #include "libgcc.h" -long long __lshrdi3(long long u, word_type b) +long long notrace __lshrdi3(long 
long u, word_type b) { DWunion uu, w; word_type bm; diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c index bb4cb2f828ea..bd599f58234c 100644 --- a/arch/mips/lib/ucmpdi2.c +++ b/arch/mips/lib/ucmpdi2.c @@ -2,7 +2,7 @@ #include "libgcc.h" -word_type __ucmpdi2(unsigned long long a, unsigned long long b) +word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b) { const DWunion au = {.ll = a}; const DWunion bu = {.ll = b}; diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c index 6f9e010cec4d..282c5a8c2fcd 100644 --- a/arch/mips/loongson64/loongson-3/numa.c +++ b/arch/mips/loongson64/loongson-3/numa.c @@ -213,10 +213,10 @@ static void __init node_mem_init(unsigned int node) BOOTMEM_DEFAULT); if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) { - /* Reserve 0xff800000~0xffffffff for RS780E integrated GPU */ + /* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */ reserve_bootmem_node(NODE_DATA(node), - (node_addrspace_offset | 0xff800000), - 8 << 20, BOOTMEM_DEFAULT); + (node_addrspace_offset | 0xfe000000), + 32 << 20, BOOTMEM_DEFAULT); } sparse_memory_present_with_active_regions(node); diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 32f0e19a0d7f..734a2c7665ec 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -445,9 +445,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, case spec_op: switch (insn.r_format.func) { case jalr_op: - regs->regs[insn.r_format.rd] = - regs->cp0_epc + dec_insn.pc_inc + - dec_insn.next_pc_inc; + if (insn.r_format.rd != 0) { + regs->regs[insn.r_format.rd] = + regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + } /* Fall through */ case jr_op: /* For R6, JR already emulated in jalr_op */ diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index aab218c36e0d..e87bccd6e0aa 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -16,6 +16,7 @@ #include <linux/mm.h> #include <asm/cacheflush.h> +#include <asm/highmem.h> #include <asm/processor.h> #include <asm/cpu.h> #include <asm/cpu-features.h> @@ -83,8 +84,6 @@ void __flush_dcache_page(struct page *page) struct address_space *mapping = page_mapping(page); unsigned long addr; - if (PageHighMem(page)) - return; if (mapping && !mapping_mapped(mapping)) { SetPageDcacheDirty(page); return; @@ -95,8 +94,15 @@ void __flush_dcache_page(struct page *page) * case is for exec env/arg pages and those are %99 certainly going to * get faulted into the tlb (and thus flushed) anyways. 
*/ - addr = (unsigned long) page_address(page); + if (PageHighMem(page)) + addr = (unsigned long)kmap_atomic(page); + else + addr = (unsigned long)page_address(page); + flush_data_cache_page(addr); + + if (PageHighMem(page)) + __kunmap_atomic((void *)addr); } EXPORT_SYMBOL(__flush_dcache_page); @@ -119,33 +125,28 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr) EXPORT_SYMBOL(__flush_anon_page); -void __flush_icache_page(struct vm_area_struct *vma, struct page *page) -{ - unsigned long addr; - - if (PageHighMem(page)) - return; - - addr = (unsigned long) page_address(page); - flush_data_cache_page(addr); -} -EXPORT_SYMBOL_GPL(__flush_icache_page); - -void __update_cache(struct vm_area_struct *vma, unsigned long address, - pte_t pte) +void __update_cache(unsigned long address, pte_t pte) { struct page *page; unsigned long pfn, addr; - int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc; + int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc; pfn = pte_pfn(pte); if (unlikely(!pfn_valid(pfn))) return; page = pfn_to_page(pfn); - if (page_mapping(page) && Page_dcache_dirty(page)) { - addr = (unsigned long) page_address(page); + if (Page_dcache_dirty(page)) { + if (PageHighMem(page)) + addr = (unsigned long)kmap_atomic(page); + else + addr = (unsigned long)page_address(page); + if (exec || pages_do_alias(addr, address & PAGE_MASK)) flush_data_cache_page(addr); + + if (PageHighMem(page)) + __kunmap_atomic((void *)addr); + ClearPageDcacheDirty(page); } } diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 14568900fc1d..090393aa0f20 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -5,10 +5,12 @@ obj-vdso-y := elf.o gettimeofday.o sigreturn.o ccflags-vdso := \ $(filter -I%,$(KBUILD_CFLAGS)) \ $(filter -E%,$(KBUILD_CFLAGS)) \ + $(filter -mmicromips,$(KBUILD_CFLAGS)) \ $(filter -march=%,$(KBUILD_CFLAGS)) cflags-vdso := $(ccflags-vdso) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ - -O2 -g -fPIC -fno-common -fno-builtin -G 0 -DDISABLE_BRANCH_PROFILING \ + -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ + -DDISABLE_BRANCH_PROFILING \ $(call cc-option, -fno-stack-protector) aflags-vdso := $(ccflags-vdso) \ $(filter -I%,$(KBUILD_CFLAGS)) \ diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index d7c0acb35ec2..8d49614d600d 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs) break; } - if (modify && R1(regs->iir)) + if (ret == 0 && modify && R1(regs->iir)) regs->gr[R1(regs->iir)] = newbase; @@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs) if (ret) { + /* + * The unaligned handler failed. + * If we were called by __get_user() or __put_user() jump + * to its exception fixup handler instead of crashing. 
+ */ + if (!user_mode(regs) && fixup_exception(regs)) + return; + printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret); die_if_kernel("Unaligned data reference", regs, 28); diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 2220f7a60def..070fa8552051 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -707,7 +707,7 @@ #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ #define SPRN_MMCR1 798 -#define SPRN_MMCR2 769 +#define SPRN_MMCR2 785 #define SPRN_MMCRA 0x312 #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL @@ -744,13 +744,13 @@ #define SPRN_PMC6 792 #define SPRN_PMC7 793 #define SPRN_PMC8 794 -#define SPRN_SIAR 780 -#define SPRN_SDAR 781 #define SPRN_SIER 784 #define SIER_SIPR 0x2000000 /* Sampled MSR_PR */ #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ +#define SPRN_SIAR 796 +#define SPRN_SDAR 797 #define SPRN_TACR 888 #define SPRN_TCSCR 889 #define SPRN_CSIGR 890 diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 40e4d4a27663..b34e8a54f7db 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1072,7 +1072,7 @@ void eeh_add_device_early(struct pci_dn *pdn) struct pci_controller *phb; struct eeh_dev *edev = pdn_to_eeh_dev(pdn); - if (!edev || !eeh_enabled()) + if (!edev) return; if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 52c1e273f8cd..247a0dc012f1 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -166,6 +166,16 @@ static void *eeh_dev_save_state(void *data, void *userdata) if (!edev) return NULL; + /* + * We cannot access the config space on some adapters. + * Otherwise, it will cause fenced PHB. We don't save + * the content in their config space and will restore + * from the initial config space saved when the EEH + * device is created. + */ + if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) + return NULL; + pdev = eeh_dev_to_pci_dev(edev); if (!pdev) return NULL; @@ -305,6 +315,19 @@ static void *eeh_dev_restore_state(void *data, void *userdata) if (!edev) return NULL; + /* + * The content in the config space isn't saved because + * the blocked config space on some adapters. We have + * to restore the initial saved config space when the + * EEH device is created. 
+ */ + if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) { + if (list_is_last(&edev->list, &edev->pe->edevs)) + eeh_pe_restore_bars(edev->pe); + + return NULL; + } + pdev = eeh_dev_to_pci_dev(edev); if (!pdev) return NULL; @@ -504,9 +527,6 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe) /* Save states */ eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL); - /* Report error */ - eeh_pe_dev_traverse(pe, eeh_report_error, &result); - /* Issue reset */ ret = eeh_reset_pe(pe); if (ret) { diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 0a0399c2af11..b81ccc5fb32d 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -962,11 +962,6 @@ hv_facility_unavailable_relon_trampoline: #endif STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist) - /* Other future vectors */ - .align 7 - .globl __end_interrupts -__end_interrupts: - .align 7 system_call_entry: b system_call_common @@ -1253,6 +1248,17 @@ __end_handlers: STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable) + /* + * The __end_interrupts marker must be past the out-of-line (OOL) + * handlers, so that they are copied to real address 0x100 when running + * a relocatable kernel. This ensures they can be reached from the short + * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch + * directly, without using LOAD_HANDLER(). + */ + .align 7 + .globl __end_interrupts +__end_interrupts: + #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* * Data area reserved for FWNMI option. diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index a7b91b54c813..b7abf3cd2a67 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1239,6 +1239,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) current->thread.regs = regs - 1; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* + * Clear any transactional state, we're exec()ing. The cause is + * not important as there will never be a recheckpoint so it's not + * user visible. + */ + if (MSR_TM_SUSPENDED(mfmsr())) + tm_reclaim_current(0); +#endif + memset(regs->gpr, 0, sizeof(regs->gpr)); regs->ctr = 0; regs->link = 0; diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 92dea8df6b26..b7e86e00048f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -655,6 +655,7 @@ unsigned char ibm_architecture_vec[] = { W(0xffff0000), W(0x003e0000), /* POWER6 */ W(0xffff0000), W(0x003f0000), /* POWER7 */ W(0xffff0000), W(0x004b0000), /* POWER8E */ + W(0xffff0000), W(0x004c0000), /* POWER8NVL */ W(0xffff0000), W(0x004d0000), /* POWER8 */ W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ @@ -717,7 +718,7 @@ unsigned char ibm_architecture_vec[] = { * must match by the macro below. Update the definition if * the structure layout changes. 
*/ -#define IBM_ARCH_VEC_NRCORES_OFFSET 125 +#define IBM_ARCH_VEC_NRCORES_OFFSET 133 W(NR_CPUS), /* number of cores supported */ 0, 0, diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index ac3ffd97e059..405baaf96864 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -615,29 +615,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) { int config_addr; int ret; + /* Waiting 0.2s maximum before skipping configuration */ + int max_wait = 200; /* Figure out the PE address */ config_addr = pe->config_addr; if (pe->addr) config_addr = pe->addr; - /* Use new configure-pe function, if supported */ - if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { - ret = rtas_call(ibm_configure_pe, 3, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); - } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { - ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); - } else { - return -EFAULT; - } + while (max_wait > 0) { + /* Use new configure-pe function, if supported */ + if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { + ret = rtas_call(ibm_configure_pe, 3, 1, NULL, + config_addr, BUID_HI(pe->phb->buid), + BUID_LO(pe->phb->buid)); + } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { + ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, + config_addr, BUID_HI(pe->phb->buid), + BUID_LO(pe->phb->buid)); + } else { + return -EFAULT; + } - if (ret) - pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", - __func__, pe->phb->global_number, pe->addr, ret); + if (!ret) + return ret; + + /* + * If RTAS returns a delay value that's above 100ms, cut it + * down to 100ms in case firmware made a mistake. 
For more + * on how these delay values work see rtas_busy_delay_time + */ + if (ret > RTAS_EXTENDED_DELAY_MIN+2 && + ret <= RTAS_EXTENDED_DELAY_MAX) + ret = RTAS_EXTENDED_DELAY_MIN+2; + + max_wait -= rtas_busy_delay_time(ret); + + if (max_wait < 0) + break; + + rtas_busy_delay(ret); + } + pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", + __func__, pe->phb->global_number, pe->addr, ret); return ret; } diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index bd98ce2be17b..3e8865b187de 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -912,7 +912,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows); static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, struct ddw_query_response *query) { - struct eeh_dev *edev; + struct device_node *dn; + struct pci_dn *pdn; u32 cfg_addr; u64 buid; int ret; @@ -923,11 +924,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, * Retrieve them from the pci device, not the node with the * dma-window property */ - edev = pci_dev_to_eeh_dev(dev); - cfg_addr = edev->config_addr; - if (edev->pe_config_addr) - cfg_addr = edev->pe_config_addr; - buid = edev->phb->buid; + dn = pci_device_to_OF_node(dev); + pdn = PCI_DN(dn); + buid = pdn->phb->buid; + cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8)); ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, cfg_addr, BUID_HI(buid), BUID_LO(buid)); @@ -941,7 +941,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, struct ddw_create_response *create, int page_shift, int window_shift) { - struct eeh_dev *edev; + struct device_node *dn; + struct pci_dn *pdn; u32 cfg_addr; u64 buid; int ret; @@ -952,11 +953,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, * Retrieve them from the pci device, not the node with the * dma-window property */ - edev = pci_dev_to_eeh_dev(dev); - cfg_addr = edev->config_addr; - if (edev->pe_config_addr) - cfg_addr = edev->pe_config_addr; - buid = edev->phb->buid; + dn = pci_device_to_OF_node(dev); + pdn = PCI_DN(dn); + buid = pdn->phb->buid; + cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8)); do { /* extra outputs are LIOBN and dma-addr (hi, lo) */ diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h index 5e04f3cbd320..8ae236b0f80b 100644 --- a/arch/s390/include/asm/fpu/api.h +++ b/arch/s390/include/asm/fpu/api.h @@ -22,7 +22,7 @@ static inline int test_fp_ctl(u32 fpc) " la %0,0\n" "1:\n" EX_TABLE(0b,1b) - : "=d" (rc), "=d" (orig_fpc) + : "=d" (rc), "=&d" (orig_fpc) : "d" (fpc), "0" (-EINVAL)); return rc; } diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h index f010c93a88b1..fda605dbc1b4 100644 --- a/arch/s390/net/bpf_jit.h +++ b/arch/s390/net/bpf_jit.h @@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; * | | | * +---------------+ | * | 8 byte skbp | | - * R15+170 -> +---------------+ | + * R15+176 -> +---------------+ | * | 8 byte hlen | | * R15+168 -> +---------------+ | * | 4 byte align | | @@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; #define STK_OFF (STK_SPACE - STK_160_UNUSED) #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ -#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */ +#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */ #define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ #define STK_OFF_TCCNT (160 - 12 * 
8) /* Offset of tail_call_cnt on stack */ diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 9a0c4c22e536..0e2919dd8df3 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -45,7 +45,7 @@ struct bpf_jit { int labels[1]; /* Labels for local jumps */ }; -#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */ +#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */ #define SEEN_SKB 1 /* skb access */ #define SEEN_MEM 2 /* use mem[] for temporary storage */ @@ -446,7 +446,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic) emit_load_skb_data_hlen(jit); if (jit->seen & SEEN_SKB_CHANGE) /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, + EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, STK_OFF_SKBP); /* Clear A (%b0) and X (%b7) registers for converted BPF programs */ if (is_classic) { diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h index 10e9dabc4c41..f0700cfeedd7 100644 --- a/arch/sparc/include/asm/head_64.h +++ b/arch/sparc/include/asm/head_64.h @@ -15,6 +15,10 @@ #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) +#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) +#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) +#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) + #define __CHEETAH_ID 0x003e0014 #define __JALAPENO_ID 0x003e0016 #define __SERRANO_ID 0x003e0022 diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 131d36fcd07a..408b715c95a5 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -375,7 +375,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) #define pgprot_noncached pgprot_noncached #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) -static inline pte_t pte_mkhuge(pte_t pte) +static inline unsigned long __pte_huge_mask(void) { unsigned long mask; @@ -390,8 +390,19 @@ static inline pte_t pte_mkhuge(pte_t pte) : "=r" (mask) : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V)); - return __pte(pte_val(pte) | mask); + return mask; +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + return __pte(pte_val(pte) | __pte_huge_mask()); +} + +static inline bool is_hugetlb_pte(pte_t pte) +{ + return !!(pte_val(pte) & __pte_huge_mask()); } + #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline pmd_t pmd_mkhuge(pmd_t pmd) { @@ -403,6 +414,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd) return __pmd(pte_val(pte)); } #endif +#else +static inline bool is_hugetlb_pte(pte_t pte) +{ + return false; +} #endif static inline pte_t pte_mkdirty(pte_t pte) @@ -865,6 +881,19 @@ static inline unsigned long pud_pfn(pud_t pud) void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig, int fullmm); +static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, + pte_t *ptep, pte_t orig, int fullmm) +{ + /* It is more efficient to let flush_tlb_kernel_range() + * handle init_mm tlb flushes. + * + * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U + * and SUN4V pte layout, so this inline test is fine. 
+ */ + if (likely(mm != &init_mm) && pte_accessible(mm, orig)) + tlb_batch_add(mm, vaddr, ptep, orig, fullmm); +} + #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, @@ -881,15 +910,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t orig = *ptep; *ptep = pte; - - /* It is more efficient to let flush_tlb_kernel_range() - * handle init_mm tlb flushes. - * - * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U - * and SUN4V pte layout, so this inline test is fine. - */ - if (likely(mm != &init_mm) && pte_accessible(mm, orig)) - tlb_batch_add(mm, addr, ptep, orig, fullmm); + maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm); } #define set_pte_at(mm,addr,ptep,pte) \ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index dea1cfa2122b..a8e192e90700 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -8,6 +8,7 @@ #define TLB_BATCH_NR 192 struct tlb_batch { + bool huge; struct mm_struct *mm; unsigned long tlb_nr; unsigned long active; @@ -16,7 +17,7 @@ struct tlb_batch { void flush_tsb_kernel_range(unsigned long start, unsigned long end); void flush_tsb_user(struct tlb_batch *tb); -void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge); /* TLB flush operations. */ diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h index 71b5a67522ab..781b9f1dbdc2 100644 --- a/arch/sparc/include/asm/ttable.h +++ b/arch/sparc/include/asm/ttable.h @@ -589,8 +589,8 @@ user_rtt_fill_64bit: \ restored; \ nop; nop; nop; nop; nop; nop; \ nop; nop; nop; nop; nop; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ + ba,a,pt %xcc, user_rtt_fill_fixup_dax; \ + ba,a,pt %xcc, user_rtt_fill_fixup_mna; \ ba,a,pt %xcc, user_rtt_fill_fixup; @@ -652,8 +652,8 @@ user_rtt_fill_32bit: \ restored; \ nop; nop; nop; nop; nop; \ nop; nop; nop; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ + ba,a,pt %xcc, user_rtt_fill_fixup_dax; \ + ba,a,pt %xcc, user_rtt_fill_fixup_mna; \ ba,a,pt %xcc, user_rtt_fill_fixup; diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 7cf9c6ea3f1f..fdb13327fded 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg CFLAGS_REMOVE_pcr.o := -pg endif +obj-$(CONFIG_SPARC64) += urtt_fill.o obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o obj-$(CONFIG_SPARC32) += etrap_32.o obj-$(CONFIG_SPARC32) += rtrap_32.o diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S index 4ee1ad420862..655628def68e 100644 --- a/arch/sparc/kernel/cherrs.S +++ b/arch/sparc/kernel/cherrs.S @@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ subcc %g1, %g2, %g1 ! Next cacheline bge,pt %icc, 1b nop - ba,pt %xcc, dcpe_icpe_tl1_common - nop + ba,a,pt %xcc, dcpe_icpe_tl1_common do_dcpe_tl1_fatal: sethi %hi(1f), %g7 @@ -224,8 +223,7 @@ do_dcpe_tl1_fatal: mov 0x2, %o0 call cheetah_plus_parity_error add %sp, PTREGS_OFF, %o1 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size do_dcpe_tl1,.-do_dcpe_tl1 .globl do_icpe_tl1 @@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. 
*/ subcc %g1, %g2, %g1 bge,pt %icc, 1b nop - ba,pt %xcc, dcpe_icpe_tl1_common - nop + ba,a,pt %xcc, dcpe_icpe_tl1_common do_icpe_tl1_fatal: sethi %hi(1f), %g7 @@ -269,8 +266,7 @@ do_icpe_tl1_fatal: mov 0x3, %o0 call cheetah_plus_parity_error add %sp, PTREGS_OFF, %o1 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size do_icpe_tl1,.-do_icpe_tl1 .type dcpe_icpe_tl1_common,#function @@ -456,7 +452,7 @@ __cheetah_log_error: cmp %g2, 0x63 be c_cee nop - ba,pt %xcc, c_deferred + ba,a,pt %xcc, c_deferred .size __cheetah_log_error,.-__cheetah_log_error /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index 33c02b15f478..a83707c83be8 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -948,7 +948,24 @@ linux_syscall_trace: cmp %o0, 0 bne 3f mov -ENOSYS, %o0 + + /* Syscall tracing can modify the registers. */ + ld [%sp + STACKFRAME_SZ + PT_G1], %g1 + sethi %hi(sys_call_table), %l7 + ld [%sp + STACKFRAME_SZ + PT_I0], %i0 + or %l7, %lo(sys_call_table), %l7 + ld [%sp + STACKFRAME_SZ + PT_I1], %i1 + ld [%sp + STACKFRAME_SZ + PT_I2], %i2 + ld [%sp + STACKFRAME_SZ + PT_I3], %i3 + ld [%sp + STACKFRAME_SZ + PT_I4], %i4 + ld [%sp + STACKFRAME_SZ + PT_I5], %i5 + cmp %g1, NR_syscalls + bgeu 3f + mov -ENOSYS, %o0 + + sll %g1, 2, %l4 mov %i0, %o0 + ld [%l7 + %l4], %l7 mov %i1, %o1 mov %i2, %o2 mov %i3, %o3 diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S index a6864826a4bd..336d2750fe78 100644 --- a/arch/sparc/kernel/fpu_traps.S +++ b/arch/sparc/kernel/fpu_traps.S @@ -100,8 +100,8 @@ do_fpdis: fmuld %f0, %f2, %f26 faddd %f0, %f2, %f28 fmuld %f0, %f2, %f30 - b,pt %xcc, fpdis_exit - nop + ba,a,pt %xcc, fpdis_exit + 2: andcc %g5, FPRS_DU, %g0 bne,pt %icc, 3f fzero %f32 @@ -144,8 +144,8 @@ do_fpdis: fmuld %f32, %f34, %f58 faddd %f32, %f34, %f60 fmuld %f32, %f34, %f62 - ba,pt %xcc, fpdis_exit - nop + ba,a,pt %xcc, fpdis_exit + 3: mov SECONDARY_CONTEXT, %g3 add %g6, TI_FPREGS, %g1 @@ -197,8 +197,7 @@ fpdis_exit2: fp_other_bounce: call do_fpother add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size fp_other_bounce,.-fp_other_bounce .align 32 diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index f2d30cab5b3f..51faf92ace00 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -461,9 +461,8 @@ sun4v_chip_type: subcc %g3, 1, %g3 bne,pt %xcc, 41b add %g1, 1, %g1 - mov SUN4V_CHIP_SPARC64X, %g4 ba,pt %xcc, 5f - nop + mov SUN4V_CHIP_SPARC64X, %g4 49: mov SUN4V_CHIP_UNKNOWN, %g4 @@ -548,8 +547,7 @@ sun4u_init: stxa %g0, [%g7] ASI_DMMU membar #Sync - ba,pt %xcc, sun4u_continue - nop + ba,a,pt %xcc, sun4u_continue sun4v_init: /* Set ctx 0 */ @@ -560,14 +558,12 @@ sun4v_init: mov SECONDARY_CONTEXT, %g7 stxa %g0, [%g7] ASI_MMU membar #Sync - ba,pt %xcc, niagara_tlb_fixup - nop + ba,a,pt %xcc, niagara_tlb_fixup sun4u_continue: BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup) - ba,pt %xcc, spitfire_tlb_fixup - nop + ba,a,pt %xcc, spitfire_tlb_fixup niagara_tlb_fixup: mov 3, %g2 /* Set TLB type to hypervisor. */ @@ -639,8 +635,7 @@ niagara_patch: call hypervisor_patch_cachetlbops nop - ba,pt %xcc, tlb_fixup_done - nop + ba,a,pt %xcc, tlb_fixup_done cheetah_tlb_fixup: mov 2, %g2 /* Set TLB type to cheetah+. */ @@ -659,8 +654,7 @@ cheetah_tlb_fixup: call cheetah_patch_cachetlbops nop - ba,pt %xcc, tlb_fixup_done - nop + ba,a,pt %xcc, tlb_fixup_done spitfire_tlb_fixup: /* Set TLB type to spitfire. 
*/ @@ -782,8 +776,7 @@ setup_trap_table: call %o1 add %sp, (2047 + 128), %o0 - ba,pt %xcc, 2f - nop + ba,a,pt %xcc, 2f 1: sethi %hi(sparc64_ttable_tl0), %o0 set prom_set_trap_table_name, %g2 @@ -822,8 +815,7 @@ setup_trap_table: BRANCH_IF_ANY_CHEETAH(o2, o3, 1f) - ba,pt %xcc, 2f - nop + ba,a,pt %xcc, 2f /* Disable STICK_INT interrupts. */ 1: diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S index 753b4f031bfb..34b4933900bf 100644 --- a/arch/sparc/kernel/misctrap.S +++ b/arch/sparc/kernel/misctrap.S @@ -18,8 +18,7 @@ __do_privact: 109: or %g7, %lo(109b), %g7 call do_privact add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size __do_privact,.-__do_privact .type do_mna,#function @@ -46,8 +45,7 @@ do_mna: mov %l5, %o2 call mem_address_unaligned add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size do_mna,.-do_mna .type do_lddfmna,#function @@ -65,8 +63,7 @@ do_lddfmna: mov %l5, %o2 call handle_lddfmna add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size do_lddfmna,.-do_lddfmna .type do_stdfmna,#function @@ -84,8 +81,7 @@ do_stdfmna: mov %l5, %o2 call handle_stdfmna add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size do_stdfmna,.-do_stdfmna .type breakpoint_trap,#function diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index badf0951d73c..9f9614df9e1e 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -994,6 +994,23 @@ void pcibios_set_master(struct pci_dev *dev) /* No special bus mastering setup handling */ } +#ifdef CONFIG_PCI_IOV +int pcibios_add_device(struct pci_dev *dev) +{ + struct pci_dev *pdev; + + /* Add sriov arch specific initialization here. + * Copy dev_archdata from PF to VF + */ + if (dev->is_virtfn) { + pdev = dev->physfn; + memcpy(&dev->dev.archdata, &pdev->dev.archdata, + sizeof(struct dev_archdata)); + } + return 0; +} +#endif /* CONFIG_PCI_IOV */ + static int __init pcibios_init(void) { pci_dfl_cache_line_size = 64 >> 2; diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index d08bdaffdbfc..216948ca4382 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -14,10 +14,6 @@ #include <asm/visasm.h> #include <asm/processor.h> -#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) -#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) -#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) - #ifdef CONFIG_CONTEXT_TRACKING # define SCHEDULE_USER schedule_user #else @@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 wrpr %g1, %cwp ba,a,pt %xcc, user_rtt_fill_64bit -user_rtt_fill_fixup: - rdpr %cwp, %g1 - add %g1, 1, %g1 - wrpr %g1, 0x0, %cwp - - rdpr %wstate, %g2 - sll %g2, 3, %g2 - wrpr %g2, 0x0, %wstate - - /* We know %canrestore and %otherwin are both zero. 
*/ - - sethi %hi(sparc64_kern_pri_context), %g2 - ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 - mov PRIMARY_CONTEXT, %g1 - -661: stxa %g2, [%g1] ASI_DMMU - .section .sun4v_1insn_patch, "ax" - .word 661b - stxa %g2, [%g1] ASI_MMU - .previous - - sethi %hi(KERNBASE), %g1 - flush %g1 +user_rtt_fill_fixup_dax: + ba,pt %xcc, user_rtt_fill_fixup_common + mov 1, %g3 - or %g4, FAULT_CODE_WINFIXUP, %g4 - stb %g4, [%g6 + TI_FAULT_CODE] - stx %g5, [%g6 + TI_FAULT_ADDR] +user_rtt_fill_fixup_mna: + ba,pt %xcc, user_rtt_fill_fixup_common + mov 2, %g3 - mov %g6, %l1 - wrpr %g0, 0x0, %tl - -661: nop - .section .sun4v_1insn_patch, "ax" - .word 661b - SET_GL(0) - .previous - - wrpr %g0, RTRAP_PSTATE, %pstate - - mov %l1, %g6 - ldx [%g6 + TI_TASK], %g4 - LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) - call do_sparc64_fault - add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop +user_rtt_fill_fixup: + ba,pt %xcc, user_rtt_fill_fixup_common + clr %g3 user_rtt_pre_restore: add %g1, 1, %g1 diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 4eed773a7735..77655f0f0fc7 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) return 0; } +/* Checks if the fp is valid. We always build signal frames which are + * 16-byte aligned, therefore we can always enforce that the restore + * frame has that property as well. + */ +static bool invalid_frame_pointer(void __user *fp, int fplen) +{ + if ((((unsigned long) fp) & 15) || + ((unsigned long)fp) > 0x100000000ULL - fplen) + return true; + return false; +} + void do_sigreturn32(struct pt_regs *regs) { struct signal_frame32 __user *sf; compat_uptr_t fpu_save; compat_uptr_t rwin_save; - unsigned int psr; + unsigned int psr, ufp; unsigned pc, npc; sigset_t set; compat_sigset_t seta; @@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs) sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; /* 1. Make sure we are not getting garbage from the user */ - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || - (((unsigned long) sf) & 3)) + if (invalid_frame_pointer(sf, sizeof(*sf))) + goto segv; + + if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) goto segv; - if (get_user(pc, &sf->info.si_regs.pc) || + if (__get_user(pc, &sf->info.si_regs.pc) || __get_user(npc, &sf->info.si_regs.npc)) goto segv; @@ -227,7 +244,7 @@ segv: asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) { struct rt_signal_frame32 __user *sf; - unsigned int psr, pc, npc; + unsigned int psr, pc, npc, ufp; compat_uptr_t fpu_save; compat_uptr_t rwin_save; sigset_t set; @@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; /* 1. 
Make sure we are not getting garbage from the user */ - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || - (((unsigned long) sf) & 3)) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv; - if (get_user(pc, &sf->regs.pc) || + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) + goto segv; + + if (__get_user(pc, &sf->regs.pc) || __get_user(npc, &sf->regs.npc)) goto segv; @@ -307,14 +329,6 @@ segv: force_sig(SIGSEGV, current); } -/* Checks if the fp is valid */ -static int invalid_frame_pointer(void __user *fp, int fplen) -{ - if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen) - return 1; - return 0; -} - static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 52aa5e4ce5e7..c3c12efe0bc0 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -60,10 +60,22 @@ struct rt_signal_frame { #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) +/* Checks if the fp is valid. We always build signal frames which are + * 16-byte aligned, therefore we can always enforce that the restore + * frame has that property as well. + */ +static inline bool invalid_frame_pointer(void __user *fp, int fplen) +{ + if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen)) + return true; + + return false; +} + asmlinkage void do_sigreturn(struct pt_regs *regs) { + unsigned long up_psr, pc, npc, ufp; struct signal_frame __user *sf; - unsigned long up_psr, pc, npc; sigset_t set; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; @@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; /* 1. 
Make sure we are not getting garbage from the user */ - if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) + if (!invalid_frame_pointer(sf, sizeof(*sf))) + goto segv_and_exit; + + if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) goto segv_and_exit; - if (((unsigned long) sf) & 3) + if (ufp & 0x7) goto segv_and_exit; err = __get_user(pc, &sf->info.si_regs.pc); @@ -127,7 +142,7 @@ segv_and_exit: asmlinkage void do_rt_sigreturn(struct pt_regs *regs) { struct rt_signal_frame __user *sf; - unsigned int psr, pc, npc; + unsigned int psr, pc, npc, ufp; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; sigset_t set; @@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) synchronize_user_stack(); sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || - (((unsigned long) sf) & 0x03)) + if (!invalid_frame_pointer(sf, sizeof(*sf))) + goto segv; + + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) goto segv; err = __get_user(pc, &sf->regs.pc); @@ -178,15 +198,6 @@ segv: force_sig(SIGSEGV, current); } -/* Checks if the fp is valid */ -static inline int invalid_frame_pointer(void __user *fp, int fplen) -{ - if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen)) - return 1; - - return 0; -} - static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP]; diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index d88beff47bab..5ee930c48f4c 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) unsigned char fenab; int err; - flush_user_windows(); + synchronize_user_stack(); if (get_thread_wsaved() || (((unsigned long)ucp) & (sizeof(unsigned long)-1)) || (!__access_ok(ucp, sizeof(*ucp)))) @@ -234,6 +234,17 @@ do_sigsegv: goto out; } +/* Checks if the fp is valid. We always build rt signal frames which + * are 16-byte aligned, therefore we can always enforce that the + * restore frame has that property as well. + */ +static bool invalid_frame_pointer(void __user *fp) +{ + if (((unsigned long) fp) & 15) + return true; + return false; +} + struct rt_signal_frame { struct sparc_stackf ss; siginfo_t info; @@ -246,8 +257,8 @@ struct rt_signal_frame { void do_rt_sigreturn(struct pt_regs *regs) { + unsigned long tpc, tnpc, tstate, ufp; struct rt_signal_frame __user *sf; - unsigned long tpc, tnpc, tstate; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; sigset_t set; @@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs) (regs->u_regs [UREG_FP] + STACK_BIAS); /* 1. 
Make sure we are not getting garbage from the user */ - if (((unsigned long) sf) & 3) + if (invalid_frame_pointer(sf)) + goto segv; + + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) goto segv; - err = get_user(tpc, &sf->regs.tpc); + if ((ufp + STACK_BIAS) & 0x7) + goto segv; + + err = __get_user(tpc, &sf->regs.tpc); err |= __get_user(tnpc, &sf->regs.tnpc); if (test_thread_flag(TIF_32BIT)) { tpc &= 0xffffffff; @@ -308,14 +325,6 @@ segv: force_sig(SIGSEGV, current); } -/* Checks if the fp is valid */ -static int invalid_frame_pointer(void __user *fp) -{ - if (((unsigned long) fp) & 15) - return 1; - return 0; -} - static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c index 0f6eebe71e6c..e5fe8cef9a69 100644 --- a/arch/sparc/kernel/sigutil_32.c +++ b/arch/sparc/kernel/sigutil_32.c @@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) { int err; + + if (((unsigned long) fpu) & 3) + return -EFAULT; + #ifdef CONFIG_SMP if (test_tsk_thread_flag(current, TIF_USEDFPU)) regs->psr &= ~PSR_EF; @@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) struct thread_info *t = current_thread_info(); int i, wsaved, err; - __get_user(wsaved, &rp->wsaved); + if (((unsigned long) rp) & 3) + return -EFAULT; + + get_user(wsaved, &rp->wsaved); if (wsaved > NSWINS) return -EFAULT; diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c index 387834a9c56a..36aadcbeac69 100644 --- a/arch/sparc/kernel/sigutil_64.c +++ b/arch/sparc/kernel/sigutil_64.c @@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) unsigned long fprs; int err; - err = __get_user(fprs, &fpu->si_fprs); + if (((unsigned long) fpu) & 7) + return -EFAULT; + + err = get_user(fprs, &fpu->si_fprs); fprs_write(0); regs->tstate &= ~TSTATE_PEF; if (fprs & FPRS_DL) @@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) struct thread_info *t = current_thread_info(); int i, wsaved, err; - __get_user(wsaved, &rp->wsaved); + if (((unsigned long) rp) & 7) + return -EFAULT; + + get_user(wsaved, &rp->wsaved); if (wsaved > NSWINS) return -EFAULT; diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S index c357e40ffd01..4a73009f66a5 100644 --- a/arch/sparc/kernel/spiterrs.S +++ b/arch/sparc/kernel/spiterrs.S @@ -85,8 +85,7 @@ __spitfire_cee_trap_continue: ba,pt %xcc, etraptl1 rd %pc, %g7 - ba,pt %xcc, 2f - nop + ba,a,pt %xcc, 2f 1: ba,pt %xcc, etrap_irq rd %pc, %g7 @@ -100,8 +99,7 @@ __spitfire_cee_trap_continue: mov %l5, %o2 call spitfire_access_error add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size __spitfire_access_error,.-__spitfire_access_error /* This is the trap handler entry point for ECC correctable @@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1: mov %l5, %o2 call spitfire_data_access_exception_tl1 add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1 .type __spitfire_data_access_exception,#function @@ -200,8 +197,7 @@ __spitfire_data_access_exception: mov %l5, %o2 call spitfire_data_access_exception add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size 
__spitfire_data_access_exception,.-__spitfire_data_access_exception .type __spitfire_insn_access_exception_tl1,#function @@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1: mov %l5, %o2 call spitfire_insn_access_exception_tl1 add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1 .type __spitfire_insn_access_exception,#function @@ -240,6 +235,5 @@ __spitfire_insn_access_exception: mov %l5, %o2 call spitfire_insn_access_exception add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap .size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index bb0008927598..c4a1b5c40e4e 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -158,7 +158,25 @@ linux_syscall_trace32: add %sp, PTREGS_OFF, %o0 brnz,pn %o0, 3f mov -ENOSYS, %o0 + + /* Syscall tracing can modify the registers. */ + ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 + sethi %hi(sys_call_table32), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 + or %l7, %lo(sys_call_table32), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 + ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2 + ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3 + ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4 + ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5 + + cmp %g1, NR_syscalls + bgeu,pn %xcc, 3f + mov -ENOSYS, %o0 + + sll %g1, 2, %l4 srl %i0, 0, %o0 + lduw [%l7 + %l4], %l7 srl %i4, 0, %o4 srl %i1, 0, %o1 srl %i2, 0, %o2 @@ -170,7 +188,25 @@ linux_syscall_trace: add %sp, PTREGS_OFF, %o0 brnz,pn %o0, 3f mov -ENOSYS, %o0 + + /* Syscall tracing can modify the registers. */ + ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 + sethi %hi(sys_call_table64), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 + or %l7, %lo(sys_call_table64), %l7 + ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 + ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2 + ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3 + ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4 + ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5 + + cmp %g1, NR_syscalls + bgeu,pn %xcc, 3f + mov -ENOSYS, %o0 + + sll %g1, 2, %l4 mov %i0, %o0 + lduw [%l7 + %l4], %l7 mov %i1, %o1 mov %i2, %o2 mov %i3, %o3 diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S new file mode 100644 index 000000000000..5604a2b051d4 --- /dev/null +++ b/arch/sparc/kernel/urtt_fill.S @@ -0,0 +1,98 @@ +#include <asm/thread_info.h> +#include <asm/trap_block.h> +#include <asm/spitfire.h> +#include <asm/ptrace.h> +#include <asm/head.h> + + .text + .align 8 + .globl user_rtt_fill_fixup_common +user_rtt_fill_fixup_common: + rdpr %cwp, %g1 + add %g1, 1, %g1 + wrpr %g1, 0x0, %cwp + + rdpr %wstate, %g2 + sll %g2, 3, %g2 + wrpr %g2, 0x0, %wstate + + /* We know %canrestore and %otherwin are both zero. 
*/ + + sethi %hi(sparc64_kern_pri_context), %g2 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 + mov PRIMARY_CONTEXT, %g1 + +661: stxa %g2, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g1] ASI_MMU + .previous + + sethi %hi(KERNBASE), %g1 + flush %g1 + + mov %g4, %l4 + mov %g5, %l5 + brnz,pn %g3, 1f + mov %g3, %l3 + + or %g4, FAULT_CODE_WINFIXUP, %g4 + stb %g4, [%g6 + TI_FAULT_CODE] + stx %g5, [%g6 + TI_FAULT_ADDR] +1: + mov %g6, %l1 + wrpr %g0, 0x0, %tl + +661: nop + .section .sun4v_1insn_patch, "ax" + .word 661b + SET_GL(0) + .previous + + wrpr %g0, RTRAP_PSTATE, %pstate + + mov %l1, %g6 + ldx [%g6 + TI_TASK], %g4 + LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) + + brnz,pn %l3, 1f + nop + + call do_sparc64_fault + add %sp, PTREGS_OFF, %o0 + ba,pt %xcc, rtrap + nop + +1: cmp %g3, 2 + bne,pn %xcc, 2f + nop + + sethi %hi(tlb_type), %g1 + lduw [%g1 + %lo(tlb_type)], %g1 + cmp %g1, 3 + bne,pt %icc, 1f + add %sp, PTREGS_OFF, %o0 + mov %l4, %o2 + call sun4v_do_mna + mov %l5, %o1 + ba,a,pt %xcc, rtrap +1: mov %l4, %o1 + mov %l5, %o2 + call mem_address_unaligned + nop + ba,a,pt %xcc, rtrap + +2: sethi %hi(tlb_type), %g1 + mov %l4, %o1 + lduw [%g1 + %lo(tlb_type)], %g1 + mov %l5, %o2 + cmp %g1, 3 + bne,pt %icc, 1f + add %sp, PTREGS_OFF, %o0 + call sun4v_data_access_exception + nop + ba,a,pt %xcc, rtrap + +1: call spitfire_data_access_exception + nop + ba,a,pt %xcc, rtrap diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S index b7f0f3f3a909..c731e8023d3e 100644 --- a/arch/sparc/kernel/utrap.S +++ b/arch/sparc/kernel/utrap.S @@ -11,8 +11,7 @@ utrap_trap: /* %g3=handler,%g4=level */ mov %l4, %o1 call bad_trap add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap invoke_utrap: sllx %g3, 3, %g3 diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index f1a2f688b28a..4a41d412dd3d 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -33,6 +33,10 @@ ENTRY(_start) jiffies = jiffies_64; #endif +#ifdef CONFIG_SPARC64 +ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large") +#endif + SECTIONS { #ifdef CONFIG_SPARC64 diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S index 1e67ce958369..855019a8590e 100644 --- a/arch/sparc/kernel/winfixup.S +++ b/arch/sparc/kernel/winfixup.S @@ -32,8 +32,7 @@ fill_fixup: rd %pc, %g7 call do_sparc64_fault add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop + ba,a,pt %xcc, rtrap /* Be very careful about usage of the trap globals here. * You cannot touch %g5 as that has the fault information. 
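The sparc signal hunks above replace the old access_ok()/low-bit checks with a shared invalid_frame_pointer() helper: the frame pointer must be 16-byte aligned (signal frames are always built that way), the saved %fp read back out of the frame must be 8-byte aligned (after adding the stack bias on the 64-bit path), and a compat frame must sit entirely below 4 GB. Note that the 32-bit do_sigreturn()/do_rt_sigreturn() hunks test !invalid_frame_pointer(), the opposite sense of the compat and 64-bit paths, which reads as an inverted check. A stand-alone C sketch of the checks, for illustration only -- the constants mirror the hunks, but the helper names and signatures here are not the kernel's:

#include <stdbool.h>
#include <stdint.h>

#define STACK_BIAS 2047UL  /* 64-bit sparc stack bias, as used in the hunks above */

/* Compat (32-bit) frame: must be 16-byte aligned and fit below 4 GB. */
static bool frame_pointer_invalid_compat(uint64_t fp, unsigned int fplen)
{
        return (fp & 15) || fp > 0x100000000ULL - fplen;
}

/* Saved %fp pulled back out of the frame: re-checked for 8-byte alignment,
 * with the stack bias applied first on the 64-bit path. */
static bool saved_fp_misaligned_64(uint64_t ufp)
{
        return ((ufp + STACK_BIAS) & 0x7) != 0;
}

The bias is added on the 64-bit path because a 64-bit frame stores the biased %fp value; the 32-bit and compat paths check the raw saved value (ufp & 0x7) instead.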
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 131eaf4ad7f5..364d093f46c6 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -176,17 +176,31 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { int i; + pte_t orig[2]; + unsigned long nptes; if (!pte_present(*ptep) && pte_present(entry)) mm->context.huge_pte_count++; addr &= HPAGE_MASK; - for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { - set_pte_at(mm, addr, ptep, entry); + + nptes = 1 << HUGETLB_PAGE_ORDER; + orig[0] = *ptep; + orig[1] = *(ptep + nptes / 2); + for (i = 0; i < nptes; i++) { + *ptep = entry; ptep++; addr += PAGE_SIZE; pte_val(entry) += PAGE_SIZE; } + + /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */ + addr -= REAL_HPAGE_SIZE; + ptep -= nptes / 2; + maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0); + addr -= REAL_HPAGE_SIZE; + ptep -= nptes / 2; + maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0); } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, @@ -194,19 +208,28 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, { pte_t entry; int i; + unsigned long nptes; entry = *ptep; if (pte_present(entry)) mm->context.huge_pte_count--; addr &= HPAGE_MASK; - - for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { - pte_clear(mm, addr, ptep); + nptes = 1 << HUGETLB_PAGE_ORDER; + for (i = 0; i < nptes; i++) { + *ptep = __pte(0UL); addr += PAGE_SIZE; ptep++; } + /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */ + addr -= REAL_HPAGE_SIZE; + ptep -= nptes / 2; + maybe_tlb_batch_add(mm, addr, ptep, entry, 0); + addr -= REAL_HPAGE_SIZE; + ptep -= nptes / 2; + maybe_tlb_batch_add(mm, addr, ptep, entry, 0); + return entry; } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3025bd57f7ab..3c4b8975fa76 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -324,18 +324,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde tsb_insert(tsb, tag, tte); } -#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) -static inline bool is_hugetlb_pte(pte_t pte) -{ - if ((tlb_type == hypervisor && - (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || - (tlb_type != hypervisor && - (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) - return true; - return false; -} -#endif - void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { struct mm_struct *mm; @@ -1267,13 +1255,6 @@ static int __init numa_parse_mdesc(void) int i, j, err, count; u64 node; - /* Some sane defaults for numa latency values */ - for (i = 0; i < MAX_NUMNODES; i++) { - for (j = 0; j < MAX_NUMNODES; j++) - numa_latency[i][j] = (i == j) ? - LOCAL_DISTANCE : REMOTE_DISTANCE; - } - node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); if (node == MDESC_NODE_NULL) { mdesc_release(md); @@ -1369,10 +1350,18 @@ static int __init numa_parse_sun4u(void) static int __init bootmem_init_numa(void) { + int i, j; int err = -1; numadbg("bootmem_init_numa()\n"); + /* Some sane defaults for numa latency values */ + for (i = 0; i < MAX_NUMNODES; i++) { + for (j = 0; j < MAX_NUMNODES; j++) + numa_latency[i][j] = (i == j) ? + LOCAL_DISTANCE : REMOTE_DISTANCE; + } + if (numa_enabled) { if (tlb_type == hypervisor) err = numa_parse_mdesc(); @@ -2832,9 +2821,10 @@ void hugetlb_setup(struct pt_regs *regs) * the Data-TLB for huge pages. 
*/ if (tlb_type == cheetah_plus) { + bool need_context_reload = false; unsigned long ctx; - spin_lock(&ctx_alloc_lock); + spin_lock_irq(&ctx_alloc_lock); ctx = mm->context.sparc64_ctx_val; ctx &= ~CTX_PGSZ_MASK; ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; @@ -2853,9 +2843,12 @@ void hugetlb_setup(struct pt_regs *regs) * also executing in this address space. */ mm->context.sparc64_ctx_val = ctx; - on_each_cpu(context_reload, mm, 0); + need_context_reload = true; } - spin_unlock(&ctx_alloc_lock); + spin_unlock_irq(&ctx_alloc_lock); + + if (need_context_reload) + on_each_cpu(context_reload, mm, 0); } } #endif diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 9df2190c097e..f81cd9736700 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void) } static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, - bool exec) + bool exec, bool huge) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); unsigned long nr; @@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, } if (!tb->active) { - flush_tsb_user_page(mm, vaddr); + flush_tsb_user_page(mm, vaddr, huge); global_flush_tlb_page(mm, vaddr); goto out; } - if (nr == 0) + if (nr == 0) { tb->mm = mm; + tb->huge = huge; + } + + if (tb->huge != huge) { + flush_tlb_pending(); + tb->huge = huge; + nr = 0; + } tb->vaddrs[nr] = vaddr; tb->tlb_nr = ++nr; @@ -104,6 +112,8 @@ out: void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig, int fullmm) { + bool huge = is_hugetlb_pte(orig); + if (tlb_type != hypervisor && pte_dirty(orig)) { unsigned long paddr, pfn = pte_pfn(orig); @@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, no_cache_flush: if (!fullmm) - tlb_batch_add_one(mm, vaddr, pte_exec(orig)); + tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, if (pte_val(*pte) & _PAGE_VALID) { bool exec = pte_exec(*pte); - tlb_batch_add_one(mm, vaddr, exec); + tlb_batch_add_one(mm, vaddr, exec, false); } pte++; vaddr += PAGE_SIZE; @@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, pte_t orig_pte = __pte(pmd_val(orig)); bool exec = pte_exec(orig_pte); - tlb_batch_add_one(mm, addr, exec); - tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec); + tlb_batch_add_one(mm, addr, exec, true); + tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec, + true); } else { tlb_batch_pmd_scan(mm, addr, orig); } diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index a06576683c38..a0604a493a36 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -76,14 +76,15 @@ void flush_tsb_user(struct tlb_batch *tb) spin_lock_irqsave(&mm->context.lock, flags); - base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; - nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; - if (tlb_type == cheetah_plus || tlb_type == hypervisor) - base = __pa(base); - __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); - + if (!tb->huge) { + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); + } #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { + if (tb->huge && 
mm->context.tsb_block[MM_TSB_HUGE].tsb) { base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) @@ -94,20 +95,21 @@ void flush_tsb_user(struct tlb_batch *tb) spin_unlock_irqrestore(&mm->context.lock, flags); } -void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge) { unsigned long nentries, base, flags; spin_lock_irqsave(&mm->context.lock, flags); - base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; - nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; - if (tlb_type == cheetah_plus || tlb_type == hypervisor) - base = __pa(base); - __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); - + if (!huge) { + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); + } #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { + if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) { base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index bbe1a62efc02..4a3a140f26bf 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -163,6 +163,9 @@ isoimage: $(obj)/bzImage for i in lib lib64 share end ; do \ if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \ cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \ + if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \ + cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \ + fi ; \ break ; \ fi ; \ if [ $$i = end ] ; then exit 1 ; fi ; \ diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 29fa475ec518..c986d0b3bc35 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -71,8 +71,8 @@ int amd_cache_northbridges(void) while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL) i++; - if (i == 0) - return 0; + if (!i) + return -ENODEV; nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL); if (!nb) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 078de2e86b7a..5f82cd59f0e5 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -3601,7 +3601,7 @@ __init int intel_pmu_init(void) c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; } c->idxmsk64 &= - ~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed)); + ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed)); c->weight = hweight64(c->idxmsk64); } } diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c index 868e1194337f..49e35d003b74 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_pt.c +++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c @@ -694,6 +694,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, /* clear STOP and INT from current entry */ buf->topa_index[buf->stop_pos]->stop = 0; + buf->topa_index[buf->stop_pos]->intr = 0; buf->topa_index[buf->intr_pos]->intr = 0; /* how many pages till the STOP marker */ @@ -718,6 +719,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, buf->intr_pos = idx; 
buf->topa_index[buf->stop_pos]->stop = 1; + buf->topa_index[buf->stop_pos]->intr = 1; buf->topa_index[buf->intr_pos]->intr = 1; return 0; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 1deffe6cc873..023c442c33bb 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -959,7 +959,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) * normal page fault. */ regs->ip = (unsigned long)cur->addr; + /* + * Trap flag (TF) has been set here because this fault + * happened where the single stepping will be done. + * So clear it by resetting the current kprobe: + */ + regs->flags &= ~X86_EFLAGS_TF; + + /* + * If the TF flag was set before the kprobe hit, + * don't touch it: + */ regs->flags |= kcb->kprobe_old_flags; + if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); else diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ade185a46b1d..679302c312f8 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -109,6 +109,12 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) preempt_count_dec(); } +/* + * In IST context, we explicitly disable preemption. This serves two + * purposes: it makes it much less likely that we would accidentally + * schedule in IST context and it will force a warning if we somehow + * manage to schedule by accident. + */ void ist_enter(struct pt_regs *regs) { if (user_mode(regs)) { @@ -123,13 +129,7 @@ void ist_enter(struct pt_regs *regs) rcu_nmi_enter(); } - /* - * We are atomic because we're on the IST stack; or we're on - * x86_32, in which case we still shouldn't schedule; or we're - * on x86_64 and entered from user mode, in which case we're - * still atomic unless ist_begin_non_atomic is called. - */ - preempt_count_add(HARDIRQ_OFFSET); + preempt_disable(); /* This code is a bit fragile. Test it. 
*/ RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); @@ -137,7 +137,7 @@ void ist_enter(struct pt_regs *regs) void ist_exit(struct pt_regs *regs) { - preempt_count_sub(HARDIRQ_OFFSET); + preempt_enable_no_resched(); if (!user_mode(regs)) rcu_nmi_exit(); @@ -168,7 +168,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) BUG_ON((unsigned long)(current_top_of_stack() - current_stack_pointer()) >= THREAD_SIZE); - preempt_count_sub(HARDIRQ_OFFSET); + preempt_enable_no_resched(); } /** @@ -178,7 +178,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) */ void ist_end_non_atomic(void) { - preempt_count_add(HARDIRQ_OFFSET); + preempt_disable(); } static nokprobe_inline int diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 6525e926f566..2e1fd586b895 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -509,6 +509,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, do_cpuid_1_ent(&entry[i], function, idx); if (idx == 1) { entry[i].eax &= kvm_supported_word10_x86_features; + cpuid_mask(&entry[i].eax, 10); entry[i].ebx = 0; if (entry[i].eax & (F(XSAVES)|F(XSAVEC))) entry[i].ebx = diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index 3f8c732117ec..c146f3c262c3 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -44,8 +44,6 @@ static bool msr_mtrr_valid(unsigned msr) case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; - case 0x2f8: - return true; } return false; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f34ab71dfd57..41e7943004fe 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4954,8 +4954,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; - vmx_set_cr0(vcpu, cr0); /* enter rmode */ vmx->vcpu.arch.cr0 = cr0; + vmx_set_cr0(vcpu, cr0); /* enter rmode */ vmx_set_cr4(vcpu, 0); vmx_set_efer(vcpu, 0); vmx_fpu_activate(vcpu); @@ -6579,7 +6579,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, /* Checks for #GP/#SS exceptions. */ exn = false; - if (is_protmode(vcpu)) { + if (is_long_mode(vcpu)) { + /* Long mode: #GP(0)/#SS(0) if the memory address is in a + * non-canonical form. This is the only check on the memory + * destination for long mode! + */ + exn = is_noncanonical_address(*ret); + } else if (is_protmode(vcpu)) { /* Protected mode: apply checks for segment validity in the * following order: * - segment type check (#GP(0) may be thrown) @@ -6596,17 +6602,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, * execute-only code segment */ exn = ((s.type & 0xa) == 8); - } - if (exn) { - kvm_queue_exception_e(vcpu, GP_VECTOR, 0); - return 1; - } - if (is_long_mode(vcpu)) { - /* Long mode: #GP(0)/#SS(0) if the memory address is in a - * non-canonical form. This is an only check for long mode. - */ - exn = is_noncanonical_address(*ret); - } else if (is_protmode(vcpu)) { + if (exn) { + kvm_queue_exception_e(vcpu, GP_VECTOR, 0); + return 1; + } /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 
*/ exn = (s.unusable != 0); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 605cea75eb0d..be222666b1c2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3014,6 +3014,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, if (dbgregs->flags) return -EINVAL; + if (dbgregs->dr6 & ~0xffffffffull) + return -EINVAL; + if (dbgregs->dr7 & ~0xffffffffull) + return -EINVAL; + memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); kvm_update_dr0123(vcpu); vcpu->arch.dr6 = dbgregs->dr6; diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index ff31ab464213..c6d6efed392a 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -488,8 +488,11 @@ int __init pci_xen_initial_domain(void) #endif __acpi_register_gsi = acpi_register_gsi_xen; __acpi_unregister_gsi = NULL; - /* Pre-allocate legacy irqs */ - for (irq = 0; irq < nr_legacy_irqs(); irq++) { + /* + * Pre-allocate the legacy IRQs. Use NR_LEGACY_IRQS here + * because we don't have a PIC and thus nr_legacy_irqs() is zero. + */ + for (irq = 0; irq < NR_IRQS_LEGACY; irq++) { int trigger, polarity; if (acpi_get_override_irq(irq, &trigger, &polarity) == -1) diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 7ab29518a3b9..e345891450c3 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -393,6 +393,9 @@ static unsigned long __init xen_set_identity_and_remap_chunk( unsigned long i = 0; unsigned long n = end_pfn - start_pfn; + if (remap_pfn == 0) + remap_pfn = nr_pages; + while (i < n) { unsigned long cur_pfn = start_pfn + i; unsigned long left = n - i; @@ -438,17 +441,29 @@ static unsigned long __init xen_set_identity_and_remap_chunk( return remap_pfn; } -static void __init xen_set_identity_and_remap(unsigned long nr_pages) +static unsigned long __init xen_count_remap_pages( + unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, + unsigned long remap_pages) +{ + if (start_pfn >= nr_pages) + return remap_pages; + + return remap_pages + min(end_pfn, nr_pages) - start_pfn; +} + +static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, + unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn, + unsigned long nr_pages, unsigned long last_val)) { phys_addr_t start = 0; - unsigned long last_pfn = nr_pages; + unsigned long ret_val = 0; const struct e820entry *entry = xen_e820_map; int i; /* * Combine non-RAM regions and gaps until a RAM region (or the - * end of the map) is reached, then set the 1:1 map and - * remap the memory in those non-RAM regions. + * end of the map) is reached, then call the provided function + * to perform its duty on the non-RAM region. 
* * The combined non-RAM regions are rounded to a whole number * of pages so any partial pages are accessible via the 1:1 @@ -466,14 +481,13 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages) end_pfn = PFN_UP(entry->addr); if (start_pfn < end_pfn) - last_pfn = xen_set_identity_and_remap_chunk( - start_pfn, end_pfn, nr_pages, - last_pfn); + ret_val = func(start_pfn, end_pfn, nr_pages, + ret_val); start = end; } } - pr_info("Released %ld page(s)\n", xen_released_pages); + return ret_val; } /* @@ -596,35 +610,6 @@ static void __init xen_ignore_unusable(void) } } -static unsigned long __init xen_count_remap_pages(unsigned long max_pfn) -{ - unsigned long extra = 0; - unsigned long start_pfn, end_pfn; - const struct e820entry *entry = xen_e820_map; - int i; - - end_pfn = 0; - for (i = 0; i < xen_e820_map_entries; i++, entry++) { - start_pfn = PFN_DOWN(entry->addr); - /* Adjacent regions on non-page boundaries handling! */ - end_pfn = min(end_pfn, start_pfn); - - if (start_pfn >= max_pfn) - return extra + max_pfn - end_pfn; - - /* Add any holes in map to result. */ - extra += start_pfn - end_pfn; - - end_pfn = PFN_UP(entry->addr + entry->size); - end_pfn = min(end_pfn, max_pfn); - - if (entry->type != E820_RAM) - extra += end_pfn - start_pfn; - } - - return extra; -} - bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) { struct e820entry *entry; @@ -804,7 +789,7 @@ char * __init xen_memory_setup(void) max_pages = xen_get_max_pages(); /* How many extra pages do we need due to remapping? */ - max_pages += xen_count_remap_pages(max_pfn); + max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages); if (max_pages > max_pfn) extra_pages += max_pages - max_pfn; @@ -922,7 +907,9 @@ char * __init xen_memory_setup(void) * Set identity map on non-RAM pages and prepare remapping the * underlying RAM. */ - xen_set_identity_and_remap(max_pfn); + xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk); + + pr_info("Released %ld page(s)\n", xen_released_pages); return "Xen"; } diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 4870f28403f5..05bfe568cd30 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -14,6 +14,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE select MPILIB select PUBLIC_KEY_ALGO_RSA select CRYPTO_HASH_INFO + select CRYPTO_AKCIPHER help This option provides support for asymmetric public key type handling. 
If signature generation and/or verification are to be used, diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 43fe85f20d57..7097a3395b25 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -455,6 +455,7 @@ static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = { [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), + [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0, }; diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 32d684af0ec7..a000ecb995e6 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -135,7 +135,7 @@ static struct osi_linux { unsigned int enable:1; unsigned int dmi:1; unsigned int cmdline:1; - unsigned int default_disabling:1; + u8 default_disabling; } osi_linux = {0, 0, 0, 0}; static u32 acpi_osi_handler(acpi_string interface, u32 supported) @@ -1444,10 +1444,13 @@ void __init acpi_osi_setup(char *str) if (*str == '!') { str++; if (*str == '\0') { - osi_linux.default_disabling = 1; + /* Do not override acpi_osi=!* */ + if (!osi_linux.default_disabling) + osi_linux.default_disabling = + ACPI_DISABLE_ALL_VENDOR_STRINGS; return; } else if (*str == '*') { - acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS); + osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS; for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { osi = &osi_setup_entries[i]; osi->enable = false; @@ -1520,10 +1523,13 @@ static void __init acpi_osi_setup_late(void) acpi_status status; if (osi_linux.default_disabling) { - status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS); + status = acpi_update_interfaces(osi_linux.default_disabling); if (ACPI_SUCCESS(status)) - printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n"); + printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n", + osi_linux.default_disabling == + ACPI_DISABLE_ALL_STRINGS ? + " and feature groups" : ""); } for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 20d17906fc9b..f80cfc36a354 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3444,7 +3444,7 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node) static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) { - seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", + seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", ref->debug_id, ref->desc, ref->node->proc ? 
"" : "dead ", ref->node->debug_id, ref->strong, ref->weak, ref->death); } diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 961acc788f44..91a9e6af2ec4 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host) ata_scsi_port_error_handler(host, ap); /* finish or retry handled scmd's and clean up */ - WARN_ON(host->host_failed || !list_empty(&eh_work_q)); + WARN_ON(!list_empty(&eh_work_q)); DPRINTK("EXIT\n"); } diff --git a/drivers/base/core.c b/drivers/base/core.c index 3ac683dff7de..bbe8e2efc677 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -841,11 +841,29 @@ static struct kobject *get_device_parent(struct device *dev, return NULL; } +static inline bool live_in_glue_dir(struct kobject *kobj, + struct device *dev) +{ + if (!kobj || !dev->class || + kobj->kset != &dev->class->p->glue_dirs) + return false; + return true; +} + +static inline struct kobject *get_glue_dir(struct device *dev) +{ + return dev->kobj.parent; +} + +/* + * make sure cleaning up dir as the last step, we need to make + * sure .release handler of kobject is run with holding the + * global lock + */ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) { /* see if we live in a "glue" directory */ - if (!glue_dir || !dev->class || - glue_dir->kset != &dev->class->p->glue_dirs) + if (!live_in_glue_dir(glue_dir, dev)) return; mutex_lock(&gdp_mutex); @@ -853,11 +871,6 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) mutex_unlock(&gdp_mutex); } -static void cleanup_device_parent(struct device *dev) -{ - cleanup_glue_dir(dev, dev->kobj.parent); -} - static int device_add_class_symlinks(struct device *dev) { struct device_node *of_node = dev_of_node(dev); @@ -1033,6 +1046,7 @@ int device_add(struct device *dev) struct kobject *kobj; struct class_interface *class_intf; int error = -EINVAL; + struct kobject *glue_dir = NULL; dev = get_device(dev); if (!dev) @@ -1077,8 +1091,10 @@ int device_add(struct device *dev) /* first, register with generic layer. */ /* we require the name to be set before, and pass NULL */ error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); - if (error) + if (error) { + glue_dir = get_glue_dir(dev); goto Error; + } /* notify platform of device entry */ if (platform_notify) @@ -1159,9 +1175,10 @@ done: device_remove_file(dev, &dev_attr_uevent); attrError: kobject_uevent(&dev->kobj, KOBJ_REMOVE); + glue_dir = get_glue_dir(dev); kobject_del(&dev->kobj); Error: - cleanup_device_parent(dev); + cleanup_glue_dir(dev, glue_dir); put_device(parent); name_error: kfree(dev->p); @@ -1237,6 +1254,7 @@ EXPORT_SYMBOL_GPL(put_device); void device_del(struct device *dev) { struct device *parent = dev->parent; + struct kobject *glue_dir = NULL; struct class_interface *class_intf; /* Notify clients of device removal. 
This call must come @@ -1281,8 +1299,9 @@ void device_del(struct device *dev) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_REMOVED_DEVICE, dev); kobject_uevent(&dev->kobj, KOBJ_REMOVE); - cleanup_device_parent(dev); + glue_dir = get_glue_dir(dev); kobject_del(&dev->kobj); + cleanup_glue_dir(dev, glue_dir); put_device(parent); } EXPORT_SYMBOL_GPL(device_del); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 918ce439534b..0dd6379ac215 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -228,6 +228,8 @@ static void driver_bound(struct device *dev) klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices); + device_pm_check_callbacks(dev); + /* * Make sure the device is no longer in one of the deferred lists and * kick off retrying all pending devices @@ -720,6 +722,7 @@ static void __device_release_driver(struct device *dev) dev->pm_domain->dismiss(dev); klist_remove(&dev->p->knode_driver); + device_pm_check_callbacks(dev); if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_UNBOUND_DRIVER, diff --git a/drivers/base/module.c b/drivers/base/module.c index db930d3ee312..2a215780eda2 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv) static void module_create_drivers_dir(struct module_kobject *mk) { - if (!mk || mk->drivers_dir) - return; + static DEFINE_MUTEX(drivers_dir_mutex); - mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj); + mutex_lock(&drivers_dir_mutex); + if (mk && !mk->drivers_dir) + mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj); + mutex_unlock(&drivers_dir_mutex); } void module_add_driver(struct module *mod, struct device_driver *drv) diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 6ed8b9326629..7eea95d490e6 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -35,8 +35,6 @@ #include <linux/timer.h> #include <linux/wakeup_reason.h> -#include <asm/current.h> - #include "../base.h" #include "power.h" @@ -62,12 +60,6 @@ struct suspend_stats suspend_stats; static DEFINE_MUTEX(dpm_list_mtx); static pm_message_t pm_transition; -static void dpm_drv_timeout(unsigned long data); -struct dpm_drv_wd_data { - struct device *dev; - struct task_struct *tsk; -}; - static int async_error; static char *pm_verb(int event) @@ -134,6 +126,7 @@ void device_pm_add(struct device *dev) { pr_debug("PM: Adding info for %s:%s\n", dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); + device_pm_check_callbacks(dev); mutex_lock(&dpm_list_mtx); if (dev->parent && dev->parent->power.is_prepared) dev_warn(dev, "parent %s should not be sleeping\n", @@ -156,6 +149,7 @@ void device_pm_remove(struct device *dev) mutex_unlock(&dpm_list_mtx); device_wakeup_disable(dev); pm_runtime_remove(dev); + device_pm_check_callbacks(dev); } /** @@ -839,30 +833,6 @@ static void async_resume(void *data, async_cookie_t cookie) } /** - * dpm_drv_timeout - Driver suspend / resume watchdog handler - * @data: struct device which timed out - * - * Called when a driver has timed out suspending or resuming. - * There's not much we can do here to recover so - * BUG() out for a crash-dump - * - */ -static void dpm_drv_timeout(unsigned long data) -{ - struct dpm_drv_wd_data *wd_data = (void *)data; - struct device *dev = wd_data->dev; - struct task_struct *tsk = wd_data->tsk; - - printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev), - (dev->driver ? 
dev->driver->name : "no driver")); - - printk(KERN_EMERG "dpm suspend stack:\n"); - show_stack(tsk, NULL); - - BUG(); -} - -/** * dpm_resume - Execute "resume" callbacks for non-sysdev devices. * @state: PM transition of the system being carried out. * @@ -1295,14 +1265,15 @@ int dpm_suspend_late(pm_message_t state) error = device_suspend_late(dev); mutex_lock(&dpm_list_mtx); + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &dpm_late_early_list); + if (error) { pm_dev_err(dev, state, " late", error); dpm_save_failed_dev(dev_name(dev)); put_device(dev); break; } - if (!list_empty(&dev->power.entry)) - list_move(&dev->power.entry, &dpm_late_early_list); put_device(dev); if (async_error) @@ -1380,8 +1351,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) pm_callback_t callback = NULL; char *info = NULL; int error = 0; - struct timer_list timer; - struct dpm_drv_wd_data data; char suspend_abort[MAX_SUSPEND_ABORT_LEN]; DECLARE_DPM_WATCHDOG_ON_STACK(wd); @@ -1412,14 +1381,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; - - data.dev = dev; - data.tsk = current; - init_timer_on_stack(&timer); - timer.expires = jiffies + HZ * 12; - timer.function = dpm_drv_timeout; - timer.data = (unsigned long)&data; - add_timer(&timer); if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { @@ -1500,9 +1461,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) device_unlock(dev); dpm_watchdog_clear(&wd); - del_timer_sync(&timer); - destroy_timer_on_stack(&timer); - Complete: complete_all(&dev->power.completion); if (error) @@ -1619,6 +1577,11 @@ static int device_prepare(struct device *dev, pm_message_t state) dev->power.wakeup_path = device_may_wakeup(dev); + if (dev->power.no_pm_callbacks) { + ret = 1; /* Let device go direct_complete */ + goto unlock; + } + if (dev->pm_domain) { info = "preparing power domain "; callback = dev->pm_domain->ops.prepare; @@ -1641,6 +1604,7 @@ static int device_prepare(struct device *dev, pm_message_t state) if (callback) ret = callback(dev); +unlock: device_unlock(dev); if (ret < 0) { @@ -1769,3 +1733,30 @@ void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) device_pm_unlock(); } EXPORT_SYMBOL_GPL(dpm_for_each_dev); + +static bool pm_ops_is_empty(const struct dev_pm_ops *ops) +{ + if (!ops) + return true; + + return !ops->prepare && + !ops->suspend && + !ops->suspend_late && + !ops->suspend_noirq && + !ops->resume_noirq && + !ops->resume_early && + !ops->resume && + !ops->complete; +} + +void device_pm_check_callbacks(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + dev->power.no_pm_callbacks = + (!dev->bus || pm_ops_is_empty(dev->bus->pm)) && + (!dev->class || pm_ops_is_empty(dev->class->pm)) && + (!dev->type || pm_ops_is_empty(dev->type->pm)) && + (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && + (!dev->driver || pm_ops_is_empty(dev->driver->pm)); + spin_unlock_irq(&dev->power.lock); +} diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 998fa6b23084..297beae64314 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -123,6 +123,7 @@ extern void device_pm_remove(struct device *); extern void device_pm_move_before(struct device *, struct device *); extern void device_pm_move_after(struct device *, struct device *); extern void device_pm_move_last(struct device *); +extern void device_pm_check_callbacks(struct 
device *dev); #else /* !CONFIG_PM_SLEEP */ @@ -141,6 +142,8 @@ static inline void device_pm_move_after(struct device *deva, struct device *devb) {} static inline void device_pm_move_last(struct device *dev) {} +static inline void device_pm_check_callbacks(struct device *dev) {} + #endif /* !CONFIG_PM_SLEEP */ static inline void device_pm_init(struct device *dev) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index e1a10a03df8e..9796a1a15ef6 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1468,11 +1468,16 @@ int pm_runtime_force_resume(struct device *dev) goto out; } - ret = callback(dev); + ret = pm_runtime_set_active(dev); if (ret) goto out; - pm_runtime_set_active(dev); + ret = callback(dev); + if (ret) { + pm_runtime_set_suspended(dev); + goto out; + } + pm_runtime_mark_last_busy(dev); out: pm_runtime_enable(dev); diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c index 1317ddaa3c23..b05b999fbbdc 100644 --- a/drivers/bluetooth/bluetooth-power.c +++ b/drivers/bluetooth/bluetooth-power.c @@ -46,6 +46,7 @@ static const struct of_device_id bt_power_match_table[] = { static struct bluetooth_power_platform_data *bt_power_pdata; static struct platform_device *btpdev; static bool previous; +static int pwr_state; struct class *bt_class; static int bt_major; @@ -636,6 +637,7 @@ static int bt_power_probe(struct platform_device *pdev) memcpy(bt_power_pdata, pdev->dev.platform_data, sizeof(struct bluetooth_power_platform_data)); + pwr_state = 0; } else { BT_PWR_ERR("Failed to get platform data"); goto free_pdata; @@ -680,7 +682,7 @@ int bt_register_slimdev(struct device *dev) static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - int ret; + int ret, pwr_cntrl = 0; switch (cmd) { case BT_CMD_SLIM_TEST: @@ -692,6 +694,18 @@ static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) bt_power_pdata->slim_dev->platform_data ); break; + case BT_CMD_PWR_CTRL: + pwr_cntrl = (int)arg; + BT_PWR_ERR("BT_CMD_PWR_CTRL pwr_cntrl:%d", pwr_cntrl); + if (pwr_state != pwr_cntrl) { + ret = bluetooth_power(pwr_cntrl); + if (!ret) + pwr_state = pwr_cntrl; + } else { + BT_PWR_ERR("BT chip state is already :%d no change d\n" + , pwr_state); + } + break; default: return -EINVAL; } @@ -711,6 +725,7 @@ static struct platform_driver bt_power_driver = { static const struct file_operations bt_dev_fops = { .owner = THIS_MODULE, .unlocked_ioctl = bt_ioctl, + .compat_ioctl = bt_ioctl, }; static int __init bluetooth_power_init(void) @@ -733,7 +748,7 @@ static int __init bluetooth_power_init(void) if (device_create(bt_class, NULL, MKDEV(bt_major, 0), - NULL, "pintest") == NULL) { + NULL, "btpower") == NULL) { BTFMSLIM_ERR("failed to allocate char dev\n"); goto chrdev_unreg; } diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index ed888e302bc3..597b2d16b775 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -50,6 +50,7 @@ struct vhci_data { wait_queue_head_t read_wait; struct sk_buff_head readq; + struct mutex open_mutex; struct delayed_work open_timeout; }; @@ -87,12 +88,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return 0; } -static int vhci_create_device(struct vhci_data *data, __u8 opcode) +static int __vhci_create_device(struct vhci_data *data, __u8 opcode) { struct hci_dev *hdev; struct sk_buff *skb; __u8 dev_type; + if (data->hdev) + return -EBADFD; + /* bits 0-1 are dev_type (BR/EDR or AMP) */ dev_type = 
opcode & 0x03; @@ -151,6 +155,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode) return 0; } +static int vhci_create_device(struct vhci_data *data, __u8 opcode) +{ + int err; + + mutex_lock(&data->open_mutex); + err = __vhci_create_device(data, opcode); + mutex_unlock(&data->open_mutex); + + return err; +} + static inline ssize_t vhci_get_user(struct vhci_data *data, struct iov_iter *from) { @@ -189,11 +204,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data, break; case HCI_VENDOR_PKT: - if (data->hdev) { - kfree_skb(skb); - return -EBADFD; - } - cancel_delayed_work_sync(&data->open_timeout); opcode = *((__u8 *) skb->data); @@ -320,6 +330,7 @@ static int vhci_open(struct inode *inode, struct file *file) skb_queue_head_init(&data->readq); init_waitqueue_head(&data->read_wait); + mutex_init(&data->open_mutex); INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout); file->private_data = data; @@ -333,15 +344,18 @@ static int vhci_open(struct inode *inode, struct file *file) static int vhci_release(struct inode *inode, struct file *file) { struct vhci_data *data = file->private_data; - struct hci_dev *hdev = data->hdev; + struct hci_dev *hdev; cancel_delayed_work_sync(&data->open_timeout); + hdev = data->hdev; + if (hdev) { hci_unregister_dev(hdev); hci_free_dev(hdev); } + skb_queue_purge(&data->readq); file->private_data = NULL; kfree(data); diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index ca9a8684de94..1046c262b46b 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -590,10 +590,6 @@ config DEVPORT depends on ISA || PCI default y -config DCC_TTY - tristate "DCC tty driver" - depends on ARM - source "drivers/s390/char/Kconfig" config TILE_SROM diff --git a/drivers/char/Makefile b/drivers/char/Makefile index fe696f180841..e180562c725e 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -53,7 +53,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia/ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o obj-$(CONFIG_TCG_TPM) += tpm/ -obj-$(CONFIG_DCC_TTY) += dcc_tty.o obj-$(CONFIG_PS3_FLASH) += ps3flash.o obj-$(CONFIG_JS_RTC) += js-rtc.o diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 67c1207d35be..ef8aaac6e0a2 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -1070,7 +1070,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) int idx = list[i].pgidx; if (map->attr & FASTRPC_ATTR_NOVA) { - offset = (uintptr_t)lpra[i].buf.pv; + offset = 0; } else { down_read(&current->mm->mmap_sem); VERIFY(err, NULL != (vma = find_vma(current->mm, diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c deleted file mode 100644 index 0a62d410286f..000000000000 --- a/drivers/char/dcc_tty.c +++ /dev/null @@ -1,326 +0,0 @@ -/* drivers/char/dcc_tty.c - * - * Copyright (C) 2007 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details.
- * - */ - -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/delay.h> -#include <linux/console.h> -#include <linux/hrtimer.h> -#include <linux/tty.h> -#include <linux/tty_driver.h> -#include <linux/tty_flip.h> - -MODULE_DESCRIPTION("DCC TTY Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0"); - -DEFINE_SPINLOCK(g_dcc_tty_lock); -static struct hrtimer g_dcc_timer; -static char g_dcc_buffer[16]; -static int g_dcc_buffer_head; -static int g_dcc_buffer_count; -static unsigned g_dcc_write_delay_usecs = 1; -static struct tty_driver *g_dcc_tty_driver; -static struct tty_struct *g_dcc_tty; -static int g_dcc_tty_open_count; - -static void dcc_poll_locked(void) -{ - char ch; - int rch; - int written; - - while (g_dcc_buffer_count) { - ch = g_dcc_buffer[g_dcc_buffer_head]; - asm( - "mrc 14, 0, r15, c0, c1, 0\n" - "mcrcc 14, 0, %1, c0, c5, 0\n" - "movcc %0, #1\n" - "movcs %0, #0\n" - : "=r" (written) - : "r" (ch) - ); - if (written) { - if (ch == '\n') - g_dcc_buffer[g_dcc_buffer_head] = '\r'; - else { - g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer); - g_dcc_buffer_count--; - if (g_dcc_tty) - tty_wakeup(g_dcc_tty); - } - g_dcc_write_delay_usecs = 1; - } else { - if (g_dcc_write_delay_usecs > 0x100) - break; - g_dcc_write_delay_usecs <<= 1; - udelay(g_dcc_write_delay_usecs); - } - } - - if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) { - asm( - "mrc 14, 0, %0, c0, c1, 0\n" - "tst %0, #(1 << 30)\n" - "moveq %0, #-1\n" - "mrcne 14, 0, %0, c0, c5, 0\n" - : "=r" (rch) - ); - if (rch >= 0) { - ch = rch; - tty_insert_flip_string(g_dcc_tty->port, &ch, 1); - tty_flip_buffer_push(g_dcc_tty->port); - } - } - - - if (g_dcc_buffer_count) - hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL); - else - hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL); -} - -static int dcc_tty_open(struct tty_struct * tty, struct file * filp) -{ - int ret; - unsigned long irq_flags; - - spin_lock_irqsave(&g_dcc_tty_lock, irq_flags); - if (g_dcc_tty == NULL || g_dcc_tty == tty) { - g_dcc_tty = tty; - g_dcc_tty_open_count++; - ret = 0; - } else - ret = -EBUSY; - spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags); - - printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret); - - return ret; -} - -static void dcc_tty_close(struct tty_struct * tty, struct file * filp) -{ - printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags); - if (g_dcc_tty == tty) { - if (--g_dcc_tty_open_count == 0) - g_dcc_tty = NULL; - } -} - -static int dcc_write(const unsigned char *buf_start, int count) -{ - const unsigned char *buf = buf_start; - unsigned long irq_flags; - int copy_len; - int space_left; - int tail; - - if (count < 1) - return 0; - - spin_lock_irqsave(&g_dcc_tty_lock, irq_flags); - do { - tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer); - copy_len = ARRAY_SIZE(g_dcc_buffer) - tail; - space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count; - if (copy_len > space_left) - copy_len = space_left; - if (copy_len > count) - copy_len = count; - memcpy(&g_dcc_buffer[tail], buf, copy_len); - g_dcc_buffer_count += copy_len; - buf += copy_len; - count -= copy_len; - if (copy_len < count && copy_len < space_left) { - space_left -= copy_len; - copy_len = count; - if (copy_len > space_left) { - copy_len = space_left; - } - memcpy(g_dcc_buffer, buf, copy_len); - buf += copy_len; - count -= copy_len; - g_dcc_buffer_count += copy_len; - } - 
dcc_poll_locked(); - space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count; - } while(count && space_left); - spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags); - return buf - buf_start; -} - -static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count) -{ - int ret; - /* printk("dcc_tty_write %p, %d\n", buf, count); */ - ret = dcc_write(buf, count); - if (ret != count) - printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret); - return ret; -} - -static int dcc_tty_write_room(struct tty_struct *tty) -{ - int space_left; - unsigned long irq_flags; - - spin_lock_irqsave(&g_dcc_tty_lock, irq_flags); - space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count; - spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags); - return space_left; -} - -static int dcc_tty_chars_in_buffer(struct tty_struct *tty) -{ - int ret; - asm( - "mrc 14, 0, %0, c0, c1, 0\n" - "mov %0, %0, LSR #30\n" - "and %0, %0, #1\n" - : "=r" (ret) - ); - return ret; -} - -static void dcc_tty_unthrottle(struct tty_struct * tty) -{ - unsigned long irq_flags; - - spin_lock_irqsave(&g_dcc_tty_lock, irq_flags); - dcc_poll_locked(); - spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags); -} - -static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer) -{ - unsigned long irq_flags; - - spin_lock_irqsave(&g_dcc_tty_lock, irq_flags); - dcc_poll_locked(); - spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags); - return HRTIMER_NORESTART; -} - -void dcc_console_write(struct console *co, const char *b, unsigned count) -{ -#if 1 - dcc_write(b, count); -#else - /* blocking printk */ - while (count > 0) { - int written; - written = dcc_write(b, count); - if (written) { - b += written; - count -= written; - } - } -#endif -} - -static struct tty_driver *dcc_console_device(struct console *c, int *index) -{ - *index = 0; - return g_dcc_tty_driver; -} - -static int __init dcc_console_setup(struct console *co, char *options) -{ - if (co->index != 0) - return -ENODEV; - return 0; -} - - -static struct console dcc_console = -{ - .name = "ttyDCC", - .write = dcc_console_write, - .device = dcc_console_device, - .setup = dcc_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, -}; - -static struct tty_operations dcc_tty_ops = { - .open = dcc_tty_open, - .close = dcc_tty_close, - .write = dcc_tty_write, - .write_room = dcc_tty_write_room, - .chars_in_buffer = dcc_tty_chars_in_buffer, - .unthrottle = dcc_tty_unthrottle, -}; - -static int __init dcc_tty_init(void) -{ - int ret; - - hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - g_dcc_timer.function = dcc_tty_timer_func; - - g_dcc_tty_driver = alloc_tty_driver(1); - if (!g_dcc_tty_driver) { - printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n"); - ret = -ENOMEM; - goto err_alloc_tty_driver_failed; - } - g_dcc_tty_driver->owner = THIS_MODULE; - g_dcc_tty_driver->driver_name = "dcc"; - g_dcc_tty_driver->name = "ttyDCC"; - g_dcc_tty_driver->major = 0; // auto assign - g_dcc_tty_driver->minor_start = 0; - g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; - g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL; - g_dcc_tty_driver->init_termios = tty_std_termios; - g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; - tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops); - ret = tty_register_driver(g_dcc_tty_driver); - if (ret) { - printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret); - goto err_tty_register_driver_failed; - } - tty_register_device(g_dcc_tty_driver, 0, NULL); - - 
register_console(&dcc_console); - hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL); - - return 0; - -err_tty_register_driver_failed: - put_tty_driver(g_dcc_tty_driver); - g_dcc_tty_driver = NULL; -err_alloc_tty_driver_failed: - return ret; -} - -static void __exit dcc_tty_exit(void) -{ - int ret; - - tty_unregister_device(g_dcc_tty_driver, 0); - ret = tty_unregister_driver(g_dcc_tty_driver); - if (ret < 0) { - printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret); - } else { - put_tty_driver(g_dcc_tty_driver); - } - g_dcc_tty_driver = NULL; -} - -module_init(dcc_tty_init); -module_exit(dcc_tty_exit); - - diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h index 2aef98f4fe04..35cbe3b6b596 100644 --- a/drivers/char/diag/diagchar.h +++ b/drivers/char/diag/diagchar.h @@ -509,6 +509,7 @@ struct diagchar_dev { struct list_head cmd_reg_list; struct mutex cmd_reg_mutex; uint32_t cmd_reg_count; + struct mutex diagfwd_channel_mutex; /* Sizes that reflect memory pool sizes */ unsigned int poolsize; unsigned int poolsize_hdlc; diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 39be6ef3735e..a5781f6db269 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -3394,6 +3394,7 @@ static int __init diagchar_init(void) mutex_init(&driver->diag_file_mutex); mutex_init(&driver->delayed_rsp_mutex); mutex_init(&apps_data_mutex); + mutex_init(&driver->diagfwd_channel_mutex); init_waitqueue_head(&driver->wait_q); INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn); INIT_WORK(&(driver->update_user_clients), diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index 22b9e05086bd..40fdcbaaf31a 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -651,9 +651,9 @@ void diagfwd_close_transport(uint8_t transport, uint8_t peripheral) break; default: return; - } + mutex_lock(&driver->diagfwd_channel_mutex); fwd_info = &early_init_info[transport][peripheral]; if (fwd_info->p_ops && fwd_info->p_ops->close) fwd_info->p_ops->close(fwd_info->ctxt); @@ -677,6 +677,7 @@ void diagfwd_close_transport(uint8_t transport, uint8_t peripheral) diagfwd_late_open(dest_info); diagfwd_cntl_open(dest_info); init_fn(peripheral); + mutex_unlock(&driver->diagfwd_channel_mutex); diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]); diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]); } diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c index fd927e931414..2f9ec51a17ba 100644 --- a/drivers/char/diag/diagfwd_socket.c +++ b/drivers/char/diag/diagfwd_socket.c @@ -959,7 +959,9 @@ static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len) (info->data_ready > 0) || (!info->hdl) || (atomic_read(&info->diag_state) == 0)); if (err) { + mutex_lock(&driver->diagfwd_channel_mutex); diagfwd_channel_read_done(info->fwd_ctxt, buf, 0); + mutex_unlock(&driver->diagfwd_channel_mutex); return -ERESTARTSYS; } @@ -971,7 +973,9 @@ static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len) DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s closing read thread. 
diag state is closed\n", info->name); + mutex_lock(&driver->diagfwd_channel_mutex); diagfwd_channel_read_done(info->fwd_ctxt, buf, 0); + mutex_unlock(&driver->diagfwd_channel_mutex); return 0; } @@ -1038,8 +1042,10 @@ static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len) if (total_recd > 0) { DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n", info->name, total_recd); + mutex_lock(&driver->diagfwd_channel_mutex); err = diagfwd_channel_read_done(info->fwd_ctxt, buf, total_recd); + mutex_unlock(&driver->diagfwd_channel_mutex); if (err) goto fail; } else { @@ -1052,7 +1058,9 @@ static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len) return 0; fail: + mutex_lock(&driver->diagfwd_channel_mutex); diagfwd_channel_read_done(info->fwd_ctxt, buf, 0); + mutex_unlock(&driver->diagfwd_channel_mutex); return -EIO; } diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c index 30cf4623184f..aa30af5f0f2b 100644 --- a/drivers/char/hw_random/exynos-rng.c +++ b/drivers/char/hw_random/exynos-rng.c @@ -89,6 +89,7 @@ static int exynos_read(struct hwrng *rng, void *buf, struct exynos_rng, rng); u32 *data = buf; int retry = 100; + int ret = 4; pm_runtime_get_sync(exynos_rng->dev); @@ -97,17 +98,20 @@ static int exynos_read(struct hwrng *rng, void *buf, while (!(exynos_rng_readl(exynos_rng, EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE) && --retry) cpu_relax(); - if (!retry) - return -ETIMEDOUT; + if (!retry) { + ret = -ETIMEDOUT; + goto out; + } exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET); *data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET); +out: pm_runtime_mark_last_busy(exynos_rng->dev); pm_runtime_put_sync_autosuspend(exynos_rng->dev); - return 4; + return ret; } static int exynos_rng_probe(struct platform_device *pdev) diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index e3536da05c88..a084a4751fa9 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -3819,6 +3819,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf) while (!list_empty(&intf->waiting_rcv_msgs)) { smi_msg = list_entry(intf->waiting_rcv_msgs.next, struct ipmi_smi_msg, link); + list_del(&smi_msg->link); if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); @@ -3828,11 +3829,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf) if (rv > 0) { /* * To preserve message order, quit if we - * can't handle a message. + * can't handle a message. Add the message + * back at the head, this is safe because this + * tasklet is the only thing that pulls the + * messages. 
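In the ipmi_msghandler hunk here, the message is unlinked from waiting_rcv_msgs before the lock is dropped, and when the handler reports it cannot take the message yet it is re-queued at the head (the list_add() right after this comment), so delivery order is preserved. A minimal plain-C sketch of that pop-then-requeue-on-failure pattern; struct msg, msg_queue and handle_one() are invented stand-ins, not driver types:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the driver's smi message and queue. */
struct msg {
    int id;
    struct msg *next;
};

struct msg_queue {
    struct msg *head;
};

/* Pretend handler: 0 on success, >0 when the message cannot be
 * taken right now (mirrors the rv > 0 case in the hunk). */
static int handle_one(const struct msg *m)
{
    return (m->id == 3) ? 1 : 0;    /* arbitrarily refuse id 3 */
}

static void drain(struct msg_queue *q)
{
    while (q->head) {
        /* Unlink first, so a real driver could drop its lock here. */
        struct msg *m = q->head;

        q->head = m->next;

        if (handle_one(m) > 0) {
            /* Could not handle it: put it back at the head so the
             * original order is preserved, then stop for now. */
            m->next = q->head;
            q->head = m;
            break;
        }
        printf("handled message %d\n", m->id);
        free(m);
    }
}

int main(void)
{
    struct msg_queue q = { NULL };
    struct msg *tail = NULL;
    int i;

    for (i = 1; i <= 4; i++) {
        struct msg *m = malloc(sizeof(*m));

        m->id = i;
        m->next = NULL;
        if (tail)
            tail->next = m;
        else
            q.head = m;
        tail = m;
    }

    drain(&q);          /* handles 1 and 2, re-queues 3 ahead of 4 */

    while (q.head) {    /* discard whatever was left queued */
        struct msg *m = q.head;

        q.head = m->next;
        free(m);
    }
    return 0;
}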
*/ + list_add(&smi_msg->link, &intf->waiting_rcv_msgs); break; } else { - list_del(&smi_msg->link); if (rv == 0) /* Message handled */ ipmi_free_smi_msg(smi_msg); diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c index 61566bcefa53..a165230e7eda 100644 --- a/drivers/clk/at91/clk-h32mx.c +++ b/drivers/clk/at91/clk-h32mx.c @@ -116,7 +116,7 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np, h32mxclk->pmc = pmc; clk = clk_register(NULL, &h32mxclk->hw); - if (!clk) { + if (IS_ERR(clk)) { kfree(h32mxclk); return; } diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 4f9830c1b121..6029313aa995 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -890,8 +890,14 @@ static void bcm2835_pll_off(struct clk_hw *hw) struct bcm2835_cprman *cprman = pll->cprman; const struct bcm2835_pll_data *data = pll->data; - cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST); - cprman_write(cprman, data->a2w_ctrl_reg, A2W_PLL_CTRL_PWRDN); + spin_lock(&cprman->regs_lock); + cprman_write(cprman, data->cm_ctrl_reg, + cprman_read(cprman, data->cm_ctrl_reg) | + CM_PLL_ANARST); + cprman_write(cprman, data->a2w_ctrl_reg, + cprman_read(cprman, data->a2w_ctrl_reg) | + A2W_PLL_CTRL_PWRDN); + spin_unlock(&cprman->regs_lock); } static int bcm2835_pll_on(struct clk_hw *hw) @@ -901,6 +907,10 @@ static int bcm2835_pll_on(struct clk_hw *hw) const struct bcm2835_pll_data *data = pll->data; ktime_t timeout; + cprman_write(cprman, data->a2w_ctrl_reg, + cprman_read(cprman, data->a2w_ctrl_reg) & + ~A2W_PLL_CTRL_PWRDN); + /* Take the PLL out of reset. */ cprman_write(cprman, data->cm_ctrl_reg, cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); @@ -1068,10 +1078,12 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw) struct bcm2835_cprman *cprman = divider->cprman; const struct bcm2835_pll_divider_data *data = divider->data; + spin_lock(&cprman->regs_lock); cprman_write(cprman, data->cm_reg, (cprman_read(cprman, data->cm_reg) & ~data->load_mask) | data->hold_mask); cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE); + spin_unlock(&cprman->regs_lock); } static int bcm2835_pll_divider_on(struct clk_hw *hw) @@ -1080,12 +1092,14 @@ static int bcm2835_pll_divider_on(struct clk_hw *hw) struct bcm2835_cprman *cprman = divider->cprman; const struct bcm2835_pll_divider_data *data = divider->data; + spin_lock(&cprman->regs_lock); cprman_write(cprman, data->a2w_reg, cprman_read(cprman, data->a2w_reg) & ~A2W_PLL_CHANNEL_DISABLE); cprman_write(cprman, data->cm_reg, cprman_read(cprman, data->cm_reg) & ~data->hold_mask); + spin_unlock(&cprman->regs_lock); return 0; } @@ -1167,8 +1181,9 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw, div &= ~unused_frac_mask; } - /* Clamp to the limits. 
*/ - div = max(div, unused_frac_mask + 1); + /* clamp to min divider of 1 */ + div = max_t(u32, div, 1 << CM_DIV_FRAC_BITS); + /* clamp to the highest possible fractional divider */ div = min_t(u32, div, GENMASK(data->int_bits + CM_DIV_FRAC_BITS - 1, CM_DIV_FRAC_BITS - data->frac_bits)); diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index 4735de0660cc..8db56919e367 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -194,7 +194,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name, unsigned long flags) { struct clk *clk; - struct clk_init_data init; + struct clk_init_data init = {}; struct clk_composite *composite; struct clk_ops *clk_composite_ops; diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index bbf206e3da0d..0c83ffc22dd2 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -436,7 +436,7 @@ static struct clk *_register_divider(struct device *dev, const char *name, { struct clk_divider *div; struct clk *clk; - struct clk_init_data init; + struct clk_init_data init = {}; if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) { if (width + shift > 16) { diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index 83de57aeceea..57fbc94764ff 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -75,7 +75,7 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, unsigned int mult, unsigned int div) { struct clk_fixed_factor *fix; - struct clk_init_data init; + struct clk_init_data init = {}; struct clk *clk; fix = kmalloc(sizeof(*fix), GFP_KERNEL); diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c index f85ec8d1711f..2ca7d5a8826f 100644 --- a/drivers/clk/clk-fixed-rate.c +++ b/drivers/clk/clk-fixed-rate.c @@ -62,7 +62,7 @@ struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev, { struct clk_fixed_rate *fixed; struct clk *clk; - struct clk_init_data init; + struct clk_init_data init = {}; /* allocate fixed-rate clock */ fixed = kzalloc(sizeof(*fixed), GFP_KERNEL); diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c index 5c4955e33f7a..f50892a74b60 100644 --- a/drivers/clk/clk-fractional-divider.c +++ b/drivers/clk/clk-fractional-divider.c @@ -124,7 +124,7 @@ struct clk *clk_register_fractional_divider(struct device *dev, u8 clk_divider_flags, spinlock_t *lock) { struct clk_fractional_divider *fd; - struct clk_init_data init; + struct clk_init_data init = {}; struct clk *clk; fd = kzalloc(sizeof(*fd), GFP_KERNEL); diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c index de0b322f5f58..eeb142534016 100644 --- a/drivers/clk/clk-gate.c +++ b/drivers/clk/clk-gate.c @@ -129,7 +129,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name, { struct clk_gate *gate; struct clk *clk; - struct clk_init_data init; + struct clk_init_data init = {}; if (clk_gate_flags & CLK_GATE_HIWORD_MASK) { if (bit_idx > 15) { diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index 7129c86a79db..21cb9fc0e4c4 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -124,7 +124,7 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name, { struct clk_mux *mux; struct clk *clk; - struct clk_init_data init; + struct clk_init_data init = {}; u8 width = 0; if (clk_mux_flags & CLK_MUX_HIWORD_MASK) { diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c index 328fcfcefd8c..63505a323a08 100644 --- 
a/drivers/clk/clk-pwm.c +++ b/drivers/clk/clk-pwm.c @@ -56,7 +56,7 @@ static const struct clk_ops clk_pwm_ops = { static int clk_pwm_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; - struct clk_init_data init; + struct clk_init_data init = {}; struct clk_pwm *clk_pwm; struct pwm_device *pwm; const char *clk_name; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 97a604755053..1eb6e32e0d51 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1,6 +1,7 @@ /* * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> + * Copyright (c) 2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -23,6 +24,7 @@ #include <linux/init.h> #include <linux/sched.h> #include <linux/clkdev.h> +#include <linux/regulator/consumer.h> #include "clk.h" @@ -41,6 +43,13 @@ static HLIST_HEAD(clk_root_list); static HLIST_HEAD(clk_orphan_list); static LIST_HEAD(clk_notifier_list); +struct clk_handoff_vdd { + struct list_head list; + struct clk_vdd_class *vdd_class; +}; + +static LIST_HEAD(clk_handoff_vdd_list); + /*** private data structures ***/ struct clk_core { @@ -75,6 +84,9 @@ struct clk_core { struct hlist_node debug_node; #endif struct kref ref; + struct clk_vdd_class *vdd_class; + unsigned long *rate_max; + int num_rate_max; }; #define CREATE_TRACE_POINTS @@ -243,9 +255,12 @@ static int __init clk_ignore_unused_setup(char *__unused) } __setup("clk_ignore_unused", clk_ignore_unused_setup); +static int clk_unvote_vdd_level(struct clk_vdd_class *vdd_class, int level); + static int clk_disable_unused(void) { struct clk_core *core; + struct clk_handoff_vdd *v, *v_temp; if (clk_ignore_unused) { pr_warn("clk: Not disabling unused clocks\n"); @@ -266,6 +281,13 @@ static int clk_disable_unused(void) hlist_for_each_entry(core, &clk_orphan_list, child_node) clk_unprepare_unused_subtree(core); + list_for_each_entry_safe(v, v_temp, &clk_handoff_vdd_list, list) { + clk_unvote_vdd_level(v->vdd_class, + v->vdd_class->num_levels - 1); + list_del(&v->list); + kfree(v); + }; + clk_prepare_unlock(); return 0; @@ -585,6 +607,212 @@ int __clk_mux_determine_rate_closest(struct clk_hw *hw, } EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); +/* + * Find the voltage level required for a given clock rate. + */ +static int clk_find_vdd_level(struct clk_core *clk, unsigned long rate) +{ + int level; + + for (level = 0; level < clk->num_rate_max; level++) + if (rate <= clk->rate_max[level]) + break; + + if (level == clk->num_rate_max) { + pr_err("Rate %lu for %s is greater than highest Fmax\n", rate, + clk->name); + return -EINVAL; + } + + return level; +} + +/* + * Update voltage level given the current votes. 
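The clk_update_vdd() helper added in this hunk aggregates the per-level vote counts: it walks from the highest voltage level down and applies the first level that still has outstanding votes, skipping the regulator calls entirely when that level is already current. A compact sketch of just that aggregation step, with made-up vote counts and a printf standing in for the regulator programming:

#include <stdio.h>

#define NUM_LEVELS 4

/* level_votes[i] counts how many prepared clocks need corner i;
 * the initial values are made up for the example. */
static int level_votes[NUM_LEVELS] = { 0, 2, 0, 1 };
static int cur_level;

/* Walk from the highest corner down and stop at the first level
 * that still has outstanding votes (level 0 if none do). */
static int aggregate_level(void)
{
    int level;

    for (level = NUM_LEVELS - 1; level > 0; level--)
        if (level_votes[level])
            break;
    return level;
}

static void update_vdd(void)
{
    int level = aggregate_level();

    if (level == cur_level)
        return;                 /* nothing to reprogram */

    /* In the driver this is regulator_set_voltage()/enable()/disable(). */
    printf("switching vdd corner %d -> %d\n", cur_level, level);
    cur_level = level;
}

int main(void)
{
    update_vdd();               /* 0 -> 3: highest voted corner */
    level_votes[3] = 0;
    update_vdd();               /* 3 -> 1: highest remaining vote */
    return 0;
}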
+ */ +static int clk_update_vdd(struct clk_vdd_class *vdd_class) +{ + int level, rc = 0, i, ignore; + struct regulator **r = vdd_class->regulator; + int *uv = vdd_class->vdd_uv; + int n_reg = vdd_class->num_regulators; + int cur_lvl = vdd_class->cur_level; + int max_lvl = vdd_class->num_levels - 1; + int cur_base = cur_lvl * n_reg; + int new_base; + + /* aggregate votes */ + for (level = max_lvl; level > 0; level--) + if (vdd_class->level_votes[level]) + break; + + if (level == cur_lvl) + return 0; + + max_lvl = max_lvl * n_reg; + new_base = level * n_reg; + + for (i = 0; i < vdd_class->num_regulators; i++) { + pr_debug("Set Voltage level Min %d, Max %d\n", uv[new_base + i], + uv[max_lvl + i]); + rc = regulator_set_voltage(r[i], uv[new_base + i], + uv[max_lvl + i]); + if (rc) + goto set_voltage_fail; + + if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels) + rc = regulator_enable(r[i]); + else if (level == 0) + rc = regulator_disable(r[i]); + if (rc) + goto enable_disable_fail; + } + + if (vdd_class->set_vdd && !vdd_class->num_regulators) + rc = vdd_class->set_vdd(vdd_class, level); + + if (!rc) + vdd_class->cur_level = level; + + return rc; + +enable_disable_fail: + regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]); + +set_voltage_fail: + for (i--; i >= 0; i--) { + regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]); + if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels) + regulator_disable(r[i]); + else if (level == 0) + ignore = regulator_enable(r[i]); + } + + return rc; +} + +/* + * Vote for a voltage level. + */ +static int clk_vote_vdd_level(struct clk_vdd_class *vdd_class, int level) +{ + int rc = 0; + + if (level >= vdd_class->num_levels) + return -EINVAL; + + mutex_lock(&vdd_class->lock); + + vdd_class->level_votes[level]++; + + rc = clk_update_vdd(vdd_class); + if (rc) + vdd_class->level_votes[level]--; + + mutex_unlock(&vdd_class->lock); + + return rc; +} + +/* + * Remove vote for a voltage level. + */ +static int clk_unvote_vdd_level(struct clk_vdd_class *vdd_class, int level) +{ + int rc = 0; + + if (level >= vdd_class->num_levels) + return -EINVAL; + + mutex_lock(&vdd_class->lock); + + if (WARN(!vdd_class->level_votes[level], + "Reference counts are incorrect for %s level %d\n", + vdd_class->class_name, level)) + goto out; + + vdd_class->level_votes[level]--; + + rc = clk_update_vdd(vdd_class); + if (rc) + vdd_class->level_votes[level]++; + +out: + mutex_unlock(&vdd_class->lock); + return rc; +} + +/* + * Vote for a voltage level corresponding to a clock's rate. + */ +static int clk_vote_rate_vdd(struct clk_core *core, unsigned long rate) +{ + int level; + + if (!core->vdd_class) + return 0; + + level = clk_find_vdd_level(core, rate); + if (level < 0) + return level; + + return clk_vote_vdd_level(core->vdd_class, level); +} + +/* + * Remove vote for a voltage level corresponding to a clock's rate. 
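The clk_vote_rate_vdd()/clk_unvote_rate_vdd() pair added here first translate a clock rate into a voltage level through the per-clock rate_max[] table (clk_find_vdd_level()) and then bump or drop that level's vote count. A small sketch of the lookup plus the paired vote/unvote bookkeeping; the table contents and function names below are illustrative only:

#include <stdio.h>

#define NUM_LEVELS 3

/* rate_max[i] is the highest rate supported at voltage level i,
 * ascending, like clk_init_data::rate_max; values are made up. */
static const unsigned long rate_max[NUM_LEVELS] = {
    100000000UL, 200000000UL, 400000000UL
};
static int level_votes[NUM_LEVELS];

/* Lowest level whose fmax covers the requested rate, or -1. */
static int find_vdd_level(unsigned long rate)
{
    int level;

    for (level = 0; level < NUM_LEVELS; level++)
        if (rate <= rate_max[level])
            return level;
    return -1;                  /* rate is above the highest fmax */
}

static int vote_rate(unsigned long rate)
{
    int level = find_vdd_level(rate);

    if (level < 0)
        return -1;
    level_votes[level]++;
    return 0;
}

static void unvote_rate(unsigned long rate)
{
    int level = find_vdd_level(rate);

    if (level >= 0 && level_votes[level] > 0)
        level_votes[level]--;
}

int main(void)
{
    vote_rate(150000000UL);     /* lands in level 1 */
    printf("votes: %d %d %d\n",
           level_votes[0], level_votes[1], level_votes[2]);
    unvote_rate(150000000UL);
    printf("votes: %d %d %d\n",
           level_votes[0], level_votes[1], level_votes[2]);
    return 0;
}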
+ */ +static void clk_unvote_rate_vdd(struct clk_core *core, unsigned long rate) +{ + int level; + + if (!core->vdd_class) + return; + + level = clk_find_vdd_level(core, rate); + if (level < 0) + return; + + clk_unvote_vdd_level(core->vdd_class, level); +} + +static bool clk_is_rate_level_valid(struct clk_core *core, unsigned long rate) +{ + int level; + + if (!core->vdd_class) + return true; + + level = clk_find_vdd_level(core, rate); + + return level >= 0; +} + +static int clk_vdd_class_init(struct clk_vdd_class *vdd) +{ + struct clk_handoff_vdd *v; + + list_for_each_entry(v, &clk_handoff_vdd_list, list) { + if (v->vdd_class == vdd) + return 0; + } + + pr_debug("voting for vdd_class %s\n", vdd->class_name); + + if (clk_vote_vdd_level(vdd, vdd->num_levels - 1)) + pr_err("failed to vote for %s\n", vdd->class_name); + + v = kmalloc(sizeof(*v), GFP_KERNEL); + if (!v) + return -ENOMEM; + + v->vdd_class = vdd; + + list_add_tail(&v->list, &clk_handoff_vdd_list); + + return 0; +} + /*** clk api ***/ static void clk_core_unprepare(struct clk_core *core) @@ -608,6 +836,9 @@ static void clk_core_unprepare(struct clk_core *core) core->ops->unprepare(core->hw); trace_clk_unprepare_complete(core); + + clk_unvote_rate_vdd(core, core->rate); + clk_core_unprepare(core->parent); } @@ -649,12 +880,19 @@ static int clk_core_prepare(struct clk_core *core) trace_clk_prepare(core); + ret = clk_vote_rate_vdd(core, core->rate); + if (ret) { + clk_core_unprepare(core->parent); + return ret; + } + if (core->ops->prepare) ret = core->ops->prepare(core->hw); trace_clk_prepare_complete(core); if (ret) { + clk_unvote_rate_vdd(core, core->rate); clk_core_unprepare(core->parent); return ret; } @@ -1401,6 +1639,9 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core, top = clk_calc_new_rates(parent, best_parent_rate); out: + if (!clk_is_rate_level_valid(core, rate)) + return NULL; + clk_calc_subtree(core, new_rate, parent, p_index); return top; @@ -1485,15 +1726,26 @@ static int clk_change_rate(struct clk_core *core) trace_clk_set_rate(core, core->new_rate); + /* Enforce vdd requirements for new frequency. */ + if (core->prepare_count) { + rc = clk_vote_rate_vdd(core, core->new_rate); + if (rc) + goto out; + } + if (!skip_set_rate && core->ops->set_rate) { rc = core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); if (rc) - goto out; + goto err_set_rate; } trace_clk_set_rate_complete(core, core->new_rate); + /* Release vdd requirements for old frequency. 
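The clk_change_rate() changes in this hunk vote for the new frequency's voltage requirement before calling the ->set_rate() op and release the old frequency's vote only after the switch has succeeded, rolling the new vote back if set_rate fails. A sketch of that ordering, with set_hw_rate() standing in for the driver op and integer indices standing in for rates:

#include <stdio.h>

static int votes_for[2];        /* pretend corner votes, keyed by rate index */

static int vote(int idx)    { votes_for[idx]++; return 0; }
static void unvote(int idx) { votes_for[idx]--; }

/* Stand-in for core->ops->set_rate(); fails when fail != 0. */
static int set_hw_rate(int idx, int fail)
{
    (void)idx;
    return fail ? -1 : 0;
}

static int change_rate(int old_idx, int new_idx, int fail)
{
    int rc;

    /* 1. Make sure the supply can already sustain the new rate. */
    rc = vote(new_idx);
    if (rc)
        return rc;

    /* 2. Program the hardware. */
    rc = set_hw_rate(new_idx, fail);
    if (rc) {
        unvote(new_idx);        /* roll back, keep the old vote */
        return rc;
    }

    /* 3. Only now is it safe to drop the old frequency's vote. */
    unvote(old_idx);
    return 0;
}

int main(void)
{
    int rc;

    votes_for[0] = 1;           /* currently running at the "old" rate */

    rc = change_rate(0, 1, 0);
    printf("ok:   rc=%d votes=%d/%d\n", rc, votes_for[0], votes_for[1]);

    rc = change_rate(1, 0, 1);  /* force a set_rate failure */
    printf("fail: rc=%d votes=%d/%d\n", rc, votes_for[0], votes_for[1]);
    return 0;
}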
*/ + if (core->prepare_count) + clk_unvote_rate_vdd(core, old_rate); + core->rate = clk_recalc(core, best_parent_rate); if (core->notifier_count && old_rate != core->rate) @@ -1519,6 +1771,9 @@ static int clk_change_rate(struct clk_core *core) return rc; +err_set_rate: + if (core->prepare_count) + clk_unvote_rate_vdd(core, core->new_rate); out: trace_clk_set_rate_complete(core, core->new_rate); @@ -2597,8 +2852,19 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw) core->num_parents = hw->init->num_parents; core->min_rate = 0; core->max_rate = ULONG_MAX; + core->vdd_class = hw->init->vdd_class; + core->rate_max = hw->init->rate_max; + core->num_rate_max = hw->init->num_rate_max; hw->core = core; + if (core->vdd_class) { + ret = clk_vdd_class_init(core->vdd_class); + if (ret) { + pr_err("Failed to initialize vdd class\n"); + goto fail_parent_names; + } + } + /* allocate local copy in case parent_names is __initdata */ core->parent_names = kcalloc(core->num_parents, sizeof(char *), GFP_KERNEL); diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c index a71d24cb4c06..b0978d3b83e2 100644 --- a/drivers/clk/imx/clk-imx35.c +++ b/drivers/clk/imx/clk-imx35.c @@ -66,7 +66,7 @@ static const char *std_sel[] = {"ppll", "arm"}; static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"}; enum mx35_clks { - ckih, ckil, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg, + ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg, arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel, esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre, spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre, @@ -79,7 +79,7 @@ enum mx35_clks { rtc_gate, rtic_gate, scc_gate, sdma_gate, spba_gate, spdif_gate, ssi1_gate, ssi2_gate, uart1_gate, uart2_gate, uart3_gate, usbotg_gate, wdog_gate, max_gate, admux_gate, csi_gate, csi_div, csi_sel, iim_gate, - gpu2d_gate, clk_max + gpu2d_gate, ckil, clk_max }; static struct clk *clk[clk_max]; diff --git a/drivers/clk/msm/clock-debug.c b/drivers/clk/msm/clock-debug.c index d0ff821eb203..00a86ba55171 100644 --- a/drivers/clk/msm/clock-debug.c +++ b/drivers/clk/msm/clock-debug.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2007 Google, Inc. - * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2007-2014, 2016, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -355,8 +355,12 @@ static int trace_clocks_show(struct seq_file *m, void *unused) return 1; } list_for_each_entry(c, &clk_list, list) { + int vlevel = 0; + + if (c->num_fmax) + vlevel = find_vdd_level(c, c->rate); trace_clock_state(c->dbg_name, c->prepare_count, c->count, - c->rate); + c->rate, vlevel); total_cnt++; } mutex_unlock(&clk_list_lock); diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c index b1c9a24c4087..5391ef456aae 100644 --- a/drivers/clk/msm/clock-osm.c +++ b/drivers/clk/msm/clock-osm.c @@ -2748,33 +2748,26 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev) } clk_prepare_enable(&sys_apcsaux_clk_gcc.c); - /* Set boot rate */ - rc = clk_set_rate(&pwrcl_clk.c, msmcobalt_v1 ? 
- MSMCOBALTV1_PWRCL_BOOT_RATE : - MSMCOBALTV2_PWRCL_BOOT_RATE); + rc = clk_set_rate(&osm_clk_src.c, osm_clk_init_rate); if (rc) { - dev_err(&pdev->dev, "Unable to set boot rate on pwr cluster, rc=%d\n", + dev_err(&pdev->dev, "Unable to set init rate on osm_clk, rc=%d\n", rc); - clk_disable_unprepare(&sys_apcsaux_clk_gcc.c); - return rc; + goto exit2; } - rc = clk_set_rate(&perfcl_clk.c, msmcobalt_v1 ? - MSMCOBALTV1_PERFCL_BOOT_RATE : - MSMCOBALTV2_PERFCL_BOOT_RATE); + /* Make sure index zero is selected */ + rc = clk_set_rate(&pwrcl_clk.c, init_rate); if (rc) { - dev_err(&pdev->dev, "Unable to set boot rate on perf cluster, rc=%d\n", + dev_err(&pdev->dev, "Unable to set init rate on pwr cluster, rc=%d\n", rc); - clk_disable_unprepare(&sys_apcsaux_clk_gcc.c); - return rc; + goto exit2; } - rc = clk_set_rate(&osm_clk_src.c, osm_clk_init_rate); + rc = clk_set_rate(&perfcl_clk.c, init_rate); if (rc) { - dev_err(&pdev->dev, "Unable to set init rate on osm_clk, rc=%d\n", + dev_err(&pdev->dev, "Unable to set init rate on perf cluster, rc=%d\n", rc); - clk_disable_unprepare(&sys_apcsaux_clk_gcc.c); - return rc; + goto exit2; } get_online_cpus(); @@ -2785,6 +2778,25 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev) "Failed to enable clock for cpu %d\n", cpu); } + /* Set final boot rate */ + rc = clk_set_rate(&pwrcl_clk.c, msmcobalt_v1 ? + MSMCOBALTV1_PWRCL_BOOT_RATE : + MSMCOBALTV2_PWRCL_BOOT_RATE); + if (rc) { + dev_err(&pdev->dev, "Unable to set boot rate on pwr cluster, rc=%d\n", + rc); + goto exit2; + } + + rc = clk_set_rate(&perfcl_clk.c, msmcobalt_v1 ? + MSMCOBALTV1_PERFCL_BOOT_RATE : + MSMCOBALTV2_PERFCL_BOOT_RATE); + if (rc) { + dev_err(&pdev->dev, "Unable to set boot rate on perf cluster, rc=%d\n", + rc); + goto exit2; + } + pwrcl_clk.version = clk_osm_read_reg(&pwrcl_clk, VERSION_REG); perfcl_clk.version = clk_osm_read_reg(&perfcl_clk, VERSION_REG); @@ -2801,6 +2813,8 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev) return 0; +exit2: + clk_disable_unprepare(&sys_apcsaux_clk_gcc.c); exit: dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n", rc); diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index d0a0313d6bef..2e7f03d50f4e 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -2346,6 +2346,7 @@ static struct clk_branch gcc_crypto_ahb_clk = { "pcnoc_bfdcd_clk_src", }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2381,6 +2382,7 @@ static struct clk_branch gcc_crypto_clk = { "crypto_clk_src", }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c index 90638f325d0b..30e1a2138002 100644 --- a/drivers/cpufreq/cpufreq_interactive.c +++ b/drivers/cpufreq/cpufreq_interactive.c @@ -162,28 +162,6 @@ struct cpufreq_interactive_tunables { bool enable_prediction; }; -/* - * HACK: FIXME: Bring back cpufreq_{get,put}_global_kobject() - * definition removed by upstream commit 8eec1020f0c0 "cpufreq: - * create cpu/cpufreq at boot time" to fix build failures. 
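A few hunks up, the cpu_clock_osm_driver_probe() rework replaces three copies of clk_disable_unprepare() in the failure paths with a single exit2 label, the usual goto-unwind idiom for probe error handling. A stand-alone illustration of that shape; the resource and function names are invented:

#include <stdio.h>

static int enable_aux_clk(void)    { return 0; }
static void disable_aux_clk(void)  { puts("aux clk released"); }

static int set_rate(const char *name, int fail)
{
    if (fail) {
        printf("setting %s rate failed\n", name);
        return -1;
    }
    return 0;
}

static int probe(int fail_step)
{
    int rc;

    rc = enable_aux_clk();
    if (rc)
        return rc;

    rc = set_rate("osm", fail_step == 1);
    if (rc)
        goto err;               /* one cleanup path for every failure */

    rc = set_rate("pwrcl", fail_step == 2);
    if (rc)
        goto err;

    rc = set_rate("perfcl", fail_step == 3);
    if (rc)
        goto err;

    return 0;

err:
    disable_aux_clk();          /* undo the only state acquired so far */
    return rc;
}

int main(void)
{
    printf("probe ok: %d\n", probe(0));
    printf("probe with failure: %d\n", probe(2));
    return 0;
}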
- */ -static int cpufreq_global_kobject_usage; - -int cpufreq_get_global_kobject(void) -{ - if (!cpufreq_global_kobject_usage++) - return kobject_add(cpufreq_global_kobject, - &cpu_subsys.dev_root->kobj, "%s", "cpufreq"); - - return 0; -} - -void cpufreq_put_global_kobject(void) -{ - if (!--cpufreq_global_kobject_usage) - kobject_del(cpufreq_global_kobject); -} - /* For cases where we have single governor instance for system */ static struct cpufreq_interactive_tunables *common_tunables; static struct cpufreq_interactive_tunables *cached_common_tunables; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index a045b9a940e8..7f437bc4431b 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -214,7 +214,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, tick_broadcast_exit(); } - if (!cpuidle_state_is_coupled(drv, entered_state)) + if (!cpuidle_state_is_coupled(drv, index)) local_irq_enable(); diff = ktime_to_us(ktime_sub(time_end, time_start)); @@ -433,6 +433,8 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev) list_del(&dev->device_list); per_cpu(cpuidle_devices, dev->cpu) = NULL; module_put(drv->owner); + + dev->registered = 0; } static void __cpuidle_device_init(struct cpuidle_device *dev) diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 37e504381313..de033cc37a15 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -85,7 +85,9 @@ struct lpm_debug { struct lpm_cluster *lpm_root_node; -static bool lpm_prediction; +#define MAXSAMPLES 5 + +static bool lpm_prediction = true; module_param_named(lpm_prediction, lpm_prediction, bool, S_IRUGO | S_IWUSR | S_IWGRP); diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index f7e0d8d4c3da..8f50a02ff68d 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -248,7 +248,7 @@ static void caam_jr_dequeue(unsigned long devarg) struct device *caam_jr_alloc(void) { struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL; - struct device *dev = NULL; + struct device *dev = ERR_PTR(-ENODEV); int min_tfm_cnt = INT_MAX; int tfm_cnt; diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 52c7395cb8d8..0d0d4529ee36 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); unsigned int unit; + u32 unit_size; int ret; if (!ctx->u.aes.key_len) @@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, if (!req->info) return -EINVAL; - for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) - if (!(req->nbytes & (unit_size_map[unit].size - 1))) - break; + unit_size = CCP_XTS_AES_UNIT_SIZE__LAST; + if (req->nbytes <= unit_size_map[0].size) { + for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) { + if (!(req->nbytes & (unit_size_map[unit].size - 1))) { + unit_size = unit_size_map[unit].value; + break; + } + } + } - if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) || + if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) || (ctx->u.aes.key_len != AES_KEYSIZE_128)) { /* Use the fallback to process the request for any * unsupported unit sizes or key sizes @@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; 
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT; - rctx->cmd.u.xts.unit_size = unit_size_map[unit].value; + rctx->cmd.u.xts.unit_size = unit_size; rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; rctx->cmd.u.xts.iv = &rctx->iv_sg; diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c index 7ddbb1938400..4cf95b90a2df 100644 --- a/drivers/crypto/msm/qce.c +++ b/drivers/crypto/msm/qce.c @@ -1,6 +1,6 @@ /* Qualcomm Crypto Engine driver. * - * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1962,8 +1962,8 @@ int qce_aead_req(void *handle, struct qce_req *q_req) else q_req->cryptlen = areq->cryptlen - authsize; - if ((q_req->cryptlen > ULONG_MAX - ivsize) || - (q_req->cryptlen + ivsize > ULONG_MAX - areq->assoclen)) { + if ((q_req->cryptlen > UINT_MAX - ivsize) || + (q_req->cryptlen + ivsize > UINT_MAX - areq->assoclen)) { pr_err("Integer overflow on total aead req length.\n"); return -EINVAL; } diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index 9e9e196c6d51..45b5adaafa6f 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -2,6 +2,7 @@ $(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \ $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \ $(obj)/qat_rsaprivkey-asn1.h +$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h clean-files += qat_rsaprivkey-asn1.c qat_rsapvivkey-asn1.h diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index b9178d0a3093..aa1dbeaa9b49 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -145,8 +145,6 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); void adf_disable_aer(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); -int adf_init_pf_wq(void); -void adf_exit_pf_wq(void); int adf_init_admin_comms(struct adf_accel_dev *accel_dev); void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); int adf_send_admin_init(struct adf_accel_dev *accel_dev); @@ -229,6 +227,8 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, uint32_t vf_mask); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, uint32_t vf_mask); +int adf_init_pf_wq(void); +void adf_exit_pf_wq(void); #else static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) { @@ -238,5 +238,14 @@ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) { } + +static inline int adf_init_pf_wq(void) +{ + return 0; +} + +static inline void adf_exit_pf_wq(void) +{ +} #endif #endif diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index a19ee127edca..e72fea737a0d 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -35,6 +35,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) unsigned int todo; struct sg_mapping_iter mi, mo; unsigned int oi, oo; /* offset for in and 
out */ + unsigned long flags; if (areq->nbytes == 0) return 0; @@ -49,7 +50,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) return -EINVAL; } - spin_lock_bh(&ss->slock); + spin_lock_irqsave(&ss->slock, flags); for (i = 0; i < op->keylen; i += 4) writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); @@ -117,7 +118,7 @@ release_ss: sg_miter_stop(&mi); sg_miter_stop(&mo); writel(0, ss->base + SS_CTL); - spin_unlock_bh(&ss->slock); + spin_unlock_irqrestore(&ss->slock, flags); return err; } @@ -149,6 +150,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) unsigned int ob = 0; /* offset in buf */ unsigned int obo = 0; /* offset in bufo*/ unsigned int obl = 0; /* length of data in bufo */ + unsigned long flags; if (areq->nbytes == 0) return 0; @@ -181,7 +183,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) if (no_chunk == 1) return sun4i_ss_opti_poll(areq); - spin_lock_bh(&ss->slock); + spin_lock_irqsave(&ss->slock, flags); for (i = 0; i < op->keylen; i += 4) writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); @@ -308,7 +310,7 @@ release_ss: sg_miter_stop(&mi); sg_miter_stop(&mo); writel(0, ss->base + SS_CTL); - spin_unlock_bh(&ss->slock); + spin_unlock_irqrestore(&ss->slock, flags); return err; } diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index a04fea4d0063..9a8a18aafd5c 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -835,6 +835,16 @@ struct talitos_ahash_req_ctx { struct scatterlist *psrc; }; +struct talitos_export_state { + u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; + u8 buf[HASH_MAX_BLOCK_SIZE]; + unsigned int swinit; + unsigned int first; + unsigned int last; + unsigned int to_hash_later; + unsigned int nbuf; +}; + static int aead_setkey(struct crypto_aead *authenc, const u8 *key, unsigned int keylen) { @@ -1954,6 +1964,46 @@ static int ahash_digest(struct ahash_request *areq) return ahash_process_req(areq, areq->nbytes); } +static int ahash_export(struct ahash_request *areq, void *out) +{ + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct talitos_export_state *export = out; + + memcpy(export->hw_context, req_ctx->hw_context, + req_ctx->hw_context_size); + memcpy(export->buf, req_ctx->buf, req_ctx->nbuf); + export->swinit = req_ctx->swinit; + export->first = req_ctx->first; + export->last = req_ctx->last; + export->to_hash_later = req_ctx->to_hash_later; + export->nbuf = req_ctx->nbuf; + + return 0; +} + +static int ahash_import(struct ahash_request *areq, const void *in) +{ + struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + const struct talitos_export_state *export = in; + + memset(req_ctx, 0, sizeof(*req_ctx)); + req_ctx->hw_context_size = + (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) + ? 
TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 + : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; + memcpy(req_ctx->hw_context, export->hw_context, + req_ctx->hw_context_size); + memcpy(req_ctx->buf, export->buf, export->nbuf); + req_ctx->swinit = export->swinit; + req_ctx->first = export->first; + req_ctx->last = export->last; + req_ctx->to_hash_later = export->to_hash_later; + req_ctx->nbuf = export->nbuf; + + return 0; +} + struct keyhash_result { struct completion completion; int err; @@ -2348,6 +2398,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = MD5_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "md5", .cra_driver_name = "md5-talitos", @@ -2363,6 +2414,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "sha1", .cra_driver_name = "sha1-talitos", @@ -2378,6 +2430,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "sha224", .cra_driver_name = "sha224-talitos", @@ -2393,6 +2446,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "sha256", .cra_driver_name = "sha256-talitos", @@ -2408,6 +2462,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "sha384", .cra_driver_name = "sha384-talitos", @@ -2423,6 +2478,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "sha512", .cra_driver_name = "sha512-talitos", @@ -2438,6 +2494,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = MD5_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "hmac(md5)", .cra_driver_name = "hmac-md5-talitos", @@ -2453,6 +2510,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "hmac(sha1)", .cra_driver_name = "hmac-sha1-talitos", @@ -2468,6 +2526,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "hmac(sha224)", .cra_driver_name = "hmac-sha224-talitos", @@ -2483,6 +2542,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "hmac(sha256)", .cra_driver_name = "hmac-sha256-talitos", @@ -2498,6 +2558,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { 
.halg.digestsize = SHA384_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "hmac(sha384)", .cra_driver_name = "hmac-sha384-talitos", @@ -2513,6 +2574,7 @@ static struct talitos_alg_template driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.statesize = sizeof(struct talitos_export_state), .halg.base = { .cra_name = "hmac(sha512)", .cra_driver_name = "hmac-sha512-talitos", @@ -2704,6 +2766,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, t_alg->algt.alg.hash.finup = ahash_finup; t_alg->algt.alg.hash.digest = ahash_digest; t_alg->algt.alg.hash.setkey = ahash_setkey; + t_alg->algt.alg.hash.import = ahash_import; + t_alg->algt.alg.hash.export = ahash_export; if (!(priv->features & TALITOS_FTR_HMAC_OK) && !strncmp(alg->cra_name, "hmac", 4)) { diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 66b1c3313e2e..cd4398498495 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -797,7 +797,7 @@ static int hash_process_data(struct hash_device_data *device_data, &device_data->state); memmove(req_ctx->state.buffer, device_data->state.buffer, - HASH_BLOCK_SIZE / sizeof(u32)); + HASH_BLOCK_SIZE); if (ret) { dev_err(device_data->dev, "%s: hash_resume_state() failed!\n", @@ -848,7 +848,7 @@ static int hash_process_data(struct hash_device_data *device_data, memmove(device_data->state.buffer, req_ctx->state.buffer, - HASH_BLOCK_SIZE / sizeof(u32)); + HASH_BLOCK_SIZE); if (ret) { dev_err(device_data->dev, "%s: hash_save_state() failed!\n", __func__); diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 0b8fe2ec5315..f3801b983f42 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -182,7 +182,7 @@ struct crypto_alg p8_aes_cbc_alg = { .cra_name = "cbc(aes)", .cra_driver_name = "p8_aes_cbc", .cra_module = THIS_MODULE, - .cra_priority = 1000, + .cra_priority = 2000, .cra_type = &crypto_blkcipher_type, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, .cra_alignmask = 0, diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index ee1306cd8f59..404a1b69a3ab 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -166,7 +166,7 @@ struct crypto_alg p8_aes_ctr_alg = { .cra_name = "ctr(aes)", .cra_driver_name = "p8_aes_ctr", .cra_module = THIS_MODULE, - .cra_priority = 1000, + .cra_priority = 2000, .cra_type = &crypto_blkcipher_type, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, .cra_alignmask = 0, diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 37649221f81c..ca64b174f8a3 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -218,8 +218,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = { { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc }, }; -#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19) -#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14) +#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \ + GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19)) + +#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? 
\ + GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14)) /* Device 16, functions 2-7 */ @@ -1175,14 +1178,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci) pci_read_config_dword(pvt->pci_tad[i], rir_offset[j][k], ®); - tmp_mb = RIR_OFFSET(reg) << 6; + tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6; gb = div_u64_rem(tmp_mb, 1024, &mb); edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", i, j, k, gb, (mb*1000)/1024, ((u64)tmp_mb) << 20L, - (u32)RIR_RNK_TGT(reg), + (u32)RIR_RNK_TGT(pvt->info.type, reg), reg); } } @@ -1512,7 +1515,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], rir_offset[n_rir][idx], ®); - *rank = RIR_RNK_TGT(reg); + *rank = RIR_RNK_TGT(pvt->info.type, reg); edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", n_rir, diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index e0e6b74fef8f..377d935a3380 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -61,15 +61,24 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) { /* + * If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a + * displacement in the interval [0, MIN_KIMG_ALIGN) that + * is a multiple of the minimal segment alignment (SZ_64K) + */ + u32 mask = (MIN_KIMG_ALIGN - 1) & ~(SZ_64K - 1); + u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ? + (phys_seed >> 32) & mask : TEXT_OFFSET; + + /* * If KASLR is enabled, and we have some randomness available, * locate the kernel at a randomized offset in physical memory. */ - *reserve_size = kernel_memsize + TEXT_OFFSET; + *reserve_size = kernel_memsize + offset; status = efi_random_alloc(sys_table_arg, *reserve_size, MIN_KIMG_ALIGN, reserve_addr, - phys_seed); + (u32)phys_seed); - *image_addr = *reserve_addr + TEXT_OFFSET; + *image_addr = *reserve_addr + offset; } else { /* * Else, try a straight allocation at the preferred offset. 
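The arm64-stub change above derives the kernel's physical placement offset from the top 32 bits of the KASLR seed, masked so that it stays below MIN_KIMG_ALIGN and remains a multiple of the 64 KiB segment alignment, and keeps the fixed TEXT_OFFSET when CONFIG_DEBUG_ALIGN_RODATA is enabled. A sketch of that arithmetic; the 2 MiB alignment and 0x80000 text offset below are assumed values for illustration, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

/* Illustrative values only; the kernel takes these from its headers. */
#define MIN_KIMG_ALIGN  (2UL * 1024 * 1024)     /* assumed 2 MiB */
#define SZ_64K          (64UL * 1024)
#define TEXT_OFFSET     0x80000UL               /* assumed */

/* Displacement in [0, MIN_KIMG_ALIGN) that is a multiple of 64 KiB,
 * taken from the top 32 bits of the KASLR seed; with rodata-alignment
 * debugging enabled the fixed TEXT_OFFSET is kept instead. */
static uint32_t kimg_offset(uint64_t phys_seed, int debug_align_rodata)
{
    uint32_t mask = (MIN_KIMG_ALIGN - 1) & ~(SZ_64K - 1);

    if (debug_align_rodata)
        return TEXT_OFFSET;
    return (uint32_t)(phys_seed >> 32) & mask;
}

int main(void)
{
    uint64_t seed = 0x123456789abcdef0ULL;

    printf("randomized offset: 0x%x\n", (unsigned)kimg_offset(seed, 0));
    printf("fixed offset:      0x%x\n", (unsigned)kimg_offset(seed, 1));
    return 0;
}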
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 33a1f9779b86..4ea71d505bce 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -551,11 +551,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio) /* disable interrupts and clear status */ for (i = 0; i < kona_gpio->num_bank; i++) { /* Unlock the entire bank first */ - bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE); + bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE); writel(0xffffffff, reg_base + GPIO_INT_MASK(i)); writel(0xffffffff, reg_base + GPIO_INT_STATUS(i)); /* Now re-lock the bank */ - bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE); + bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE); } } diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c index 3a5c7011ad3b..8b830996fe02 100644 --- a/drivers/gpio/gpiolib-legacy.c +++ b/drivers/gpio/gpiolib-legacy.c @@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) if (!desc && gpio_is_valid(gpio)) return -EPROBE_DEFER; + err = gpiod_request(desc, label); + if (err) + return err; + if (flags & GPIOF_OPEN_DRAIN) set_bit(FLAG_OPEN_DRAIN, &desc->flags); @@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) if (flags & GPIOF_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); - err = gpiod_request(desc, label); - if (err) - return err; - if (flags & GPIOF_DIR_IN) err = gpiod_direction_input(desc); else diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 4e4c3083ae56..06d345b087f8 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -927,14 +927,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label) spin_lock_irqsave(&gpio_lock, flags); } done: - if (status < 0) { - /* Clear flags that might have been set by the caller before - * requesting the GPIO. - */ - clear_bit(FLAG_ACTIVE_LOW, &desc->flags); - clear_bit(FLAG_OPEN_DRAIN, &desc->flags); - clear_bit(FLAG_OPEN_SOURCE, &desc->flags); - } spin_unlock_irqrestore(&gpio_lock, flags); return status; } @@ -2062,28 +2054,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, } EXPORT_SYMBOL_GPL(gpiod_get_optional); -/** - * gpiod_parse_flags - helper function to parse GPIO lookup flags - * @desc: gpio to be setup - * @lflags: gpio_lookup_flags - returned from of_find_gpio() or - * of_get_gpio_hog() - * - * Set the GPIO descriptor flags based on the given GPIO lookup flags. - */ -static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags) -{ - if (lflags & GPIO_ACTIVE_LOW) - set_bit(FLAG_ACTIVE_LOW, &desc->flags); - if (lflags & GPIO_OPEN_DRAIN) - set_bit(FLAG_OPEN_DRAIN, &desc->flags); - if (lflags & GPIO_OPEN_SOURCE) - set_bit(FLAG_OPEN_SOURCE, &desc->flags); -} /** * gpiod_configure_flags - helper function to configure a given GPIO * @desc: gpio whose value will be assigned * @con_id: function within the GPIO consumer + * @lflags: gpio_lookup_flags - returned from of_find_gpio() or + * of_get_gpio_hog() * @dflags: gpiod_flags - optional GPIO initialization flags * * Return 0 on success, -ENOENT if no GPIO has been assigned to the @@ -2091,10 +2068,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags) * occurred while trying to acquire the GPIO. 
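The gpiolib hunks here move gpiod_request() ahead of any flag manipulation and fold the lookup-flag parsing into gpiod_configure_flags(), so a failed request can no longer leave stale ACTIVE_LOW/OPEN_DRAIN/OPEN_SOURCE bits behind and the error path needs no flag clean-up. A small sketch of that request-then-configure ordering; the descriptor type and flag names are invented stand-ins:

#include <stdio.h>

#define FLAG_ACTIVE_LOW  (1u << 0)
#define FLAG_OPEN_DRAIN  (1u << 1)

struct desc {
    int requested;
    unsigned int flags;
};

static int request_desc(struct desc *d, int fail)
{
    if (fail)
        return -1;              /* e.g. already claimed by someone else */
    d->requested = 1;
    return 0;
}

/* Only touch d->flags once the descriptor is owned by the caller. */
static int get_desc(struct desc *d, unsigned int lflags, int fail_request)
{
    int rc = request_desc(d, fail_request);

    if (rc)
        return rc;              /* nothing to undo: flags were never set */

    if (lflags & FLAG_ACTIVE_LOW)
        d->flags |= FLAG_ACTIVE_LOW;
    if (lflags & FLAG_OPEN_DRAIN)
        d->flags |= FLAG_OPEN_DRAIN;
    return 0;
}

int main(void)
{
    struct desc d = { 0, 0 };

    if (get_desc(&d, FLAG_ACTIVE_LOW, 1))
        printf("request failed, flags still 0x%x\n", d.flags);
    if (!get_desc(&d, FLAG_ACTIVE_LOW, 0))
        printf("request ok, flags now 0x%x\n", d.flags);
    return 0;
}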
*/ static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, - enum gpiod_flags dflags) + unsigned long lflags, enum gpiod_flags dflags) { int status; + if (lflags & GPIO_ACTIVE_LOW) + set_bit(FLAG_ACTIVE_LOW, &desc->flags); + if (lflags & GPIO_OPEN_DRAIN) + set_bit(FLAG_OPEN_DRAIN, &desc->flags); + if (lflags & GPIO_OPEN_SOURCE) + set_bit(FLAG_OPEN_SOURCE, &desc->flags); + /* No particular flag request, return here... */ if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) { pr_debug("no flags found for %s\n", con_id); @@ -2161,13 +2145,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, return desc; } - gpiod_parse_flags(desc, lookupflags); - status = gpiod_request(desc, con_id); if (status < 0) return ERR_PTR(status); - status = gpiod_configure_flags(desc, con_id, flags); + status = gpiod_configure_flags(desc, con_id, lookupflags, flags); if (status < 0) { dev_dbg(dev, "setup of GPIO %s failed\n", con_id); gpiod_put(desc); @@ -2223,6 +2205,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, if (IS_ERR(desc)) return desc; + ret = gpiod_request(desc, NULL); + if (ret) + return ERR_PTR(ret); + if (active_low) set_bit(FLAG_ACTIVE_LOW, &desc->flags); @@ -2233,10 +2219,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, set_bit(FLAG_OPEN_SOURCE, &desc->flags); } - ret = gpiod_request(desc, NULL); - if (ret) - return ERR_PTR(ret); - return desc; } EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod); @@ -2289,8 +2271,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name, chip = gpiod_to_chip(desc); hwnum = gpio_chip_hwgpio(desc); - gpiod_parse_flags(desc, lflags); - local_desc = gpiochip_request_own_desc(chip, hwnum, name); if (IS_ERR(local_desc)) { pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n", @@ -2298,7 +2278,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name, return PTR_ERR(local_desc); } - status = gpiod_configure_flags(desc, name, dflags); + status = gpiod_configure_flags(desc, name, lflags, dflags); if (status < 0) { pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n", name, chip->label, hwnum); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 119cdc2c43e7..7ef2c13921b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -194,12 +194,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) bpc = 8; DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n", connector->name, bpc); - } else if (bpc > 8) { - /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ - DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n", - connector->name); - bpc = 8; } + } else if (bpc > 8) { + /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ + DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. 
Using 8 bpc.\n", + connector->name); + bpc = 8; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 7b7f4aba60c0..fe36caf1b7d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -150,7 +150,7 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { amdgpu_crtc = to_amdgpu_crtc(crtc); if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - vrefresh = amdgpu_crtc->hw_mode.vrefresh; + vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 946300764609..b57fffc2d4af 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -5463,7 +5463,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, case 2: for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; - if ((ring->me == me_id) & (ring->pipe == pipe_id)) + if ((ring->me == me_id) && (ring->pipe == pipe_id)) amdgpu_fence_process(ring); } break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 9be007081b72..eb1da83c9902 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, pqm_uninit(&p->pqm); /* Iterate over all process device data structure and check - * if we should reset all wavefronts */ - list_for_each_entry(pdd, &p->per_device_data, per_device_list) + * if we should delete debug managers and reset all wavefronts + */ + list_for_each_entry(pdd, &p->per_device_data, per_device_list) { + if ((pdd->dev->dbgmgr) && + (pdd->dev->dbgmgr->pasid == p->pasid)) + kfd_dbgmgr_destroy(pdd->dev->dbgmgr); + if (pdd->reset_wavefronts) { pr_warn("amdkfd: Resetting all wave fronts\n"); dbgdev_wave_reset_wavefronts(pdd->dev, p); pdd->reset_wavefronts = false; } + } mutex_unlock(&p->mutex); @@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) idx = srcu_read_lock(&kfd_processes_srcu); + /* + * Look for the process that matches the pasid. If there is no such + * process, we either released it in amdkfd's own notifier, or there + * is a bug. Unfortunately, there is no way to tell... 
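The kfd_unbind_process_from_device() rework in this hunk drops the BUG_ON() after the hash walk: if no process matches the pasid, the SRCU read lock is simply released and the function returns, since the process may already have been torn down by amdkfd's own notifier. A plain-C sketch of that find-it-or-bail-out-quietly structure, with the RCU-protected hash table reduced to a static array:

#include <stdio.h>

struct process {
    unsigned int pasid;
    int bound;
};

/* Stand-in for the RCU-protected process hash table. */
static struct process table[] = {
    { 10, 1 }, { 11, 1 }, { 12, 1 },
};

/* Tear down state for the process owning @pasid; if no such process
 * exists (it may already have been released elsewhere), return
 * quietly instead of treating the miss as a fatal error. */
static void unbind_process(unsigned int pasid)
{
    unsigned int i;

    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if (table[i].pasid != pasid)
            continue;

        table[i].bound = 0;     /* the actual per-device teardown */
        printf("unbound pasid %u\n", pasid);
        return;
    }

    printf("pasid %u not found, ignoring\n", pasid);
}

int main(void)
{
    unbind_process(11);         /* found: normal teardown */
    unbind_process(42);         /* missing: no BUG(), just return */
    return 0;
}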
+ */ hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) - if (p->pasid == pasid) - break; + if (p->pasid == pasid) { - srcu_read_unlock(&kfd_processes_srcu, idx); + srcu_read_unlock(&kfd_processes_srcu, idx); - BUG_ON(p->pasid != pasid); + pr_debug("Unbinding process %d from IOMMU\n", pasid); - mutex_lock(&p->mutex); + mutex_lock(&p->mutex); - if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) - kfd_dbgmgr_destroy(dev->dbgmgr); + if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) + kfd_dbgmgr_destroy(dev->dbgmgr); - pqm_uninit(&p->pqm); + pqm_uninit(&p->pqm); - pdd = kfd_get_process_device_data(dev, p); + pdd = kfd_get_process_device_data(dev, p); - if (!pdd) { - mutex_unlock(&p->mutex); - return; - } + if (!pdd) { + mutex_unlock(&p->mutex); + return; + } - if (pdd->reset_wavefronts) { - dbgdev_wave_reset_wavefronts(pdd->dev, p); - pdd->reset_wavefronts = false; - } + if (pdd->reset_wavefronts) { + dbgdev_wave_reset_wavefronts(pdd->dev, p); + pdd->reset_wavefronts = false; + } - /* - * Just mark pdd as unbound, because we still need it to call - * amd_iommu_unbind_pasid() in when the process exits. - * We don't call amd_iommu_unbind_pasid() here - * because the IOMMU called us. - */ - pdd->bound = false; + /* + * Just mark pdd as unbound, because we still need it + * to call amd_iommu_unbind_pasid() in when the + * process exits. + * We don't call amd_iommu_unbind_pasid() here + * because the IOMMU called us. + */ + pdd->bound = false; - mutex_unlock(&p->mutex); + mutex_unlock(&p->mutex); + + return; + } + + srcu_read_unlock(&kfd_processes_srcu, idx); } struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index d0299aed517e..59d1269626b1 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -335,6 +335,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane, atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, factor_reg); + } else { + atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0); } } diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index aeee083c7f95..6253775b8d9c 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -150,7 +150,7 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) for (i = 0; i < state->num_connector; i++) { struct drm_connector *connector = state->connectors[i]; - if (!connector) + if (!connector || !connector->funcs) continue; /* @@ -367,6 +367,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, drm_property_unreference_blob(state->mode_blob); state->mode_blob = NULL; + memset(&state->mode, 0, sizeof(state->mode)); + if (blob) { if (blob->length != sizeof(struct drm_mode_modeinfo) || drm_mode_convert_umode(&state->mode, @@ -379,7 +381,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", state->mode.name, state); } else { - memset(&state->mode, 0, sizeof(state->mode)); state->enable = false; DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", state); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 24c5434abd1c..dc84003f694e 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2682,8 +2682,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, goto out; } - drm_mode_set_crtcinfo(mode, 
CRTC_INTERLACE_HALVE_V); - /* * Check whether the primary plane supports the fb pixel format. * Drivers not implementing the universal planes API use a @@ -3316,6 +3314,24 @@ int drm_mode_addfb2(struct drm_device *dev, return 0; } +struct drm_mode_rmfb_work { + struct work_struct work; + struct list_head fbs; +}; + +static void drm_mode_rmfb_work_fn(struct work_struct *w) +{ + struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work); + + while (!list_empty(&arg->fbs)) { + struct drm_framebuffer *fb = + list_first_entry(&arg->fbs, typeof(*fb), filp_head); + + list_del_init(&fb->filp_head); + drm_framebuffer_remove(fb); + } +} + /** * drm_mode_rmfb - remove an FB from the configuration * @dev: drm device for the ioctl @@ -3356,7 +3372,25 @@ int drm_mode_rmfb(struct drm_device *dev, mutex_unlock(&dev->mode_config.fb_lock); mutex_unlock(&file_priv->fbs_lock); - drm_framebuffer_unreference(fb); + /* + * we now own the reference that was stored in the fbs list + * + * drm_framebuffer_remove may fail with -EINTR on pending signals, + * so run this in a separate stack as there's no way to correctly + * handle this after the fb is already removed from the lookup table. + */ + if (atomic_read(&fb->refcount.refcount) > 1) { + struct drm_mode_rmfb_work arg; + + INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); + INIT_LIST_HEAD(&arg.fbs); + list_add_tail(&fb->filp_head, &arg.fbs); + + schedule_work(&arg.work); + flush_work(&arg.work); + destroy_work_on_stack(&arg.work); + } else + drm_framebuffer_unreference(fb); return 0; @@ -3509,7 +3543,6 @@ out_err1: return ret; } - /** * drm_fb_release - remove and free the FBs on this file * @priv: drm file for the ioctl @@ -3524,6 +3557,9 @@ out_err1: void drm_fb_release(struct drm_file *priv) { struct drm_framebuffer *fb, *tfb; + struct drm_mode_rmfb_work arg; + + INIT_LIST_HEAD(&arg.fbs); /* * When the file gets released that means no one else can access the fb @@ -3536,10 +3572,22 @@ void drm_fb_release(struct drm_file *priv) * at it any more. */ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { - list_del_init(&fb->filp_head); + if (atomic_read(&fb->refcount.refcount) > 1) { + list_move_tail(&fb->filp_head, &arg.fbs); + } else { + list_del_init(&fb->filp_head); - /* This drops the fpriv->fbs reference. */ - drm_framebuffer_unreference(fb); + /* This drops the fpriv->fbs reference. 
*/ + drm_framebuffer_unreference(fb); + } + } + + if (!list_empty(&arg.fbs)) { + INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); + + schedule_work(&arg.work); + flush_work(&arg.work); + destroy_work_on_stack(&arg.work); } } diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index d268bf18a662..2485fb652716 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2874,11 +2874,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) drm_dp_port_teardown_pdt(port, port->pdt); if (!port->input && port->vcpi.vcpi > 0) { - if (mgr->mst_state) { - drm_dp_mst_reset_vcpi_slots(mgr, port); - drm_dp_update_payload_part1(mgr); - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); - } + drm_dp_mst_reset_vcpi_slots(mgr, port); + drm_dp_update_payload_part1(mgr); + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } kref_put(&port->kref, drm_dp_free_mst_port); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 69cbab5e5c81..5ad036741b99 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1899,7 +1899,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, int n, int width, int height) { int c, o; - struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; const struct drm_connector_helper_funcs *connector_funcs; struct drm_encoder *encoder; @@ -1918,7 +1917,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, if (modes[n] == NULL) return best_score; - crtcs = kzalloc(dev->mode_config.num_connector * + crtcs = kzalloc(fb_helper->connector_count * sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); if (!crtcs) return best_score; @@ -1964,7 +1963,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, if (score > best_score) { best_score = score; memcpy(best_crtcs, crtcs, - dev->mode_config.num_connector * + fb_helper->connector_count * sizeof(struct drm_fb_helper_crtc *)); } } diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index cd74a0953f42..39e30abddf08 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1487,6 +1487,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out, if (out->status != MODE_OK) goto out; + drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V); + ret = 0; out: diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c index 6b43ae3ffd73..1616af209bfc 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c @@ -72,7 +72,7 @@ static const char *const dsi_errors[] = { "RX Prot Violation", "HS Generic Write FIFO Full", "LP Generic Write FIFO Full", - "Generic Read Data Avail" + "Generic Read Data Avail", "Special Packet Sent", "Tearing Effect", }; diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index f7df54a8ee2b..c0a96f1ee18e 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) if (!mutex_is_locked(mutex)) return false; -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) return mutex->owner == task; #else /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 
7e461dca564c..9ed9f6dde86f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7357,6 +7357,8 @@ enum skl_disp_power_wells { #define TRANS_CLK_SEL_DISABLED (0x0<<29) #define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29) +#define CDCLK_FREQ 0x46200 + #define TRANSA_MSA_MISC 0x60410 #define TRANSB_MSA_MISC 0x61410 #define TRANSC_MSA_MISC 0x62410 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index afa81691163d..c41bc42b6fa7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8228,12 +8228,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; + int i; u32 val, final; bool has_lvds = false; bool has_cpu_edp = false; bool has_panel = false; bool has_ck505 = false; bool can_ssc = false; + bool using_ssc_source = false; /* We need to take the global config into account */ for_each_intel_encoder(dev, encoder) { @@ -8260,8 +8262,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) can_ssc = true; } - DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", - has_panel, has_lvds, has_ck505); + /* Check if any DPLLs are using the SSC source */ + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + u32 temp = I915_READ(PCH_DPLL(i)); + + if (!(temp & DPLL_VCO_ENABLE)) + continue; + + if ((temp & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + using_ssc_source = true; + break; + } + } + + DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", + has_panel, has_lvds, has_ck505, using_ssc_source); /* Ironlake: try to setup display ref clock before DPLL * enabling. This is only under driver's control after @@ -8298,9 +8314,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } else final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - } else { - final |= DREF_SSC_SOURCE_DISABLE; - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } else if (using_ssc_source) { + final |= DREF_SSC_SOURCE_ENABLE; + final |= DREF_SSC1_ENABLE; } if (final == val) @@ -8346,7 +8362,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); } else { - DRM_DEBUG_KMS("Disabling SSC entirely\n"); + DRM_DEBUG_KMS("Disabling CPU source output\n"); val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; @@ -8357,16 +8373,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); - /* Turn off the SSC source */ - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_DISABLE; + if (!using_ssc_source) { + DRM_DEBUG_KMS("Disabling SSC source\n"); - /* Turn off SSC1 */ - val &= ~DREF_SSC1_ENABLE; + /* Turn off the SSC source */ + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_DISABLE; - I915_WRITE(PCH_DREF_CONTROL, val); - POSTING_READ(PCH_DREF_CONTROL); - udelay(200); + /* Turn off SSC1 */ + val &= ~DREF_SSC1_ENABLE; + + I915_WRITE(PCH_DREF_CONTROL, val); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + } } BUG_ON(val != final); @@ -9669,6 +9689,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data); mutex_unlock(&dev_priv->rps.hw_lock); + I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); + intel_update_cdclk(dev); WARN(cdclk != dev_priv->cdclk_freq, diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 
e55a82a99e7f..8e1d6d74c203 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -3628,8 +3628,7 @@ static bool intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP, uint8_t dp_train_pat) { - if (!intel_dp->train_set_valid) - memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); + memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); intel_dp_set_signal_levels(intel_dp, DP); return intel_dp_set_link_train(intel_dp, DP, dp_train_pat); } @@ -3746,22 +3745,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) break; } - /* - * if we used previously trained voltage and pre-emphasis values - * and we don't get clock recovery, reset link training values - */ - if (intel_dp->train_set_valid) { - DRM_DEBUG_KMS("clock recovery not ok, reset"); - /* clear the flag as we are not reusing train set */ - intel_dp->train_set_valid = false; - if (!intel_dp_reset_link_train(intel_dp, &DP, - DP_TRAINING_PATTERN_1 | - DP_LINK_SCRAMBLING_DISABLE)) { - DRM_ERROR("failed to enable link training\n"); - return; - } - continue; - } /* Check to see if we've tried the max voltage */ for (i = 0; i < intel_dp->lane_count; i++) @@ -3854,7 +3837,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) /* Make sure clock is still ok */ if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { - intel_dp->train_set_valid = false; intel_dp_link_training_clock_recovery(intel_dp); intel_dp_set_link_train(intel_dp, &DP, training_pattern | @@ -3871,7 +3853,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) /* Try 5 times, then try clock recovery if that fails */ if (tries > 5) { - intel_dp->train_set_valid = false; intel_dp_link_training_clock_recovery(intel_dp); intel_dp_set_link_train(intel_dp, &DP, training_pattern | @@ -3893,10 +3874,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) intel_dp->DP = DP; - if (channel_eq) { - intel_dp->train_set_valid = true; + if (channel_eq) DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); - } } void intel_dp_stop_link_train(struct intel_dp *intel_dp) @@ -5079,13 +5058,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) void intel_dp_encoder_reset(struct drm_encoder *encoder) { - struct intel_dp *intel_dp; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (!HAS_DDI(dev_priv)) + intel_dp->DP = I915_READ(intel_dp->output_reg); if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP) return; - intel_dp = enc_to_intel_dp(encoder); - pps_lock(intel_dp); /* @@ -5157,9 +5138,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) intel_display_power_get(dev_priv, power_domain); if (long_hpd) { - /* indicate that we need to restart link training */ - intel_dp->train_set_valid = false; - if (!intel_digital_port_connected(dev_priv, intel_dig_port)) goto mst_fail; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f34a219ec5c4..c5f11e0c5d5b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -783,7 +783,6 @@ struct intel_dp { bool has_aux_irq, int send_bytes, uint32_t aux_clock_divider); - bool train_set_valid; /* Displayport compliance testing */ unsigned long compliance_test_type; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 4fd5fdfef6bd..c0c094d5b822 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -362,12 +362,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, uint64_t conn_configured = 0, mask; int pass = 0; - save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), + save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool), GFP_KERNEL); if (!save_enabled) return false; - memcpy(save_enabled, enabled, dev->mode_config.num_connector); + memcpy(save_enabled, enabled, fb_helper->connector_count); mask = (1 << fb_helper->connector_count) - 1; retry: for (i = 0; i < fb_helper->connector_count; i++) { @@ -501,7 +501,7 @@ retry: if (fallback) { bail: DRM_DEBUG_KMS("Not using firmware configuration\n"); - memcpy(enabled, save_enabled, dev->mode_config.num_connector); + memcpy(enabled, save_enabled, fb_helper->connector_count); kfree(save_enabled); return false; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0a68d2ec89dc..62284e45d531 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3880,6 +3880,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) if (IS_HASWELL(dev) || IS_BROADWELL(dev)) hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); + memset(active, 0, sizeof(*active)); + active->pipe_enabled = intel_crtc->active; if (active->pipe_enabled) { diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 7b990b4e96d2..5378bdc3bbf9 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -26,6 +26,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_of.h> +#include <video/imx-ipu-v3.h> #include "imx-drm.h" @@ -504,6 +505,13 @@ static int compare_of(struct device *dev, void *data) { struct device_node *np = data; + /* Special case for DI, dev->of_node may not be set yet */ + if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) { + struct ipu_client_platformdata *pdata = dev->platform_data; + + return pdata->of_node == np; + } + /* Special 
case for LDB, one device for two channels */ if (of_node_cmp(np->name, "lvds-channel") == 0) { np = of_get_parent(np); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 4ab841eebee1..9b0abd44b751 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -369,7 +369,7 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, - ipu_crtc->dev->of_node); + pdata->of_node); if (ret) { dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); goto err_put_resources; diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index c99d3fe12881..e5bb40e58020 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -194,7 +194,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) } } - fvv = pllreffreq * testn / testm; + fvv = pllreffreq * (n + 1) / (m + 1); fvv = (fvv - 800000) / 50000; if (fvv > 15) @@ -214,6 +214,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) WREG_DAC(MGA1064_PIX_PLLC_M, m); WREG_DAC(MGA1064_PIX_PLLC_N, n); WREG_DAC(MGA1064_PIX_PLLC_P, p); + + if (mdev->unique_rev_id >= 0x04) { + WREG_DAC(0x1a, 0x09); + msleep(20); + WREG_DAC(0x1a, 0x01); + + } + return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 59f27e774acb..e40a1b07a014 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -557,6 +557,8 @@ nouveau_fbcon_init(struct drm_device *dev) if (ret) goto fini; + if (fbcon->helper.fbdev) + fbcon->helper.fbdev->pixmap.buf_align = 4; return 0; fini: diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 789dc2993b0d..8f715feadf56 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) uint32_t fg; uint32_t bg; uint32_t dsize; - uint32_t width; uint32_t *data = (uint32_t *)image->data; int ret; @@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 8); - dsize = ALIGN(width * image->height, 32) >> 5; - if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; @@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ((image->dx + image->width) & 0xffff)); OUT_RING(chan, bg); OUT_RING(chan, fg); - OUT_RING(chan, (image->height << 16) | width); + OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); + dsize = ALIGN(image->width * image->height, 32) >> 5; while (dsize) { int iter_len = dsize > 128 ? 
128 : dsize; diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index e05499d6ed83..a4e259a00430 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; - uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; int ret; @@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 32); - dwords = (width * image->height) >> 5; - BEGIN_NV04(chan, NvSub2D, 0x0814, 2); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING(chan, 0); OUT_RING(chan, image->dy); + dwords = ALIGN(image->width * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 2047 : dwords; diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index c97395b4a312..f28315e865a5 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; - uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; int ret; @@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 32); - dwords = (width * image->height) >> 5; - BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING (chan, 0); OUT_RING (chan, image->dy); + dwords = ALIGN(image->width * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 
2047 : dwords; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c index b4b41b135643..2aaf0dd19a55 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c @@ -40,8 +40,8 @@ static int gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) { struct nvkm_device *device = outp->base.disp->engine.subdev.device; - const u32 loff = gf119_sor_loff(outp); - nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); + const u32 soff = gf119_sor_soff(outp); + nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 36655a74c538..eeeea1c2ca23 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -874,22 +874,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc) } static const struct nvkm_enum gf100_mp_warp_error[] = { - { 0x00, "NO_ERROR" }, - { 0x01, "STACK_MISMATCH" }, + { 0x01, "STACK_ERROR" }, + { 0x02, "API_STACK_ERROR" }, + { 0x03, "RET_EMPTY_STACK_ERROR" }, + { 0x04, "PC_WRAP" }, { 0x05, "MISALIGNED_PC" }, - { 0x08, "MISALIGNED_GPR" }, - { 0x09, "INVALID_OPCODE" }, - { 0x0d, "GPR_OUT_OF_BOUNDS" }, - { 0x0e, "MEM_OUT_OF_BOUNDS" }, - { 0x0f, "UNALIGNED_MEM_ACCESS" }, + { 0x06, "PC_OVERFLOW" }, + { 0x07, "MISALIGNED_IMMC_ADDR" }, + { 0x08, "MISALIGNED_REG" }, + { 0x09, "ILLEGAL_INSTR_ENCODING" }, + { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" }, + { 0x0b, "ILLEGAL_INSTR_PARAM" }, + { 0x0c, "INVALID_CONST_ADDR" }, + { 0x0d, "OOR_REG" }, + { 0x0e, "OOR_ADDR" }, + { 0x0f, "MISALIGNED_ADDR" }, { 0x10, "INVALID_ADDR_SPACE" }, - { 0x11, "INVALID_PARAM" }, + { 0x11, "ILLEGAL_INSTR_PARAM2" }, + { 0x12, "INVALID_CONST_ADDR_LDC" }, + { 0x13, "GEOMETRY_SM_ERROR" }, + { 0x14, "DIVERGENT" }, + { 0x15, "WARP_EXIT" }, {} }; static const struct nvkm_bitfield gf100_mp_global_error[] = { + { 0x00000001, "SM_TO_SM_FAULT" }, + { 0x00000002, "L1_ERROR" }, { 0x00000004, "MULTIPLE_WARP_ERRORS" }, - { 0x00000008, "OUT_OF_STACK_SPACE" }, + { 0x00000008, "PHYSICAL_STACK_OVERFLOW" }, + { 0x00000010, "BPT_INT" }, + { 0x00000020, "BPT_PAUSE" }, + { 0x00000040, "SINGLE_STEP_COMPLETE" }, + { 0x20000000, "ECC_SEC_ERROR" }, + { 0x40000000, "ECC_DED_ERROR" }, + { 0x80000000, "TIMEOUT" }, {} }; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index c566993a2ec3..e2dd5d19c32c 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) /* * GPU helpers function. */ + +/** + * radeon_device_is_virtual - check if we are running is a virtual environment + * + * Check if the asic has been passed through to a VM (all asics). + * Used at driver startup. + * Returns true if virtual or false if not. 
+ */ +static bool radeon_device_is_virtual(void) +{ +#ifdef CONFIG_X86 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); +#else + return false; +#endif +} + /** * radeon_card_posted - check if the hw has already been initialized * @@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; + /* for pass through, always force asic_init */ + if (radeon_device_is_virtual()) + return false; + /* required for EFI mode on macbook2,1 which uses an r5xx asic */ if (efi_enabled(EFI_BOOT) && (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 745e996d2dbc..4ae8b56b1847 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1004,9 +1004,9 @@ out_unlock: return ret; } -static bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, - uint32_t *new_flags) +bool ttm_bo_mem_compat(struct ttm_placement *placement, + struct ttm_mem_reg *mem, + uint32_t *new_flags) { int i; @@ -1038,6 +1038,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement, return false; } +EXPORT_SYMBOL(ttm_bo_mem_compat); int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index 299925a1f6c6..eadc981ee79a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c @@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv, { struct ttm_buffer_object *bo = &buf->base; int ret; + uint32_t new_flags; ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); if (unlikely(ret != 0)) @@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv, if (unlikely(ret != 0)) goto err; - ret = ttm_bo_validate(bo, placement, interruptible, false); + if (buf->pin_count > 0) + ret = ttm_bo_mem_compat(placement, &bo->mem, + &new_flags) == true ? 0 : -EINVAL; + else + ret = ttm_bo_validate(bo, placement, interruptible, false); + if (!ret) vmw_bo_pin_reserved(buf, true); @@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv, { struct ttm_buffer_object *bo = &buf->base; int ret; + uint32_t new_flags; ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); if (unlikely(ret != 0)) @@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv, if (unlikely(ret != 0)) goto err; + if (buf->pin_count > 0) { + ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem, + &new_flags) == true ? 0 : -EINVAL; + goto out_unreserve; + } + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible, false); if (likely(ret == 0) || ret == -ERESTARTSYS) @@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv, struct ttm_placement placement; struct ttm_place place; int ret = 0; + uint32_t new_flags; place = vmw_vram_placement.placement[0]; place.lpfn = bo->num_pages; @@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv, */ if (bo->mem.mem_type == TTM_PL_VRAM && bo->mem.start < bo->num_pages && - bo->mem.start > 0) + bo->mem.start > 0 && + buf->pin_count == 0) (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false); - ret = ttm_bo_validate(bo, &placement, interruptible, false); + if (buf->pin_count > 0) + ret = ttm_bo_mem_compat(&placement, &bo->mem, + &new_flags) == true ? 
0 : -EINVAL; + else + ret = ttm_bo_validate(bo, &placement, interruptible, false); /* For some reason we didn't end up at the start of vram */ WARN_ON(ret == 0 && bo->offset != 0); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 24fb348a44e1..f3f31f995878 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -227,6 +227,7 @@ static int vmw_force_iommu; static int vmw_restrict_iommu; static int vmw_force_coherent; static int vmw_restrict_dma_mask; +static int vmw_assume_16bpp; static int vmw_probe(struct pci_dev *, const struct pci_device_id *); static void vmw_master_init(struct vmw_master *); @@ -243,6 +244,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); module_param_named(force_coherent, vmw_force_coherent, int, 0600); MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); +MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); +module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); static void vmw_print_capabilities(uint32_t capabilities) @@ -652,6 +655,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->vram_start = pci_resource_start(dev->pdev, 1); dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); + dev_priv->assume_16bpp = !!vmw_assume_16bpp; + dev_priv->enable_fb = enable_fbdev; vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); @@ -698,6 +703,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) vmw_read(dev_priv, SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); + /* + * Workaround for low memory 2D VMs to compensate for the + * allocation taken by fbdev + */ + if (!(dev_priv->capabilities & SVGA_CAP_3D)) + mem_size *= 2; + dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; dev_priv->prim_bb_mem = vmw_read(dev_priv, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 469cdd520615..2e94fe27b3f6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -387,6 +387,7 @@ struct vmw_private { spinlock_t hw_lock; spinlock_t cap_lock; bool has_dx; + bool assume_16bpp; /* * VGA registers. 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 5da5de0cb522..4948c1529836 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3273,19 +3273,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, true, false, true), - VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok, + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, - &vmw_cmd_ok, true, false, true), - VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok, + &vmw_cmd_dx_cid_check, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check, true, false, true), - VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok, + VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, true, false, true), - VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid, + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, true, false, true), diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 3b1faf7862a5..d2d93959b119 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info) par->set_fb = &vfb->base; - if (!par->bo_ptr) { - /* - * Pin before mapping. Since we don't know in what placement - * to pin, call into KMS to do it for us. - */ - ret = vfb->pin(vfb); - if (ret) { - DRM_ERROR("Could not pin the fbdev framebuffer.\n"); - return ret; - } - - ret = ttm_bo_kmap(&par->vmw_bo->base, 0, - par->vmw_bo->base.num_pages, &par->map); - if (ret) { - vfb->unpin(vfb); - DRM_ERROR("Could not map the fbdev framebuffer.\n"); - return ret; - } - - par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite); - } - return 0; } @@ -573,9 +551,9 @@ static int vmw_fb_set_par(struct fb_info *info) mode = old_mode; old_mode = NULL; } else if (!vmw_kms_validate_mode_vram(vmw_priv, - mode->hdisplay * - (var->bits_per_pixel + 7) / 8, - mode->vdisplay)) { + mode->hdisplay * + DIV_ROUND_UP(var->bits_per_pixel, 8), + mode->vdisplay)) { drm_mode_destroy(vmw_priv->dev, mode); return -EINVAL; } @@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info) if (ret) goto out_unlock; + if (!par->bo_ptr) { + struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb); + + /* + * Pin before mapping. Since we don't know in what placement + * to pin, call into KMS to do it for us. 
+ */ + ret = vfb->pin(vfb); + if (ret) { + DRM_ERROR("Could not pin the fbdev framebuffer.\n"); + goto out_unlock; + } + + ret = ttm_bo_kmap(&par->vmw_bo->base, 0, + par->vmw_bo->base.num_pages, &par->map); + if (ret) { + vfb->unpin(vfb); + DRM_ERROR("Could not map the fbdev framebuffer.\n"); + goto out_unlock; + } + + par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite); + } + + vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width, par->set_fb->height); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 7c2e118a77b0..060e5c6f4446 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1538,14 +1538,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }; int i; - u32 assumed_bpp = 2; + u32 assumed_bpp = 4; - /* - * If using screen objects, then assume 32-bpp because that's what the - * SVGA device is assuming - */ - if (dev_priv->active_display_unit == vmw_du_screen_object) - assumed_bpp = 4; + if (dev_priv->assume_16bpp) + assumed_bpp = 2; if (dev_priv->active_display_unit == vmw_du_screen_target) { max_width = min(max_width, dev_priv->stdu_max_width); diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 0585fd2031dd..5030cba4a581 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -997,7 +997,7 @@ struct ipu_platform_reg { }; /* These must be in the order of the corresponding device tree port nodes */ -static const struct ipu_platform_reg client_reg[] = { +static struct ipu_platform_reg client_reg[] = { { .pdata = { .csi = 0, @@ -1048,7 +1048,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) mutex_unlock(&ipu_client_id_mutex); for (i = 0; i < ARRAY_SIZE(client_reg); i++) { - const struct ipu_platform_reg *reg = &client_reg[i]; + struct ipu_platform_reg *reg = &client_reg[i]; struct platform_device *pdev; struct device_node *of_node; @@ -1070,6 +1070,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) pdev->dev.parent = dev; + reg->pdata.of_node = of_node; ret = platform_device_add_data(pdev, &reg->pdata, sizeof(reg->pdata)); if (!ret) diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 9940f7a7c2b7..6160aa567fbf 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -589,11 +589,21 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device) struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct adreno_irq *irq_params = gpudev->irq; irqreturn_t ret = IRQ_NONE; - unsigned int status = 0, tmp; + unsigned int status = 0, tmp, int_bit; int i; adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status); + /* + * Clear all the interrupt bits but ADRENO_INT_RBBM_AHB_ERROR. Because + * even if we clear it here, it will stay high until it is cleared + * in its respective handler. Otherwise, the interrupt handler will + * fire again. 
+ */ + int_bit = ADRENO_INT_BIT(adreno_dev, ADRENO_INT_RBBM_AHB_ERROR); + adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD, + status & ~int_bit); + /* Loop through all set interrupts and call respective handlers */ for (tmp = status; tmp != 0;) { i = fls(tmp) - 1; @@ -612,9 +622,14 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device) gpudev->irq_trace(adreno_dev, status); - if (status) + /* + * Clear ADRENO_INT_RBBM_AHB_ERROR bit after this interrupt has been + * cleared in its respective handler + */ + if (status & int_bit) adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD, - status); + int_bit); + return ret; } diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 0f3403cb0095..a2af26c81f50 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -198,6 +198,10 @@ struct adreno_gpudev; /* Time to allow preemption to complete (in ms) */ #define ADRENO_PREEMPT_TIMEOUT 10000 +#define ADRENO_INT_BIT(a, _bit) (((a)->gpucore->gpudev->int_bits) ? \ + (adreno_get_int(a, _bit) < 0 ? 0 : \ + BIT(adreno_get_int(a, _bit))) : 0) + /** * enum adreno_preempt_states * ADRENO_PREEMPT_NONE: No preemption is scheduled @@ -574,6 +578,11 @@ enum adreno_regs { ADRENO_REG_REGISTER_MAX, }; +enum adreno_int_bits { + ADRENO_INT_RBBM_AHB_ERROR, + ADRENO_INT_BITS_MAX, +}; + /** * adreno_reg_offsets: Holds array of register offsets * @offsets: Offset array of size defined by enum adreno_regs @@ -589,6 +598,7 @@ struct adreno_reg_offsets { #define ADRENO_REG_UNUSED 0xFFFFFFFF #define ADRENO_REG_SKIP 0xFFFFFFFE #define ADRENO_REG_DEFINE(_offset, _reg) [_offset] = _reg +#define ADRENO_INT_DEFINE(_offset, _val) ADRENO_REG_DEFINE(_offset, _val) /* * struct adreno_vbif_data - Describes vbif register value pair @@ -726,6 +736,7 @@ struct adreno_gpudev { * so define them in the structure and use them as variables. 
*/ const struct adreno_reg_offsets *reg_offsets; + unsigned int *const int_bits; const struct adreno_ft_perf_counters *ft_perf_counters; unsigned int ft_perf_counters_count; @@ -1101,6 +1112,23 @@ static inline unsigned int adreno_getreg(struct adreno_device *adreno_dev, return gpudev->reg_offsets->offsets[offset_name]; } +/* + * adreno_get_int() - Returns the offset value of an interrupt bit from + * the interrupt bit array in the gpudev node + * @adreno_dev: Pointer to the the adreno device + * @bit_name: The interrupt bit enum whose bit is returned + */ +static inline unsigned int adreno_get_int(struct adreno_device *adreno_dev, + enum adreno_int_bits bit_name) +{ + struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); + + if (bit_name >= ADRENO_INT_BITS_MAX) + return -ERANGE; + + return gpudev->int_bits[bit_name]; +} + /** * adreno_gpu_fault() - Return the current state of the GPU * @adreno_dev: A pointer to the adreno_device to query diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c index 97e71464c2df..3f5a9c6318f6 100644 --- a/drivers/gpu/msm/adreno_a3xx.c +++ b/drivers/gpu/msm/adreno_a3xx.c @@ -1425,6 +1425,10 @@ static struct adreno_coresight a3xx_coresight = { .groups = a3xx_coresight_groups, }; +static unsigned int a3xx_int_bits[ADRENO_INT_BITS_MAX] = { + ADRENO_INT_DEFINE(ADRENO_INT_RBBM_AHB_ERROR, A3XX_INT_RBBM_AHB_ERROR), +}; + /* Register offset defines for A3XX */ static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = { ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_WADDR, A3XX_CP_ME_RAM_WADDR), @@ -1853,6 +1857,7 @@ int a3xx_microcode_load(struct adreno_device *adreno_dev, struct adreno_gpudev adreno_a3xx_gpudev = { .reg_offsets = &a3xx_reg_offsets, + .int_bits = a3xx_int_bits, .ft_perf_counters = a3xx_ft_perf_counters, .ft_perf_counters_count = ARRAY_SIZE(a3xx_ft_perf_counters), .perfcounters = &a3xx_perfcounters, diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c index bfbdb0e7ac1f..5ca04e522270 100644 --- a/drivers/gpu/msm/adreno_a4xx.c +++ b/drivers/gpu/msm/adreno_a4xx.c @@ -739,6 +739,10 @@ static void a4xx_err_callback(struct adreno_device *adreno_dev, int bit) } } +static unsigned int a4xx_int_bits[ADRENO_INT_BITS_MAX] = { + ADRENO_INT_DEFINE(ADRENO_INT_RBBM_AHB_ERROR, A4XX_INT_RBBM_AHB_ERROR), +}; + /* Register offset defines for A4XX, in order of enum adreno_regs */ static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = { ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_WADDR, A4XX_CP_ME_RAM_WADDR), @@ -1765,6 +1769,7 @@ static struct adreno_snapshot_data a4xx_snapshot_data = { struct adreno_gpudev adreno_a4xx_gpudev = { .reg_offsets = &a4xx_reg_offsets, + .int_bits = a4xx_int_bits, .ft_perf_counters = a4xx_ft_perf_counters, .ft_perf_counters_count = ARRAY_SIZE(a4xx_ft_perf_counters), .perfcounters = &a4xx_perfcounters, diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 2891940b8f5b..860f6d2925f1 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -2872,6 +2872,10 @@ static struct adreno_ft_perf_counters a5xx_ft_perf_counters[] = { {KGSL_PERFCOUNTER_GROUP_TSE, A5XX_TSE_INPUT_PRIM_NUM}, }; +static unsigned int a5xx_int_bits[ADRENO_INT_BITS_MAX] = { + ADRENO_INT_DEFINE(ADRENO_INT_RBBM_AHB_ERROR, A5XX_INT_RBBM_AHB_ERROR), +}; + /* Register offset defines for A5XX, in order of enum adreno_regs */ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = { ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A5XX_CP_WFI_PEND_CTR), @@ -3504,6 
+3508,7 @@ static struct adreno_coresight a5xx_coresight = { struct adreno_gpudev adreno_a5xx_gpudev = { .reg_offsets = &a5xx_reg_offsets, + .int_bits = a5xx_int_bits, .ft_perf_counters = a5xx_ft_perf_counters, .ft_perf_counters_count = ARRAY_SIZE(a5xx_ft_perf_counters), .coresight = &a5xx_coresight, diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index aad8c162a825..0cd4f7216239 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev) struct elo_priv *priv = hid_get_drvdata(hdev); hid_hw_stop(hdev); - flush_workqueue(wq); + cancel_delayed_work_sync(&priv->work); kfree(priv); } diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 7ecd96bdf834..f62a9d6601cc 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -61,6 +61,7 @@ MODULE_LICENSE("GPL"); #define MT_QUIRK_ALWAYS_VALID (1 << 4) #define MT_QUIRK_VALID_IS_INRANGE (1 << 5) #define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6) +#define MT_QUIRK_CONFIDENCE (1 << 7) #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8) #define MT_QUIRK_NO_AREA (1 << 9) #define MT_QUIRK_IGNORE_DUPLICATES (1 << 10) @@ -78,6 +79,7 @@ struct mt_slot { __s32 contactid; /* the device ContactID assigned to this slot */ bool touch_state; /* is the touch valid? */ bool inrange_state; /* is the finger in proximity of the sensor? */ + bool confidence_state; /* is the touch made by a finger? */ }; struct mt_class { @@ -450,16 +452,6 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) td->buttons_count++; - /* Only map fields from TouchScreen or TouchPad collections. - * We need to ignore fields that belong to other collections - * such as Mouse that might have the same GenericDesktop usages. 
*/ - if (field->application == HID_DG_TOUCHSCREEN) - set_bit(INPUT_PROP_DIRECT, hi->input->propbit); - else if (field->application == HID_DG_TOUCHPAD) - set_bit(INPUT_PROP_POINTER, hi->input->propbit); - else - return 0; - if (usage->usage_index) prev_usage = &field->usage[usage->usage_index - 1]; @@ -512,6 +504,9 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, mt_store_field(usage, td, hi); return 1; case HID_DG_CONFIDENCE: + if (cls->name == MT_CLS_WIN_8 && + field->application == HID_DG_TOUCHPAD) + cls->quirks |= MT_QUIRK_CONFIDENCE; mt_store_field(usage, td, hi); return 1; case HID_DG_TIPSWITCH: @@ -624,6 +619,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input) return; if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) { + int active; int slotnum = mt_compute_slot(td, input); struct mt_slot *s = &td->curdata; struct input_mt *mt = input->mt; @@ -638,10 +634,14 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input) return; } + if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE)) + s->confidence_state = 1; + active = (s->touch_state || s->inrange_state) && + s->confidence_state; + input_mt_slot(input, slotnum); - input_mt_report_slot_state(input, MT_TOOL_FINGER, - s->touch_state || s->inrange_state); - if (s->touch_state || s->inrange_state) { + input_mt_report_slot_state(input, MT_TOOL_FINGER, active); + if (active) { /* this finger is in proximity of the sensor */ int wide = (s->w > s->h); /* divided by two to match visual scale of touch */ @@ -706,6 +706,8 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field, td->curdata.touch_state = value; break; case HID_DG_CONFIDENCE: + if (quirks & MT_QUIRK_CONFIDENCE) + td->curdata.confidence_state = value; if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE) td->curvalid = value; break; diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c index 93ddc1c65b4c..3edd4ac36494 100644 --- a/drivers/hid/hid-steelseries.c +++ b/drivers/hid/hid-steelseries.c @@ -253,11 +253,6 @@ static int steelseries_srws1_probe(struct hid_device *hdev, goto err_free; } - if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) { - ret = -ENODEV; - goto err_free; - } - ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 2f1ddca6f2e0..700145b15088 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, goto inval; } else if (uref->usage_index >= field->report_count) goto inval; - - else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && - (uref_multi->num_values > HID_MAX_MULTI_USAGES || - uref->usage_index + uref_multi->num_values > field->report_count)) - goto inval; } + if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && + (uref_multi->num_values > HID_MAX_MULTI_USAGES || + uref->usage_index + uref_multi->num_values > field->report_count)) + goto inval; + switch (cmd) { case HIDIOCGUSAGE: uref->value = field->value[uref->usage_index]; diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index 6c99ee7bafa3..ee396ff167d9 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -120,6 +120,7 @@ static int ads7828_probe(struct i2c_client *client, unsigned int vref_mv = ADS7828_INT_VREF_MV; bool diff_input = false; bool ext_vref = false; + unsigned int regval; data = 
devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL); if (!data) @@ -154,6 +155,15 @@ static int ads7828_probe(struct i2c_client *client, if (!diff_input) data->cmd_byte |= ADS7828_CMD_SD_SE; + /* + * Datasheet specifies internal reference voltage is disabled by + * default. The internal reference voltage needs to be enabled and + * voltage needs to settle before getting valid ADC data. So perform a + * dummy read to enable the internal reference voltage. + */ + if (!ext_vref) + regmap_read(data->regmap, data->cmd_byte, &regval); + hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, data, ads7828_groups); diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index c43318d3416e..a9356a3dea92 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -66,11 +66,13 @@ static DEFINE_MUTEX(i8k_mutex); static char bios_version[4]; +static char bios_machineid[16]; static struct device *i8k_hwmon_dev; static u32 i8k_hwmon_flags; static uint i8k_fan_mult = I8K_FAN_MULT; static uint i8k_pwm_mult; static uint i8k_fan_max = I8K_FAN_HIGH; +static bool disallow_fan_type_call; #define I8K_HWMON_HAVE_TEMP1 (1 << 0) #define I8K_HWMON_HAVE_TEMP2 (1 << 1) @@ -94,13 +96,13 @@ module_param(ignore_dmi, bool, 0); MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match"); #if IS_ENABLED(CONFIG_I8K) -static bool restricted; +static bool restricted = true; module_param(restricted, bool, 0); -MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set"); +MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)"); static bool power_status; module_param(power_status, bool, 0600); -MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k"); +MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)"); #endif static uint fan_mult; @@ -235,14 +237,28 @@ static int i8k_get_fan_speed(int fan) /* * Read the fan type. */ -static int i8k_get_fan_type(int fan) +static int _i8k_get_fan_type(int fan) { struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, }; + if (disallow_fan_type_call) + return -EINVAL; + regs.ebx = fan & 0xff; return i8k_smm(&regs) ? : regs.eax & 0xff; } +static int i8k_get_fan_type(int fan) +{ + /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */ + static int types[2] = { INT_MIN, INT_MIN }; + + if (types[fan] == INT_MIN) + types[fan] = _i8k_get_fan_type(fan); + + return types[fan]; +} + /* * Read the fan nominal rpm for specific fan speed. */ @@ -392,9 +408,11 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) break; case I8K_MACHINE_ID: - memset(buff, 0, 16); - strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), - sizeof(buff)); + if (restricted && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + memset(buff, 0, sizeof(buff)); + strlcpy(buff, bios_machineid, sizeof(buff)); break; case I8K_FN_STATUS: @@ -511,7 +529,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset) seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n", I8K_PROC_FMT, bios_version, - i8k_get_dmi_data(DMI_PRODUCT_SERIAL), + (restricted && !capable(CAP_SYS_ADMIN)) ? 
"-1" : bios_machineid, cpu_temp, left_fan, right_fan, left_speed, right_speed, ac_power, fn_key); @@ -718,6 +736,9 @@ static struct attribute *i8k_attrs[] = { static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, int index) { + if (disallow_fan_type_call && + (index == 9 || index == 12)) + return 0; if (index >= 0 && index <= 1 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) return 0; @@ -767,13 +788,17 @@ static int __init i8k_init_hwmon(void) if (err >= 0) i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4; - /* First fan attributes, if fan type is OK */ - err = i8k_get_fan_type(0); + /* First fan attributes, if fan status or type is OK */ + err = i8k_get_fan_status(0); + if (err < 0) + err = i8k_get_fan_type(0); if (err >= 0) i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1; - /* Second fan attributes, if fan type is OK */ - err = i8k_get_fan_type(1); + /* Second fan attributes, if fan status or type is OK */ + err = i8k_get_fan_status(1); + if (err < 0) + err = i8k_get_fan_type(1); if (err >= 0) i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; @@ -929,12 +954,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = { MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); -static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { +/* + * On some machines once I8K_SMM_GET_FAN_TYPE is issued then CPU fan speed + * randomly going up and down due to bug in Dell SMM or BIOS. Here is blacklist + * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call. + * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121 + */ +static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = { { - /* - * CPU fan speed going up and down on Dell Studio XPS 8000 - * for unknown reasons. - */ .ident = "Dell Studio XPS 8000", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), @@ -942,16 +969,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { }, }, { - /* - * CPU fan speed going up and down on Dell Studio XPS 8100 - * for unknown reasons. 
- */ .ident = "Dell Studio XPS 8100", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), }, }, + { + .ident = "Dell Inspiron 580", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "), + }, + }, { } }; @@ -966,8 +996,7 @@ static int __init i8k_probe(void) /* * Get DMI information */ - if (!dmi_check_system(i8k_dmi_table) || - dmi_check_system(i8k_blacklist_dmi_table)) { + if (!dmi_check_system(i8k_dmi_table)) { if (!ignore_dmi && !force) return -ENODEV; @@ -978,8 +1007,13 @@ static int __init i8k_probe(void) i8k_get_dmi_data(DMI_BIOS_VERSION)); } + if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) + disallow_fan_type_call = true; + strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), sizeof(bios_version)); + strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), + sizeof(bios_machineid)); /* * Get SMM Dell signature diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 766b052ade1d..294444d5f59e 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -792,11 +792,14 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) drvdata->out_mode == TMC_ETR_OUT_MODE_USB) { drvdata->usbch = usb_qdss_open("qdss", drvdata, usb_notifier); - if (IS_ERR(drvdata->usbch)) { + if (IS_ERR_OR_NULL(drvdata->usbch)) { dev_err(drvdata->dev, "usb_qdss_open failed\n"); ret = PTR_ERR(drvdata->usbch); pm_runtime_put(drvdata->dev); mutex_unlock(&drvdata->mem_lock); + if (!ret) + ret = -ENODEV; + return ret; } } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETB || @@ -1846,12 +1849,13 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) struct device_node *np = adev->dev.of_node; struct coresight_cti_data *ctidata; - if (np) { - pdata = of_get_coresight_platform_data(dev, np); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); - adev->dev.platform_data = pdata; - } + if (!np) + return -ENODEV; + + pdata = of_get_coresight_platform_data(dev, np); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + adev->dev.platform_data = pdata; drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig index e7a348807f0c..847a39b35307 100644 --- a/drivers/hwtracing/stm/Kconfig +++ b/drivers/hwtracing/stm/Kconfig @@ -9,6 +9,8 @@ config STM Say Y here to enable System Trace Module device support. +if STM + config STM_DUMMY tristate "Dummy STM driver" help @@ -25,3 +27,16 @@ config STM_SOURCE_CONSOLE If you want to send kernel console messages over STM devices, say Y. + +config STM_SOURCE_HEARTBEAT + tristate "Heartbeat over STM devices" + help + This is a kernel space trace source that sends periodic + heartbeat messages to trace hosts over STM devices. It is + also useful for testing stm class drivers and the stm class + framework itself. + + If you want to send heartbeat messages over STM devices, + say Y. 
+ +endif diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile index f9312c38dd7a..a9ce3d487e57 100644 --- a/drivers/hwtracing/stm/Makefile +++ b/drivers/hwtracing/stm/Makefile @@ -5,5 +5,7 @@ stm_core-y := core.o policy.o obj-$(CONFIG_STM_DUMMY) += dummy_stm.o obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o +obj-$(CONFIG_STM_SOURCE_HEARTBEAT) += stm_heartbeat.o stm_console-y := console.o +stm_heartbeat-y := heartbeat.o diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index b6445d9e5453..02095410cb33 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -67,9 +67,24 @@ static ssize_t channels_show(struct device *dev, static DEVICE_ATTR_RO(channels); +static ssize_t hw_override_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct stm_device *stm = to_stm_device(dev); + int ret; + + ret = sprintf(buf, "%u\n", stm->data->hw_override); + + return ret; +} + +static DEVICE_ATTR_RO(hw_override); + static struct attribute *stm_attrs[] = { &dev_attr_masters.attr, &dev_attr_channels.attr, + &dev_attr_hw_override.attr, NULL, }; @@ -113,6 +128,7 @@ struct stm_device *stm_find_device(const char *buf) stm = to_stm_device(dev); if (!try_module_get(stm->owner)) { + /* matches class_find_device() above */ put_device(dev); return NULL; } @@ -125,7 +141,7 @@ struct stm_device *stm_find_device(const char *buf) * @stm: stm device, previously acquired by stm_find_device() * * This drops the module reference and device reference taken by - * stm_find_device(). + * stm_find_device() or stm_char_open(). */ void stm_put_device(struct stm_device *stm) { @@ -185,6 +201,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output) { struct stp_master *master = stm_master(stm, output->master); + lockdep_assert_held(&stm->mc_lock); + lockdep_assert_held(&output->lock); + if (WARN_ON_ONCE(master->nr_free < output->nr_chans)) return; @@ -199,6 +218,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output) { struct stp_master *master = stm_master(stm, output->master); + lockdep_assert_held(&stm->mc_lock); + lockdep_assert_held(&output->lock); + bitmap_release_region(&master->chan_map[0], output->channel, ilog2(output->nr_chans)); @@ -288,6 +310,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width, } spin_lock(&stm->mc_lock); + spin_lock(&output->lock); /* output is already assigned -- shouldn't happen */ if (WARN_ON_ONCE(output->nr_chans)) goto unlock; @@ -304,6 +327,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width, ret = 0; unlock: + spin_unlock(&output->lock); spin_unlock(&stm->mc_lock); return ret; @@ -312,11 +336,18 @@ unlock: static void stm_output_free(struct stm_device *stm, struct stm_output *output) { spin_lock(&stm->mc_lock); + spin_lock(&output->lock); if (output->nr_chans) stm_output_disclaim(stm, output); + spin_unlock(&output->lock); spin_unlock(&stm->mc_lock); } +static void stm_output_init(struct stm_output *output) +{ + spin_lock_init(&output->lock); +} + static int major_match(struct device *dev, const void *data) { unsigned int major = *(unsigned int *)data; @@ -339,6 +370,7 @@ static int stm_char_open(struct inode *inode, struct file *file) if (!stmf) return -ENOMEM; + stm_output_init(&stmf->output); stmf->stm = to_stm_device(dev); if (!try_module_get(stmf->stm->owner)) @@ -349,6 +381,8 @@ static int stm_char_open(struct inode *inode, struct file *file) return nonseekable_open(inode, file); err_free: + /* 
matches class_find_device() above */ + put_device(dev); kfree(stmf); return err; @@ -357,9 +391,19 @@ err_free: static int stm_char_release(struct inode *inode, struct file *file) { struct stm_file *stmf = file->private_data; + struct stm_device *stm = stmf->stm; + + if (stm->data->unlink) + stm->data->unlink(stm->data, stmf->output.master, + stmf->output.channel); + + stm_output_free(stm, &stmf->output); - stm_output_free(stmf->stm, &stmf->output); - stm_put_device(stmf->stm); + /* + * matches the stm_char_open()'s + * class_find_device() + try_module_get() + */ + stm_put_device(stm); kfree(stmf); return 0; @@ -380,8 +424,8 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width) return ret; } -static void stm_write(struct stm_data *data, unsigned int master, - unsigned int channel, const char *buf, size_t count) +static ssize_t stm_write(struct stm_data *data, unsigned int master, + unsigned int channel, const char *buf, size_t count) { unsigned int flags = STP_PACKET_TIMESTAMPED; const unsigned char *p = buf, nil = 0; @@ -393,9 +437,14 @@ static void stm_write(struct stm_data *data, unsigned int master, sz = data->packet(data, master, channel, STP_PACKET_DATA, flags, sz, p); flags = 0; + + if (sz < 0) + break; } data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil); + + return pos; } static ssize_t stm_char_write(struct file *file, const char __user *buf, @@ -406,6 +455,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, char *kbuf; int err; + if (count + 1 > PAGE_SIZE) + count = PAGE_SIZE - 1; + /* * if no m/c have been assigned to this writer up to this * point, use "default" policy entry @@ -430,8 +482,8 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, return -EFAULT; } - stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf, - count); + count = stm_write(stm->data, stmf->output.master, stmf->output.channel, + kbuf, count); kfree(kbuf); @@ -509,16 +561,12 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) if (ret) goto err_free; - ret = 0; - if (stm->data->link) ret = stm->data->link(stm->data, stmf->output.master, stmf->output.channel); - if (ret) { + if (ret) stm_output_free(stmf->stm, &stmf->output); - stm_put_device(stmf->stm); - } err_free: kfree(id); @@ -633,17 +681,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, stm->dev.parent = parent; stm->dev.release = stm_device_release; - err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name); - if (err) - goto err_device; - - err = device_add(&stm->dev); - if (err) - goto err_device; - + mutex_init(&stm->link_mutex); spin_lock_init(&stm->link_lock); INIT_LIST_HEAD(&stm->link_list); + /* initialize the object before it is accessible via sysfs */ spin_lock_init(&stm->mc_lock); mutex_init(&stm->policy_mutex); stm->sw_nmasters = nmasters; @@ -651,9 +693,20 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, stm->data = stm_data; stm_data->stm = stm; + err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name); + if (err) + goto err_device; + + err = device_add(&stm->dev); + if (err) + goto err_device; + return 0; err_device: + unregister_chrdev(stm->major, stm_data->name); + + /* matches device_initialize() above */ put_device(&stm->dev); err_free: kfree(stm); @@ -662,20 +715,28 @@ err_free: } EXPORT_SYMBOL_GPL(stm_register_device); -static void __stm_source_link_drop(struct stm_source_device *src, - struct stm_device *stm); +static int 
__stm_source_link_drop(struct stm_source_device *src, + struct stm_device *stm); void stm_unregister_device(struct stm_data *stm_data) { struct stm_device *stm = stm_data->stm; struct stm_source_device *src, *iter; - int i; + int i, ret; - spin_lock(&stm->link_lock); + mutex_lock(&stm->link_mutex); list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) { - __stm_source_link_drop(src, stm); + ret = __stm_source_link_drop(src, stm); + /* + * src <-> stm link must not change under the same + * stm::link_mutex, so complain loudly if it has; + * also in this situation ret!=0 means this src is + * not connected to this stm and it should be otherwise + * safe to proceed with the tear-down of stm. + */ + WARN_ON_ONCE(ret); } - spin_unlock(&stm->link_lock); + mutex_unlock(&stm->link_mutex); synchronize_srcu(&stm_source_srcu); @@ -694,6 +755,17 @@ void stm_unregister_device(struct stm_data *stm_data) } EXPORT_SYMBOL_GPL(stm_unregister_device); +/* + * stm::link_list access serialization uses a spinlock and a mutex; holding + * either of them guarantees that the list is stable; modification requires + * holding both of them. + * + * Lock ordering is as follows: + * stm::link_mutex + * stm::link_lock + * src::link_lock + */ + /** * stm_source_link_add() - connect an stm_source device to an stm device * @src: stm_source device @@ -710,6 +782,7 @@ static int stm_source_link_add(struct stm_source_device *src, char *id; int err; + mutex_lock(&stm->link_mutex); spin_lock(&stm->link_lock); spin_lock(&src->link_lock); @@ -719,6 +792,7 @@ static int stm_source_link_add(struct stm_source_device *src, spin_unlock(&src->link_lock); spin_unlock(&stm->link_lock); + mutex_unlock(&stm->link_mutex); id = kstrdup(src->data->name, GFP_KERNEL); if (id) { @@ -753,9 +827,9 @@ static int stm_source_link_add(struct stm_source_device *src, fail_free_output: stm_output_free(stm, &src->output); - stm_put_device(stm); fail_detach: + mutex_lock(&stm->link_mutex); spin_lock(&stm->link_lock); spin_lock(&src->link_lock); @@ -764,6 +838,7 @@ fail_detach: spin_unlock(&src->link_lock); spin_unlock(&stm->link_lock); + mutex_unlock(&stm->link_mutex); return err; } @@ -776,28 +851,55 @@ fail_detach: * If @stm is @src::link, disconnect them from one another and put the * reference on the @stm device. * - * Caller must hold stm::link_lock. + * Caller must hold stm::link_mutex. */ -static void __stm_source_link_drop(struct stm_source_device *src, - struct stm_device *stm) +static int __stm_source_link_drop(struct stm_source_device *src, + struct stm_device *stm) { struct stm_device *link; + int ret = 0; + + lockdep_assert_held(&stm->link_mutex); + /* for stm::link_list modification, we hold both mutex and spinlock */ + spin_lock(&stm->link_lock); spin_lock(&src->link_lock); link = srcu_dereference_check(src->link, &stm_source_srcu, 1); - if (WARN_ON_ONCE(link != stm)) { - spin_unlock(&src->link_lock); - return; + + /* + * The linked device may have changed since we last looked, because + * we weren't holding the src::link_lock back then; if this is the + * case, tell the caller to retry. 
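Spelled out as a standalone sketch, the lock ordering documented above for stm::link_list manipulation looks like the following (hypothetical helper, relying on the link_mutex/link_lock fields this series adds to the private stm.h structures):

    static void example_modify_link(struct stm_device *stm,
                                    struct stm_source_device *src)
    {
            mutex_lock(&stm->link_mutex);   /* outermost: serializes (un)linking */
            spin_lock(&stm->link_lock);     /* protects stm->link_list */
            spin_lock(&src->link_lock);     /* innermost: protects src->link */

            /* ... add to or remove from stm->link_list, update src->link ... */

            spin_unlock(&src->link_lock);
            spin_unlock(&stm->link_lock);
            mutex_unlock(&stm->link_mutex);
    }

This is the same order stm_source_link_add() uses; __stm_source_link_drop() differs only in that the caller already holds the mutex, which is what its lockdep_assert_held() annotation checks.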
+ */ + if (link != stm) { + ret = -EAGAIN; + goto unlock; } stm_output_free(link, &src->output); - /* caller must hold stm::link_lock */ list_del_init(&src->link_entry); /* matches stm_find_device() from stm_source_link_store() */ stm_put_device(link); rcu_assign_pointer(src->link, NULL); +unlock: spin_unlock(&src->link_lock); + spin_unlock(&stm->link_lock); + + /* + * Call the unlink callbacks for both source and stm, when we know + * that we have actually performed the unlinking. + */ + if (!ret) { + if (src->data->unlink) + src->data->unlink(src->data); + + if (stm->data->unlink) + stm->data->unlink(stm->data, src->output.master, + src->output.channel); + } + + return ret; } /** @@ -813,21 +915,29 @@ static void __stm_source_link_drop(struct stm_source_device *src, static void stm_source_link_drop(struct stm_source_device *src) { struct stm_device *stm; - int idx; + int idx, ret; +retry: idx = srcu_read_lock(&stm_source_srcu); + /* + * The stm device will be valid for the duration of this + * read section, but the link may change before we grab + * the src::link_lock in __stm_source_link_drop(). + */ stm = srcu_dereference(src->link, &stm_source_srcu); + ret = 0; if (stm) { - if (src->data->unlink) - src->data->unlink(src->data); - - spin_lock(&stm->link_lock); - __stm_source_link_drop(src, stm); - spin_unlock(&stm->link_lock); + mutex_lock(&stm->link_mutex); + ret = __stm_source_link_drop(src, stm); + mutex_unlock(&stm->link_mutex); } srcu_read_unlock(&stm_source_srcu, idx); + + /* if it did change, retry */ + if (ret == -EAGAIN) + goto retry; } static ssize_t stm_source_link_show(struct device *dev, @@ -862,8 +972,10 @@ static ssize_t stm_source_link_store(struct device *dev, return -EINVAL; err = stm_source_link_add(src, link); - if (err) + if (err) { + /* matches the stm_find_device() above */ stm_put_device(link); + } return err ? 
: count; } @@ -925,6 +1037,7 @@ int stm_source_register_device(struct device *parent, if (err) goto err; + stm_output_init(&src->output); spin_lock_init(&src->link_lock); INIT_LIST_HEAD(&src->link_entry); src->data = data; @@ -973,9 +1086,9 @@ int stm_source_write(struct stm_source_data *data, unsigned int chan, stm = srcu_dereference(src->link, &stm_source_srcu); if (stm) - stm_write(stm->data, src->output.master, - src->output.channel + chan, - buf, count); + count = stm_write(stm->data, src->output.master, + src->output.channel + chan, + buf, count); else count = -ENODEV; diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c index 3709bef0b21f..a86612d989f9 100644 --- a/drivers/hwtracing/stm/dummy_stm.c +++ b/drivers/hwtracing/stm/dummy_stm.c @@ -40,22 +40,71 @@ dummy_stm_packet(struct stm_data *stm_data, unsigned int master, return size; } -static struct stm_data dummy_stm = { - .name = "dummy_stm", - .sw_start = 0x0000, - .sw_end = 0xffff, - .sw_nchannels = 0xffff, - .packet = dummy_stm_packet, -}; +#define DUMMY_STM_MAX 32 + +static struct stm_data dummy_stm[DUMMY_STM_MAX]; + +static int nr_dummies = 4; + +module_param(nr_dummies, int, 0400); + +static unsigned int fail_mode; + +module_param(fail_mode, int, 0600); + +static int dummy_stm_link(struct stm_data *data, unsigned int master, + unsigned int channel) +{ + if (fail_mode && (channel & fail_mode)) + return -EINVAL; + + return 0; +} static int dummy_stm_init(void) { - return stm_register_device(NULL, &dummy_stm, THIS_MODULE); + int i, ret = -ENOMEM; + + if (nr_dummies < 0 || nr_dummies > DUMMY_STM_MAX) + return -EINVAL; + + for (i = 0; i < nr_dummies; i++) { + dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i); + if (!dummy_stm[i].name) + goto fail_unregister; + + dummy_stm[i].sw_start = 0x0000; + dummy_stm[i].sw_end = 0xffff; + dummy_stm[i].sw_nchannels = 0xffff; + dummy_stm[i].packet = dummy_stm_packet; + dummy_stm[i].link = dummy_stm_link; + + ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE); + if (ret) + goto fail_free; + } + + return 0; + +fail_unregister: + for (i--; i >= 0; i--) { + stm_unregister_device(&dummy_stm[i]); +fail_free: + kfree(dummy_stm[i].name); + } + + return ret; + } static void dummy_stm_exit(void) { - stm_unregister_device(&dummy_stm); + int i; + + for (i = 0; i < nr_dummies; i++) { + stm_unregister_device(&dummy_stm[i]); + kfree(dummy_stm[i].name); + } } module_init(dummy_stm_init); diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c new file mode 100644 index 000000000000..3da7b673aab2 --- /dev/null +++ b/drivers/hwtracing/stm/heartbeat.c @@ -0,0 +1,126 @@ +/* + * Simple heartbeat STM source driver + * Copyright (c) 2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * Heartbeat STM source will send repetitive messages over STM devices to a + * trace host. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/hrtimer.h> +#include <linux/slab.h> +#include <linux/stm.h> + +#define STM_HEARTBEAT_MAX 32 + +static int nr_devs = 4; +static int interval_ms = 10; + +module_param(nr_devs, int, 0400); +module_param(interval_ms, int, 0600); + +static struct stm_heartbeat { + struct stm_source_data data; + struct hrtimer hrtimer; + unsigned int active; +} stm_heartbeat[STM_HEARTBEAT_MAX]; + +static const char str[] = "heartbeat stm source driver is here to serve you"; + +static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr) +{ + struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat, + hrtimer); + + stm_source_write(&heartbeat->data, 0, str, sizeof str); + if (heartbeat->active) + hrtimer_forward_now(hr, ms_to_ktime(interval_ms)); + + return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART; +} + +static int stm_heartbeat_link(struct stm_source_data *data) +{ + struct stm_heartbeat *heartbeat = + container_of(data, struct stm_heartbeat, data); + + heartbeat->active = 1; + hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms), + HRTIMER_MODE_ABS); + + return 0; +} + +static void stm_heartbeat_unlink(struct stm_source_data *data) +{ + struct stm_heartbeat *heartbeat = + container_of(data, struct stm_heartbeat, data); + + heartbeat->active = 0; + hrtimer_cancel(&heartbeat->hrtimer); +} + +static int stm_heartbeat_init(void) +{ + int i, ret = -ENOMEM; + + if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX) + return -EINVAL; + + for (i = 0; i < nr_devs; i++) { + stm_heartbeat[i].data.name = + kasprintf(GFP_KERNEL, "heartbeat.%d", i); + if (!stm_heartbeat[i].data.name) + goto fail_unregister; + + stm_heartbeat[i].data.nr_chans = 1; + stm_heartbeat[i].data.link = stm_heartbeat_link; + stm_heartbeat[i].data.unlink = stm_heartbeat_unlink; + hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); + stm_heartbeat[i].hrtimer.function = + stm_heartbeat_hrtimer_handler; + + ret = stm_source_register_device(NULL, &stm_heartbeat[i].data); + if (ret) + goto fail_free; + } + + return 0; + +fail_unregister: + for (i--; i >= 0; i--) { + stm_source_unregister_device(&stm_heartbeat[i].data); +fail_free: + kfree(stm_heartbeat[i].data.name); + } + + return ret; +} + +static void stm_heartbeat_exit(void) +{ + int i; + + for (i = 0; i < nr_devs; i++) { + stm_source_unregister_device(&stm_heartbeat[i].data); + kfree(stm_heartbeat[i].data.name); + } +} + +module_init(stm_heartbeat_init); +module_exit(stm_heartbeat_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("stm_heartbeat driver"); +MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>"); diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c index 11ab6d01adf6..1c061cb9bff0 100644 --- a/drivers/hwtracing/stm/policy.c +++ b/drivers/hwtracing/stm/policy.c @@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy) { struct stm_device *stm = policy->stm; + /* + * stp_policy_release() will not call here if the policy is already + * unbound; other users should not either, as no link exists between + * this policy and anything else in that case + */ if (WARN_ON_ONCE(!policy->stm)) return; - mutex_lock(&stm->policy_mutex); - stm->policy = NULL; - mutex_unlock(&stm->policy_mutex); + lockdep_assert_held(&stm->policy_mutex); + stm->policy = NULL; policy->stm = NULL; stm_put_device(stm); @@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy) static void 
stp_policy_release(struct config_item *item) { struct stp_policy *policy = to_stp_policy(item); + struct stm_device *stm = policy->stm; + /* a policy *can* be unbound and still exist in configfs tree */ + if (!stm) + return; + + mutex_lock(&stm->policy_mutex); stp_policy_unbind(policy); + mutex_unlock(&stm->policy_mutex); + kfree(policy); } @@ -320,16 +332,17 @@ stp_policies_make(struct config_group *group, const char *name) /* * node must look like <device_name>.<policy_name>, where - * <device_name> is the name of an existing stm device and - * <policy_name> is an arbitrary string + * <device_name> is the name of an existing stm device; may + * contain dots; + * <policy_name> is an arbitrary string; may not contain dots */ - p = strchr(devname, '.'); + p = strrchr(devname, '.'); if (!p) { kfree(devname); return ERR_PTR(-EINVAL); } - *p++ = '\0'; + *p = '\0'; stm = stm_find_device(devname); kfree(devname); diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h index 95ece0292c99..4e8c6926260f 100644 --- a/drivers/hwtracing/stm/stm.h +++ b/drivers/hwtracing/stm/stm.h @@ -45,6 +45,7 @@ struct stm_device { int major; unsigned int sw_nmasters; struct stm_data *data; + struct mutex link_mutex; spinlock_t link_lock; struct list_head link_list; /* master allocation */ @@ -56,6 +57,7 @@ struct stm_device { container_of((_d), struct stm_device, dev) struct stm_output { + spinlock_t lock; unsigned int master; unsigned int channel; unsigned int nr_chans; diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 923f56598d4b..3a9f106787d2 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro) mutex_lock(&st->buf_lock); ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); - if (ret) + if (ret < 0) goto error_ret; st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C); st->tx[1] = (ret & ~KXSD9_FS_MASK) | i; @@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_SCALE: ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); - if (ret) + if (ret < 0) goto error_ret; *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; ret = IIO_VAL_INT_PLUS_MICRO; diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 21e19b60e2b9..2123f0ac2e2a 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi) st = iio_priv(indio_dev); - st->reg = devm_regulator_get(&spi->dev, "vref"); - if (!IS_ERR_OR_NULL(st->reg)) { + st->reg = devm_regulator_get_optional(&spi->dev, "vref"); + if (!IS_ERR(st->reg)) { ret = regulator_enable(st->reg); if (ret) return ret; @@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi) st->vref_mv = ret / 1000; } else { + /* Any other error indicates that the regulator does exist */ + if (PTR_ERR(st->reg) != -ENODEV) + return PTR_ERR(st->reg); /* Use internal reference */ st->vref_mv = 2500; } diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index a7f61e881a49..dc5e7e70f951 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -55,7 +55,7 @@ static const struct { }, { /* IIO_HUMIDITYRELATIVE channel */ .shift = 8, - .mask = 2, + .mask = 3, }, }; @@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data, dev_err(&client->dev, "cannot read high byte measurement"); return ret; } - val = ret << 6; + val = ret << 8; ret = i2c_smbus_read_byte(client); if (ret < 0) { 
dev_err(&client->dev, "cannot read low byte measurement"); return ret; } - val |= ret >> 2; + val |= ret; return val; } @@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_SCALE: if (chan->type == IIO_TEMP) { - *val = 165; - *val2 = 65536 >> 2; + *val = 165000; + *val2 = 65536; return IIO_VAL_FRACTIONAL; } else { - *val = 0; - *val2 = 10000; - return IIO_VAL_INT_PLUS_MICRO; + *val = 100; + *val2 = 65536; + return IIO_VAL_FRACTIONAL; } break; case IIO_CHAN_INFO_OFFSET: - *val = -3971; - *val2 = 879096; + *val = -15887; + *val2 = 515151; return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index ae2806aafb72..0c52dfe64977 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig, /* Prevent the module from being removed whilst attached to a trigger */ __module_get(pf->indio_dev->info->driver_module); + + /* Get irq number */ pf->irq = iio_trigger_get_irq(trig); + if (pf->irq < 0) + goto out_put_module; + + /* Request irq */ ret = request_threaded_irq(pf->irq, pf->h, pf->thread, pf->type, pf->name, pf); - if (ret < 0) { - module_put(pf->indio_dev->info->driver_module); - return ret; - } + if (ret < 0) + goto out_put_irq; + /* Enable trigger in driver */ if (trig->ops && trig->ops->set_trigger_state && notinuse) { ret = trig->ops->set_trigger_state(trig, true); if (ret < 0) - module_put(pf->indio_dev->info->driver_module); + goto out_free_irq; } return ret; + +out_free_irq: + free_irq(pf->irq, pf); +out_put_irq: + iio_trigger_put_irq(trig, pf->irq); +out_put_module: + module_put(pf->indio_dev->info->driver_module); + return ret; } static int iio_trigger_detach_poll_func(struct iio_trigger *trig, diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index f6a07dc32ae4..4a6d9670e4cd 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c @@ -1005,6 +1005,7 @@ static int apds9960_probe(struct i2c_client *client, iio_device_attach_buffer(indio_dev, buffer); + indio_dev->dev.parent = &client->dev; indio_dev->info = &apds9960_info; indio_dev->name = APDS9960_DRV_NAME; indio_dev->channels = apds9960_channels; diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index b39a2fb0671c..5056bd68573f 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -28,15 +28,21 @@ #include <linux/iio/common/st_sensors.h> #include "st_pressure.h" +#define MCELSIUS_PER_CELSIUS 1000 + +/* Default pressure sensitivity */ #define ST_PRESS_LSB_PER_MBAR 4096UL #define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \ ST_PRESS_LSB_PER_MBAR) + +/* Default temperature sensitivity */ #define ST_PRESS_LSB_PER_CELSIUS 480UL -#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \ - ST_PRESS_LSB_PER_CELSIUS) +#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL + #define ST_PRESS_NUMBER_DATA_CHANNELS 1 /* FULLSCALE */ +#define ST_PRESS_FS_AVL_1100MB 1100 #define ST_PRESS_FS_AVL_1260MB 1260 #define ST_PRESS_1_OUT_XL_ADDR 0x28 @@ -54,18 +60,20 @@ #define ST_PRESS_LPS331AP_PW_MASK 0x80 #define ST_PRESS_LPS331AP_FS_ADDR 0x23 #define ST_PRESS_LPS331AP_FS_MASK 0x30 -#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00 -#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE -#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN 
ST_PRESS_CELSIUS_NANO_SCALE #define ST_PRESS_LPS331AP_BDU_ADDR 0x20 #define ST_PRESS_LPS331AP_BDU_MASK 0x04 #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22 #define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04 #define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20 #define ST_PRESS_LPS331AP_MULTIREAD_BIT true -#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500 /* CUSTOM VALUES FOR LPS001WP SENSOR */ + +/* LPS001WP pressure resolution */ +#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL +/* LPS001WP temperature resolution */ +#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL + #define ST_PRESS_LPS001WP_WAI_EXP 0xba #define ST_PRESS_LPS001WP_ODR_ADDR 0x20 #define ST_PRESS_LPS001WP_ODR_MASK 0x30 @@ -74,6 +82,8 @@ #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03 #define ST_PRESS_LPS001WP_PW_ADDR 0x20 #define ST_PRESS_LPS001WP_PW_MASK 0x40 +#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \ + (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR) #define ST_PRESS_LPS001WP_BDU_ADDR 0x20 #define ST_PRESS_LPS001WP_BDU_MASK 0x04 #define ST_PRESS_LPS001WP_MULTIREAD_BIT true @@ -90,18 +100,12 @@ #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04 #define ST_PRESS_LPS25H_PW_ADDR 0x20 #define ST_PRESS_LPS25H_PW_MASK 0x80 -#define ST_PRESS_LPS25H_FS_ADDR 0x00 -#define ST_PRESS_LPS25H_FS_MASK 0x00 -#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00 -#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE -#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE #define ST_PRESS_LPS25H_BDU_ADDR 0x20 #define ST_PRESS_LPS25H_BDU_MASK 0x04 #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23 #define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01 #define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10 #define ST_PRESS_LPS25H_MULTIREAD_BIT true -#define ST_PRESS_LPS25H_TEMP_OFFSET 42500 #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 #define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b @@ -153,7 +157,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = { .storagebits = 16, .endianness = IIO_LE, }, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), .modified = 0, }, { @@ -169,7 +175,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = { }, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_OFFSET), + BIT(IIO_CHAN_INFO_SCALE), .modified = 0, }, IIO_CHAN_SOFT_TIMESTAMP(1) @@ -204,11 +210,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .addr = ST_PRESS_LPS331AP_FS_ADDR, .mask = ST_PRESS_LPS331AP_FS_MASK, .fs_avl = { + /* + * Pressure and temperature sensitivity values + * as defined in table 3 of LPS331AP datasheet. + */ [0] = { .num = ST_PRESS_FS_AVL_1260MB, - .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL, - .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN, - .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN, + .gain = ST_PRESS_KPASCAL_NANO_SCALE, + .gain2 = ST_PRESS_LSB_PER_CELSIUS, }, }, }, @@ -248,7 +257,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, }, .fs = { - .addr = 0, + .fs_avl = { + /* + * Pressure and temperature resolution values + * as defined in table 3 of LPS001WP datasheet. 
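Worked through (illustrative arithmetic, using the usual IIO convention processed = (raw + offset) * scale), these sensitivity constants translate into the values st_press_read_raw() reports further down:

    LPS001WP pressure:    gain = 100000000 / 16 = 6250000 nano-kPa per LSB,
                          i.e. a scale of 0.00625 kPa (1/16 mbar) per LSB.
    LPS001WP temperature: scale = 1000 / 64 = 15.625 milli-degC per LSB
                          (no offset channel is exposed for this part).

    Temperature on the 480 LSB/degC parts (LPS331AP, LPS25H):
        scale  = 1000 / 480 milli-degC per LSB        (IIO_VAL_FRACTIONAL)
        offset = 42500 * 480 / 1000 = 20400 LSB
        => (raw + 20400) * 1000/480 = raw * 1000/480 + 42500 milli-degC,
           which reproduces the 42.5 degC offset previously hard-coded via
           ST_PRESS_LPS331AP_TEMP_OFFSET / ST_PRESS_LPS25H_TEMP_OFFSET.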
+ */ + [0] = { + .num = ST_PRESS_FS_AVL_1100MB, + .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN, + .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS, + }, + }, }, .bdu = { .addr = ST_PRESS_LPS001WP_BDU_ADDR, @@ -285,14 +304,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, }, .fs = { - .addr = ST_PRESS_LPS25H_FS_ADDR, - .mask = ST_PRESS_LPS25H_FS_MASK, .fs_avl = { + /* + * Pressure and temperature sensitivity values + * as defined in table 3 of LPS25H datasheet. + */ [0] = { .num = ST_PRESS_FS_AVL_1260MB, - .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL, - .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN, - .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN, + .gain = ST_PRESS_KPASCAL_NANO_SCALE, + .gain2 = ST_PRESS_LSB_PER_CELSIUS, }, }, }, @@ -346,26 +366,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: - *val = 0; - switch (ch->type) { case IIO_PRESSURE: + *val = 0; *val2 = press_data->current_fullscale->gain; - break; + return IIO_VAL_INT_PLUS_NANO; case IIO_TEMP: + *val = MCELSIUS_PER_CELSIUS; *val2 = press_data->current_fullscale->gain2; - break; + return IIO_VAL_FRACTIONAL; default: err = -EINVAL; goto read_error; } - return IIO_VAL_INT_PLUS_NANO; case IIO_CHAN_INFO_OFFSET: switch (ch->type) { case IIO_TEMP: - *val = 425; - *val2 = 10; + *val = ST_PRESS_MILLI_CELSIUS_OFFSET * + press_data->current_fullscale->gain2; + *val2 = MCELSIUS_PER_CELSIUS; break; default: err = -EINVAL; diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index f4d29d5dbd5f..e2f926cdcad2 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c @@ -64,6 +64,7 @@ struct as3935_state { struct delayed_work work; u32 tune_cap; + u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */ u8 buf[2] ____cacheline_aligned; }; @@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = { .type = IIO_PROXIMITY, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_PROCESSED), + BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_SCALE), .scan_index = 0, .scan_type = { .sign = 'u', @@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev, /* storm out of range */ if (*val == AS3935_DATA_MASK) return -EINVAL; - *val *= 1000; + + if (m == IIO_CHAN_INFO_PROCESSED) + *val *= 1000; + break; + case IIO_CHAN_INFO_SCALE: + *val = 1000; break; default: return -EINVAL; @@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private) ret = as3935_read(st, AS3935_DATA, &val); if (ret) goto err_read; - val &= AS3935_DATA_MASK; - val *= 1000; - iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp); + st->buffer[0] = val & AS3935_DATA_MASK; + iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, + pf->timestamp); err_read: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index d6d2b3582910..4d8e7f18a9af 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3430,14 +3430,14 @@ static int cm_establish(struct ib_cm_id *cm_id) work->cm_event.event = IB_CM_USER_ESTABLISHED; /* Check if the device started its remove_one */ - spin_lock_irq(&cm.lock); + spin_lock_irqsave(&cm.lock, flags); if (!cm_dev->going_down) { queue_delayed_work(cm.wq, &work->work, 0); } else { kfree(work); ret = -ENODEV; } - spin_unlock_irq(&cm.lock); + spin_unlock_irqrestore(&cm.lock, flags); out: return ret; diff --git 
a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 86af71351d9a..06da56bda201 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); ah->av.ib.g_slid = ah_attr->src_path_bits; + ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); if (ah_attr->ah_flags & IB_AH_GRH) { ah->av.ib.g_slid |= 0x80; ah->av.ib.gid_index = ah_attr->grh.sgid_index; @@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support)) --ah->av.ib.stat_rate; } - ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); return &ah->ibah; } diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 3db9a659719b..5f0f4fc58f43 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -1519,7 +1519,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, if (dev->use_fast_reg) { state.sg = idb_sg; - sg_set_buf(idb_sg, req->indirect_desc, idb_len); + sg_init_one(idb_sg, req->indirect_desc, idb_len); idb_sg->dma_address = req->indirect_dma_addr; /* hack! */ #ifdef CONFIG_NEED_SG_DMA_LENGTH idb_sg->dma_length = idb_sg->length; /* hack^2 */ diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c index f2261ab54701..18663d4edae5 100644 --- a/drivers/input/misc/pwm-beeper.c +++ b/drivers/input/misc/pwm-beeper.c @@ -20,21 +20,40 @@ #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/slab.h> +#include <linux/workqueue.h> struct pwm_beeper { struct input_dev *input; struct pwm_device *pwm; + struct work_struct work; unsigned long period; }; #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x)) +static void __pwm_beeper_set(struct pwm_beeper *beeper) +{ + unsigned long period = beeper->period; + + if (period) { + pwm_config(beeper->pwm, period / 2, period); + pwm_enable(beeper->pwm); + } else + pwm_disable(beeper->pwm); +} + +static void pwm_beeper_work(struct work_struct *work) +{ + struct pwm_beeper *beeper = + container_of(work, struct pwm_beeper, work); + + __pwm_beeper_set(beeper); +} + static int pwm_beeper_event(struct input_dev *input, unsigned int type, unsigned int code, int value) { - int ret = 0; struct pwm_beeper *beeper = input_get_drvdata(input); - unsigned long period; if (type != EV_SND || value < 0) return -EINVAL; @@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input, return -EINVAL; } - if (value == 0) { - pwm_disable(beeper->pwm); - } else { - period = HZ_TO_NANOSECONDS(value); - ret = pwm_config(beeper->pwm, period / 2, period); - if (ret) - return ret; - ret = pwm_enable(beeper->pwm); - if (ret) - return ret; - beeper->period = period; - } + if (value == 0) + beeper->period = 0; + else + beeper->period = HZ_TO_NANOSECONDS(value); + + schedule_work(&beeper->work); return 0; } +static void pwm_beeper_stop(struct pwm_beeper *beeper) +{ + cancel_work_sync(&beeper->work); + + if (beeper->period) + pwm_disable(beeper->pwm); +} + +static void pwm_beeper_close(struct input_dev *input) +{ + struct pwm_beeper *beeper = input_get_drvdata(input); + + pwm_beeper_stop(beeper); +} + static int pwm_beeper_probe(struct platform_device *pdev) { unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev); @@ -87,6 +115,8 @@ static int 
pwm_beeper_probe(struct platform_device *pdev) goto err_free; } + INIT_WORK(&beeper->work, pwm_beeper_work); + beeper->input = input_allocate_device(); if (!beeper->input) { dev_err(&pdev->dev, "Failed to allocate input device\n"); @@ -106,6 +136,7 @@ static int pwm_beeper_probe(struct platform_device *pdev) beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL); beeper->input->event = pwm_beeper_event; + beeper->input->close = pwm_beeper_close; input_set_drvdata(beeper->input, beeper); @@ -135,7 +166,6 @@ static int pwm_beeper_remove(struct platform_device *pdev) input_unregister_device(beeper->input); - pwm_disable(beeper->pwm); pwm_free(beeper->pwm); kfree(beeper); @@ -147,8 +177,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev) { struct pwm_beeper *beeper = dev_get_drvdata(dev); - if (beeper->period) - pwm_disable(beeper->pwm); + pwm_beeper_stop(beeper); return 0; } @@ -157,10 +186,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev) { struct pwm_beeper *beeper = dev_get_drvdata(dev); - if (beeper->period) { - pwm_config(beeper->pwm, beeper->period / 2, beeper->period); - pwm_enable(beeper->pwm); - } + if (beeper->period) + __pwm_beeper_set(beeper); return 0; } diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 5adbcedcb81c..2bb4c8633d3b 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -893,9 +893,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } #ifdef CONFIG_COMPAT + +#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t) + static long uinput_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { + if (cmd == UI_SET_PHYS_COMPAT) + cmd = UI_SET_PHYS; + return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg)); } #endif diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index bf4959f4225b..94f1bf772ec9 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1363,13 +1363,23 @@ static int __init amd_iommu_init_pci(void) break; } + /* + * Order is important here to make sure any unity map requirements are + * fulfilled. The unity mappings are created and written to the device + * table during the amd_iommu_init_api() call. + * + * After that we call init_device_table_dma() to make sure any + * uninitialized DTE will block DMA, and in the end we flush the caches + * of all IOMMUs to make sure the changes to the device table are + * active. 
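Backing up to the uinput hunk above: UI_SET_PHYS_COMPAT exists because an ioctl number encodes the size of its argument type, so the same _IOW() expression evaluates to different numbers for 32-bit and 64-bit callers. Worked out (editorial arithmetic, with UINPUT_IOCTL_BASE being 'U' = 0x55 and nr 108 = 0x6c):

    UI_SET_PHYS (64-bit kernel):  _IOW('U', 108, char *)         -> 0x4008556c  (size field 8)
    32-bit userspace computes:    _IOW('U', 108, compat_uptr_t)  -> 0x4004556c  (size field 4)

so the compat path has to rewrite the 32-bit number to UI_SET_PHYS before handing it to uinput_ioctl_handler().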
+ */ + ret = amd_iommu_init_api(); + init_device_table_dma(); for_each_iommu(iommu) iommu_flush_all_caches(iommu); - ret = amd_iommu_init_api(); - if (!ret) print_iommu_info(); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 4e5118a4cd30..8487987458a1 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1919,6 +1919,7 @@ static struct iommu_ops arm_smmu_ops = { .detach_dev = arm_smmu_detach_dev, .map = arm_smmu_map, .unmap = arm_smmu_unmap, + .map_sg = default_iommu_map_sg, .iova_to_phys = arm_smmu_iova_to_phys, .add_device = arm_smmu_add_device, .remove_device = arm_smmu_remove_device, diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a2e1b7f14df2..6763a4dfed94 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3169,11 +3169,6 @@ static int __init init_dmars(void) } } - iommu_flush_write_buffer(iommu); - iommu_set_root_entry(iommu); - iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); - if (!ecap_pass_through(iommu->ecap)) hw_pass_through = 0; #ifdef CONFIG_INTEL_IOMMU_SVM @@ -3182,6 +3177,18 @@ static int __init init_dmars(void) #endif } + /* + * Now that qi is enabled on all iommus, set the root entry and flush + * caches. This is required on some Intel X58 chipsets, otherwise the + * flush_context function will loop forever and the boot hangs. + */ + for_each_active_iommu(iommu, drhd) { + iommu_flush_write_buffer(iommu); + iommu_set_root_entry(iommu); + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); + } + if (iommu_pass_through) iommu_identity_mapping |= IDENTMAP_ALL; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 50c8c92d575d..cfdc235c1d28 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -521,6 +521,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs if (static_key_true(&supports_deactivate)) gic_write_dir(irqnr); #ifdef CONFIG_SMP + /* + * Unlike GICv2, we don't need an smp_rmb() here. + * The control dependency from gic_read_iar to + * the ISB in gic_write_eoir is enough to ensure + * that any shared data read by handle_IPI will + * be read after the ACK. + */ handle_IPI(irqnr, regs); #else WARN_ONCE(true, "Unexpected SGI received!\n"); @@ -540,6 +547,15 @@ static void __init gic_dist_init(void) writel_relaxed(0, base + GICD_CTLR); gic_dist_wait_for_rwp(); + /* + * Configure SPIs as non-secure Group-1. This will only matter + * if the GIC only has a single security state. This will not + * do the right thing if the kernel is running in secure mode, + * but that's not the intended use case anyway. 
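The register arithmetic in the loop that follows is easier to see worked out: GICD_IGROUPRn holds one group bit per interrupt, 32 interrupts per 32-bit register, so for interrupt number i the bit lives at byte offset

    GICD_IGROUPR + (i / 32) * 4  ==  GICD_IGROUPR + i / 8        (for i a multiple of 32)

which is why the loop steps i by 32 and writes ~0 to mark a whole bank of 32 SPIs as (non-secure) Group-1 in a single store; the redistributor side later does the same for SGIs/PPIs 0-31 with one write to GICR_IGROUPR0.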
+ */ + for (i = 32; i < gic_data.irq_nr; i += 32) + writel_relaxed(~0, base + GICD_IGROUPR + i / 8); + gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp); /* Enable distributor with ARE, Group1 */ @@ -651,6 +667,9 @@ static void gic_cpu_init(void) rbase = gic_data_rdist_sgi_base(); + /* Configure SGIs/PPIs as non-secure Group-1 */ + writel_relaxed(~0, rbase + GICR_IGROUPR0); + gic_cpu_config(rbase, gic_redist_wait_for_rwp); /* Give LPIs a spin */ diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index fb940e92b64e..10b73d9bea78 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -405,6 +405,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) if (static_key_true(&supports_deactivate)) writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE); #ifdef CONFIG_SMP + /* + * Ensure any shared data written by the CPU sending + * the IPI is read after we've read the ACK register + * on the GIC. + * + * Pairs with the write barrier in gic_raise_softirq + */ + smp_rmb(); handle_IPI(irqnr, regs); #endif continue; diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c index 325bdb35e8a3..51fd79f101c8 100644 --- a/drivers/leds/leds-qpnp-flash-v2.c +++ b/drivers/leds/leds-qpnp-flash-v2.c @@ -58,6 +58,7 @@ #define FLASH_LED_HDRM_VOL_MASK GENMASK(7, 4) #define FLASH_LED_CURRENT_MASK GENMASK(6, 0) #define FLASH_LED_ENABLE_MASK GENMASK(2, 0) +#define FLASH_HW_STROBE_MASK GENMASK(2, 0) #define FLASH_LED_SAFETY_TMR_MASK GENMASK(7, 0) #define FLASH_LED_INT_RT_STS_MASK GENMASK(7, 0) #define FLASH_LED_ISC_WARMUP_DELAY_MASK GENMASK(1, 0) @@ -72,7 +73,7 @@ #define FLASH_LED_THERMAL_THRSH_MASK GENMASK(2, 0) #define FLASH_LED_THERMAL_OTST_MASK GENMASK(2, 0) #define FLASH_LED_MOD_CTRL_MASK BIT(7) -#define FLASH_LED_HW_SW_STROBE_SEL_MASK BIT(2) +#define FLASH_LED_HW_SW_STROBE_SEL_BIT BIT(2) #define FLASH_LED_VPH_DROOP_FAULT_MASK BIT(4) #define FLASH_LED_LMH_MITIGATION_EN_MASK BIT(0) #define FLASH_LED_CHGR_MITIGATION_EN_MASK BIT(4) @@ -811,7 +812,7 @@ static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode) } } - if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_MASK) { + if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) { rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i], led->pdata->hw_strobe_option, false); if (rc < 0) { @@ -831,7 +832,7 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on) { struct qpnp_flash_led *led = dev_get_drvdata(&snode->pdev->dev); int rc, i, addr_offset; - u8 val; + u8 val, mask; if (snode->enabled == on) { dev_warn(&led->pdev->dev, "Switch node is already %s!\n", @@ -869,9 +870,13 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on) continue; addr_offset = led->fnode[i].id; + if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) + mask = FLASH_HW_STROBE_MASK; + else + mask = FLASH_LED_HW_SW_STROBE_SEL_BIT; rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_STROBE_CTRL(led->base + addr_offset), - FLASH_LED_ENABLE_MASK, led->fnode[i].trigger); + mask, led->fnode[i].trigger); if (rc < 0) return rc; @@ -899,7 +904,7 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on) } } - if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_MASK) { + if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) { rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i], led->pdata->hw_strobe_option, true); if (rc < 0) { @@ -1389,7 +1394,7 @@ static int qpnp_flash_led_parse_each_led_dt(struct 
qpnp_flash_led *led, } fnode->trigger = (strobe_sel << 2) | (edge_trigger << 1) | active_high; - if (fnode->trigger & FLASH_LED_HW_SW_STROBE_SEL_MASK) { + if (fnode->trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) { if (of_find_property(node, "qcom,hw-strobe-gpio", NULL)) { fnode->hw_strobe_gpio = of_get_named_gpio(node, "qcom,hw-strobe-gpio", 0); diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c index 18d968d3711d..894c1d88b3ef 100644 --- a/drivers/leds/leds-qpnp-wled.c +++ b/drivers/leds/leds-qpnp-wled.c @@ -45,11 +45,13 @@ #define QPNP_WLED_SWITCH_FREQ_REG(b) (b + 0x4C) #define QPNP_WLED_OVP_REG(b) (b + 0x4D) #define QPNP_WLED_ILIM_REG(b) (b + 0x4E) +#define QPNP_WLED_AMOLED_VOUT_REG(b) (b + 0x4F) #define QPNP_WLED_SOFTSTART_RAMP_DLY(b) (b + 0x53) #define QPNP_WLED_VLOOP_COMP_RES_REG(b) (b + 0x55) #define QPNP_WLED_VLOOP_COMP_GM_REG(b) (b + 0x56) #define QPNP_WLED_PSM_CTRL_REG(b) (b + 0x5B) #define QPNP_WLED_SC_PRO_REG(b) (b + 0x5E) +#define QPNP_WLED_SWIRE_AVDD_REG(b) (b + 0x5F) #define QPNP_WLED_CTRL_SPARE_REG(b) (b + 0xDF) #define QPNP_WLED_TEST1_REG(b) (b + 0xE2) #define QPNP_WLED_TEST4_REG(b) (b + 0xE5) @@ -83,12 +85,15 @@ #define QPNP_WLED_VREF_PSM_DFLT_AMOLED_MV 450 #define QPNP_WLED_PSM_CTRL_OVERWRITE 0x80 -#define QPNP_WLED_ILIM_MASK 0xF8 -#define QPNP_WLED_ILIM_MIN_MA 105 -#define QPNP_WLED_ILIM_MAX_MA 1980 -#define QPNP_WLED_ILIM_STEP_MA 280 -#define QPNP_WLED_DFLT_ILIM_MA 980 -#define QPNP_WLED_ILIM_OVERWRITE 0x80 +#define QPNP_WLED_ILIM_MASK GENMASK(2, 0) +#define QPNP_WLED_ILIM_OVERWRITE BIT(7) +#define PMI8994_WLED_ILIM_MIN_MA 105 +#define PMI8994_WLED_ILIM_MAX_MA 1980 +#define PMI8994_WLED_DFLT_ILIM_MA 980 +#define PMI8994_AMOLED_DFLT_ILIM_MA 385 +#define PMICOBALT_WLED_ILIM_MAX_MA 1500 +#define PMICOBALT_WLED_DFLT_ILIM_MA 970 +#define PMICOBALT_AMOLED_DFLT_ILIM_MA 620 #define QPNP_WLED_BOOST_DUTY_MASK 0xFC #define QPNP_WLED_BOOST_DUTY_STEP_NS 52 #define QPNP_WLED_BOOST_DUTY_MIN_NS 26 @@ -196,11 +201,19 @@ #define NUM_SUPPORTED_AVDD_VOLTAGES 6 #define QPNP_WLED_DFLT_AVDD_MV 7600 +#define QPNP_WLED_AVDD_MIN_MV 5650 +#define QPNP_WLED_AVDD_MAX_MV 7900 +#define QPNP_WLED_AVDD_STEP_MV 150 #define QPNP_WLED_AVDD_MIN_TRIM_VAL 0x0 #define QPNP_WLED_AVDD_MAX_TRIM_VAL 0xF +#define QPNP_WLED_AVDD_SEL_SPMI_BIT BIT(7) #define QPNP_WLED_AVDD_SET_BIT BIT(4) #define NUM_SUPPORTED_OVP_THRESHOLDS 4 +#define NUM_SUPPORTED_ILIM_THRESHOLDS 8 + +#define QPNP_WLED_AVDD_MV_TO_REG(val) \ + ((val - QPNP_WLED_AVDD_MIN_MV) / QPNP_WLED_AVDD_STEP_MV) /* output feedback mode */ enum qpnp_wled_fdbk_op { @@ -254,6 +267,14 @@ static int qpnp_wled_ovp_thresholds_pmicobalt[NUM_SUPPORTED_OVP_THRESHOLDS] = { 31100, 29600, 19600, 18100, }; +static int qpnp_wled_ilim_settings_pmi8994[NUM_SUPPORTED_ILIM_THRESHOLDS] = { + 105, 385, 660, 980, 1150, 1420, 1700, 1980, +}; + +static int qpnp_wled_ilim_settings_pmicobalt[NUM_SUPPORTED_ILIM_THRESHOLDS] = { + 105, 280, 450, 620, 970, 1150, 1300, 1500, +}; + /** * qpnp_wled - wed data structure * @ cdev - led class device @@ -288,6 +309,7 @@ static int qpnp_wled_ovp_thresholds_pmicobalt[NUM_SUPPORTED_OVP_THRESHOLDS] = { * @ cons_sync_write_delay_us - delay between two consecutive writes to SYNC * @ strings - supported list of strings * @ num_strings - number of strings + * @ avdd_mode_spmi - enable avdd programming via spmi * @ en_9b_dim_res - enable or disable 9bit dimming * @ en_phase_stag - enable or disable phase staggering * @ en_cabc - enable or disable cabc @@ -330,6 +352,7 @@ struct qpnp_wled { u16 cons_sync_write_delay_us; u8 
strings[QPNP_WLED_MAX_STRINGS]; u8 num_strings; + bool avdd_mode_spmi; bool en_9b_dim_res; bool en_phase_stag; bool en_cabc; @@ -1089,6 +1112,142 @@ static int qpnp_wled_ovp_config(struct qpnp_wled *wled) return 0; } +static int qpnp_wled_avdd_trim_config(struct qpnp_wled *wled) +{ + int rc, i; + u8 reg; + + for (i = 0; i < NUM_SUPPORTED_AVDD_VOLTAGES; i++) { + if (wled->avdd_target_voltage_mv == + qpnp_wled_avdd_target_voltages[i]) + break; + } + + if (i == NUM_SUPPORTED_AVDD_VOLTAGES) { + dev_err(&wled->pdev->dev, + "Invalid avdd target voltage specified in device tree\n"); + return -EINVAL; + } + + /* Update WLED_OVP register based on desired target voltage */ + reg = qpnp_wled_ovp_reg_settings[i]; + rc = qpnp_wled_masked_write_reg(wled, QPNP_WLED_OVP_MASK, ®, + QPNP_WLED_OVP_REG(wled->ctrl_base)); + if (rc) + return rc; + + /* Update WLED_TRIM register based on desired target voltage */ + rc = qpnp_wled_read_reg(wled, ®, + QPNP_WLED_REF_7P7_TRIM_REG(wled->ctrl_base)); + if (rc) + return rc; + + reg += qpnp_wled_avdd_trim_adjustments[i]; + if ((s8)reg < QPNP_WLED_AVDD_MIN_TRIM_VAL || + (s8)reg > QPNP_WLED_AVDD_MAX_TRIM_VAL) { + dev_dbg(&wled->pdev->dev, + "adjusted trim %d is not within range, capping it\n", + (s8)reg); + if ((s8)reg < QPNP_WLED_AVDD_MIN_TRIM_VAL) + reg = QPNP_WLED_AVDD_MIN_TRIM_VAL; + else + reg = QPNP_WLED_AVDD_MAX_TRIM_VAL; + } + + reg &= QPNP_WLED_7P7_TRIM_MASK; + rc = qpnp_wled_sec_write_reg(wled, reg, + QPNP_WLED_REF_7P7_TRIM_REG(wled->ctrl_base)); + if (rc < 0) + dev_err(&wled->pdev->dev, "Write to 7P7_TRIM register failed, rc=%d\n", + rc); + return rc; +} + +static int qpnp_wled_avdd_mode_config(struct qpnp_wled *wled) +{ + int rc; + u8 reg = 0; + + /* + * At present, configuring the mode to SPMI/SWIRE for controlling + * AVDD voltage is available only in pmicobalt/pm2falcon. 
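For the code that follows, the AVDD register encoding reduces to simple arithmetic; a worked example with the default 7600 mV target (5650-7900 mV range, 150 mV steps):

    QPNP_WLED_AVDD_MV_TO_REG(7600) = (7600 - 5650) / 150 = 13 = 0x0d

    qcom,avdd-mode-spmi set:   0x0d | QPNP_WLED_AVDD_SEL_SPMI_BIT (bit 7) = 0x8d,
                               written to QPNP_WLED_AMOLED_VOUT_REG
    otherwise (SWIRE control): 0x0d written to QPNP_WLED_SWIRE_AVDD_REG

Out-of-range requests are clamped to the 5650-7900 mV window first, so the encoded value always fits in the low nibble (0x0 to 0xf).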
+ */ + if (wled->pmic_rev_id->pmic_subtype != PMICOBALT_SUBTYPE && + wled->pmic_rev_id->pmic_subtype != PM2FALCON_SUBTYPE) + return 0; + + /* AMOLED_VOUT should be configured for AMOLED */ + if (!wled->disp_type_amoled) + return 0; + + /* Configure avdd register */ + if (wled->avdd_target_voltage_mv > QPNP_WLED_AVDD_MAX_MV) { + dev_dbg(&wled->pdev->dev, "Capping avdd target voltage to %d\n", + QPNP_WLED_AVDD_MAX_MV); + wled->avdd_target_voltage_mv = QPNP_WLED_AVDD_MAX_MV; + } else if (wled->avdd_target_voltage_mv < QPNP_WLED_AVDD_MIN_MV) { + dev_info(&wled->pdev->dev, "Capping avdd target voltage to %d\n", + QPNP_WLED_AVDD_MIN_MV); + wled->avdd_target_voltage_mv = QPNP_WLED_AVDD_MIN_MV; + } + + reg = QPNP_WLED_AVDD_MV_TO_REG(wled->avdd_target_voltage_mv); + + if (wled->avdd_mode_spmi) { + reg |= QPNP_WLED_AVDD_SEL_SPMI_BIT; + rc = qpnp_wled_write_reg(wled, reg, + QPNP_WLED_AMOLED_VOUT_REG(wled->ctrl_base)); + } else { + rc = qpnp_wled_write_reg(wled, reg, + QPNP_WLED_SWIRE_AVDD_REG(wled->ctrl_base)); + } + + if (rc < 0) + dev_err(&wled->pdev->dev, "Write to VOUT/AVDD register failed, rc=%d\n", + rc); + return rc; +} + +static int qpnp_wled_ilim_config(struct qpnp_wled *wled) +{ + int rc, i, *ilim_table; + u8 reg; + + if (wled->ilim_ma < PMI8994_WLED_ILIM_MIN_MA) + wled->ilim_ma = PMI8994_WLED_ILIM_MIN_MA; + + if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE || + wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE) { + ilim_table = qpnp_wled_ilim_settings_pmicobalt; + if (wled->ilim_ma > PMICOBALT_WLED_ILIM_MAX_MA) + wled->ilim_ma = PMICOBALT_WLED_ILIM_MAX_MA; + } else { + ilim_table = qpnp_wled_ilim_settings_pmi8994; + if (wled->ilim_ma > PMI8994_WLED_ILIM_MAX_MA) + wled->ilim_ma = PMI8994_WLED_ILIM_MAX_MA; + } + + for (i = 0; i < NUM_SUPPORTED_ILIM_THRESHOLDS; i++) { + if (wled->ilim_ma == ilim_table[i]) + break; + } + + if (i == NUM_SUPPORTED_ILIM_THRESHOLDS) { + dev_err(&wled->pdev->dev, + "Invalid ilim threshold specified in device tree\n"); + return -EINVAL; + } + + reg = (i & QPNP_WLED_ILIM_MASK) | QPNP_WLED_ILIM_OVERWRITE; + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_ILIM_MASK | QPNP_WLED_ILIM_OVERWRITE, + ®, QPNP_WLED_ILIM_REG(wled->ctrl_base)); + if (rc < 0) + dev_err(&wled->pdev->dev, "Write to ILIM register failed, rc=%d\n", + rc); + return rc; +} + /* Configure WLED registers */ static int qpnp_wled_config(struct qpnp_wled *wled) { @@ -1131,24 +1290,10 @@ static int qpnp_wled_config(struct qpnp_wled *wled) return rc; /* Configure the ILIM register */ - if (wled->ilim_ma < QPNP_WLED_ILIM_MIN_MA) - wled->ilim_ma = QPNP_WLED_ILIM_MIN_MA; - else if (wled->ilim_ma > QPNP_WLED_ILIM_MAX_MA) - wled->ilim_ma = QPNP_WLED_ILIM_MAX_MA; - - rc = qpnp_wled_read_reg(wled, ®, - QPNP_WLED_ILIM_REG(wled->ctrl_base)); - if (rc < 0) + rc = qpnp_wled_ilim_config(wled); + if (rc < 0) { + pr_err("Error in configuring wled ilim, rc=%d\n", rc); return rc; - temp = (wled->ilim_ma / QPNP_WLED_ILIM_STEP_MA); - if (temp != (reg & ~QPNP_WLED_ILIM_MASK)) { - reg &= QPNP_WLED_ILIM_MASK; - reg |= temp; - reg |= QPNP_WLED_ILIM_OVERWRITE; - rc = qpnp_wled_write_reg(wled, reg, - QPNP_WLED_ILIM_REG(wled->ctrl_base)); - if (rc) - return rc; } /* Configure the Soft start Ramp delay: for AMOLED - 0,for LCD - 2 */ @@ -1199,50 +1344,15 @@ static int qpnp_wled_config(struct qpnp_wled *wled) } if (is_avdd_trim_adjustment_required(wled)) { - for (i = 0; i < NUM_SUPPORTED_AVDD_VOLTAGES; i++) { - if (wled->avdd_target_voltage_mv == - qpnp_wled_avdd_target_voltages[i]) - break; - } - - if (i == 
NUM_SUPPORTED_AVDD_VOLTAGES) { - dev_err(&wled->pdev->dev, - "Invalid avdd target voltage specified in device tree\n"); - return -EINVAL; - } - - /* Update WLED_OVP register based on desired target voltage */ - reg = qpnp_wled_ovp_reg_settings[i]; - rc = qpnp_wled_masked_write_reg(wled, QPNP_WLED_OVP_MASK, ®, - QPNP_WLED_OVP_REG(wled->ctrl_base)); - if (rc) - return rc; - - /* Update WLED_TRIM register based on desired target voltage */ - rc = qpnp_wled_read_reg(wled, ®, - QPNP_WLED_REF_7P7_TRIM_REG(wled->ctrl_base)); - if (rc) - return rc; - - reg += qpnp_wled_avdd_trim_adjustments[i]; - if ((s8)reg < QPNP_WLED_AVDD_MIN_TRIM_VAL || - (s8)reg > QPNP_WLED_AVDD_MAX_TRIM_VAL) { - dev_info(&wled->pdev->dev, - "adjusted trim %d is not within range, capping it\n", - (s8)reg); - if ((s8)reg < QPNP_WLED_AVDD_MIN_TRIM_VAL) - reg = QPNP_WLED_AVDD_MIN_TRIM_VAL; - else - reg = QPNP_WLED_AVDD_MAX_TRIM_VAL; - } - - reg &= QPNP_WLED_7P7_TRIM_MASK; - rc = qpnp_wled_sec_write_reg(wled, reg, - QPNP_WLED_REF_7P7_TRIM_REG(wled->ctrl_base)); - if (rc) + rc = qpnp_wled_avdd_trim_config(wled); + if (rc < 0) return rc; } + rc = qpnp_wled_avdd_mode_config(wled); + if (rc < 0) + return rc; + /* Configure the MODULATION register */ if (wled->mod_freq_khz <= QPNP_WLED_MOD_FREQ_1200_KHZ) { wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_1200_KHZ; @@ -1561,6 +1671,9 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled) return rc; } + wled->avdd_mode_spmi = of_property_read_bool(pdev->dev.of_node, + "qcom,avdd-mode-spmi"); + wled->avdd_target_voltage_mv = QPNP_WLED_DFLT_AVDD_MV; rc = of_property_read_u32(pdev->dev.of_node, "qcom,avdd-target-voltage-mv", &temp_val); @@ -1635,7 +1748,19 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled) return rc; } - wled->ilim_ma = QPNP_WLED_DFLT_ILIM_MA; + if (wled->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE || + wled->pmic_rev_id->pmic_subtype == PM2FALCON_SUBTYPE) { + if (wled->disp_type_amoled) + wled->ilim_ma = PMICOBALT_AMOLED_DFLT_ILIM_MA; + else + wled->ilim_ma = PMICOBALT_WLED_DFLT_ILIM_MA; + } else { + if (wled->disp_type_amoled) + wled->ilim_ma = PMI8994_AMOLED_DFLT_ILIM_MA; + else + wled->ilim_ma = PMI8994_WLED_DFLT_ILIM_MA; + } + rc = of_property_read_u32(pdev->dev.of_node, "qcom,ilim-ma", &temp_val); if (!rc) { diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c index 004926955263..b0155b05cddb 100644 --- a/drivers/mcb/mcb-parse.c +++ b/drivers/mcb/mcb-parse.c @@ -57,7 +57,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, mdev->id = GDD_DEV(reg1); mdev->rev = GDD_REV(reg1); mdev->var = GDD_VAR(reg1); - mdev->bar = GDD_BAR(reg1); + mdev->bar = GDD_BAR(reg2); mdev->group = GDD_GRP(reg2); mdev->inst = GDD_INS(reg2); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 998bd1ec0415..e562bdedfb07 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -517,4 +517,20 @@ config DM_LOG_WRITES If unsure, say N. +config DM_ANDROID_VERITY + bool "Android verity target support" + depends on DM_VERITY + depends on X509_CERTIFICATE_PARSER + depends on SYSTEM_TRUSTED_KEYRING + depends on PUBLIC_KEY_ALGO_RSA + depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE + ---help--- + This device-mapper target is virtually a VERITY target. This + target is setup by reading the metadata contents piggybacked + to the actual data blocks in the block device. The signature + of the metadata contents are verified against the key included + in the system keyring. Upon success, the underlying verity + target is setup. 
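To make the last two sentences of this help text concrete, here is a sketch of how a metadata signature can be checked against the system trusted keyring using the asymmetric-key API this option depends on. The helper name is hypothetical, the lookup is by key description, and error handling is reduced to the minimum; it illustrates the mechanism, not necessarily the exact flow dm-android-verity implements:

    #include <linux/err.h>
    #include <linux/key.h>
    #include <crypto/public_key.h>
    #include <keys/asymmetric-type.h>
    #include <keys/system_keyring.h>

    static int example_verify_metadata_sig(const char *key_desc,
                                           struct public_key_signature *pks)
    {
            key_ref_t kref;
            struct key *key;
            int ret;

            /* look the verity certificate up in the system trusted keyring */
            kref = keyring_search(make_key_ref(system_trusted_keyring, 1),
                                  &key_type_asymmetric, key_desc);
            if (IS_ERR(kref))
                    return PTR_ERR(kref);

            key = key_ref_to_ptr(kref);
            /* pks carries the digest of the metadata plus its RSA signature */
            ret = verify_signature(key, pks);
            key_put(key);

            return ret;
    }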
endif # MD diff --git a/drivers/md/Makefile b/drivers/md/Makefile index d470143dcf40..ce7cf06d0e8a 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -69,3 +69,7 @@ endif ifeq ($(CONFIG_DM_VERITY_FEC),y) dm-verity-objs += dm-verity-fec.o endif + +ifeq ($(CONFIG_DM_ANDROID_VERITY),y) +dm-verity-objs += dm-android-verity.o +endif diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c new file mode 100644 index 000000000000..bb6c1285e499 --- /dev/null +++ b/drivers/md/dm-android-verity.c @@ -0,0 +1,925 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/buffer_head.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/device-mapper.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/fcntl.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/key.h> +#include <linux/module.h> +#include <linux/mount.h> +#include <linux/namei.h> +#include <linux/of.h> +#include <linux/reboot.h> +#include <linux/string.h> +#include <linux/vmalloc.h> + +#include <asm/setup.h> +#include <crypto/hash.h> +#include <crypto/public_key.h> +#include <crypto/sha.h> +#include <keys/asymmetric-type.h> +#include <keys/system_keyring.h> + +#include "dm-verity.h" +#include "dm-android-verity.h" + +static char verifiedbootstate[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char veritymode[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char veritykeyid[VERITY_DEFAULT_KEY_ID_LENGTH]; +static char buildvariant[BUILD_VARIANT]; + +static bool target_added; +static bool verity_enabled = true; +struct dentry *debug_dir; +static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv); + +static struct target_type android_verity_target = { + .name = "android-verity", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = android_verity_ctr, + .dtr = verity_dtr, + .map = verity_map, + .status = verity_status, + .prepare_ioctl = verity_prepare_ioctl, + .iterate_devices = verity_iterate_devices, + .io_hints = verity_io_hints, +}; + +static int __init verified_boot_state_param(char *line) +{ + strlcpy(verifiedbootstate, line, sizeof(verifiedbootstate)); + return 1; +} + +__setup("androidboot.verifiedbootstate=", verified_boot_state_param); + +static int __init verity_mode_param(char *line) +{ + strlcpy(veritymode, line, sizeof(veritymode)); + return 1; +} + +__setup("androidboot.veritymode=", verity_mode_param); + +static int __init verity_keyid_param(char *line) +{ + strlcpy(veritykeyid, line, sizeof(veritykeyid)); + return 1; +} + +__setup("veritykeyid=", verity_keyid_param); + +static int __init verity_buildvariant(char *line) +{ + strlcpy(buildvariant, line, sizeof(buildvariant)); + return 1; +} + +__setup("buildvariant=", verity_buildvariant); + +static inline bool default_verity_key_id(void) +{ + return veritykeyid[0] != '\0'; +} + +static inline bool is_eng(void) +{ + static const char typeeng[] = "eng"; + + return !strncmp(buildvariant, typeeng, sizeof(typeeng)); +} + +static inline bool is_userdebug(void) +{ + static const char 
typeuserdebug[] = "userdebug"; + + return !strncmp(buildvariant, typeuserdebug, sizeof(typeuserdebug)); +} + + +static int table_extract_mpi_array(struct public_key_signature *pks, + const void *data, size_t len) +{ + MPI mpi = mpi_read_raw_data(data, len); + + if (!mpi) { + DMERR("Error while allocating mpi array"); + return -ENOMEM; + } + + pks->mpi[0] = mpi; + pks->nr_mpi = 1; + return 0; +} + +static struct public_key_signature *table_make_digest( + enum hash_algo hash, + const void *table, + unsigned long table_len) +{ + struct public_key_signature *pks = NULL; + struct crypto_shash *tfm; + struct shash_desc *desc; + size_t digest_size, desc_size; + int ret; + + /* Allocate the hashing algorithm we're going to need and find out how + * big the hash operational data will be. + */ + tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0); + if (IS_ERR(tfm)) + return ERR_CAST(tfm); + + desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); + digest_size = crypto_shash_digestsize(tfm); + + /* We allocate the hash operational data storage on the end of out + * context data and the digest output buffer on the end of that. + */ + ret = -ENOMEM; + pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL); + if (!pks) + goto error; + + pks->pkey_hash_algo = hash; + pks->digest = (u8 *)pks + sizeof(*pks) + desc_size; + pks->digest_size = digest_size; + + desc = (struct shash_desc *)(pks + 1); + desc->tfm = tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + ret = crypto_shash_finup(desc, table, table_len, pks->digest); + if (ret < 0) + goto error; + + crypto_free_shash(tfm); + return pks; + +error: + kfree(pks); + crypto_free_shash(tfm); + return ERR_PTR(ret); +} + +static int read_block_dev(struct bio_read *payload, struct block_device *bdev, + sector_t offset, int length) +{ + struct bio *bio; + int err = 0, i; + + payload->number_of_pages = DIV_ROUND_UP(length, PAGE_SIZE); + + bio = bio_alloc(GFP_KERNEL, payload->number_of_pages); + if (!bio) { + DMERR("Error while allocating bio"); + return -ENOMEM; + } + + bio->bi_bdev = bdev; + bio->bi_iter.bi_sector = offset; + + payload->page_io = kzalloc(sizeof(struct page *) * + payload->number_of_pages, GFP_KERNEL); + if (!payload->page_io) { + DMERR("page_io array alloc failed"); + err = -ENOMEM; + goto free_bio; + } + + for (i = 0; i < payload->number_of_pages; i++) { + payload->page_io[i] = alloc_page(GFP_KERNEL); + if (!payload->page_io[i]) { + DMERR("alloc_page failed"); + err = -ENOMEM; + goto free_pages; + } + if (!bio_add_page(bio, payload->page_io[i], PAGE_SIZE, 0)) { + DMERR("bio_add_page error"); + err = -EIO; + goto free_pages; + } + } + + if (!submit_bio_wait(READ, bio)) + /* success */ + goto free_bio; + DMERR("bio read failed"); + err = -EIO; + +free_pages: + for (i = 0; i < payload->number_of_pages; i++) + if (payload->page_io[i]) + __free_page(payload->page_io[i]); + kfree(payload->page_io); +free_bio: + bio_put(bio); + return err; +} + +static inline u64 fec_div_round_up(u64 x, u64 y) +{ + u64 remainder; + + return div64_u64_rem(x, y, &remainder) + + (remainder > 0 ? 
1 : 0); +} + +static inline void populate_fec_metadata(struct fec_header *header, + struct fec_ecc_metadata *ecc) +{ + ecc->blocks = fec_div_round_up(le64_to_cpu(header->inp_size), + FEC_BLOCK_SIZE); + ecc->roots = le32_to_cpu(header->roots); + ecc->start = le64_to_cpu(header->inp_size); +} + +static inline int validate_fec_header(struct fec_header *header, u64 offset) +{ + /* move offset to make the sanity check work for backup header + * as well. */ + offset -= offset % FEC_BLOCK_SIZE; + if (le32_to_cpu(header->magic) != FEC_MAGIC || + le32_to_cpu(header->version) != FEC_VERSION || + le32_to_cpu(header->size) != sizeof(struct fec_header) || + le32_to_cpu(header->roots) == 0 || + le32_to_cpu(header->roots) >= FEC_RSM) + return -EINVAL; + + return 0; +} + +static int extract_fec_header(dev_t dev, struct fec_header *fec, + struct fec_ecc_metadata *ecc) +{ + u64 device_size; + struct bio_read payload; + int i, err = 0; + struct block_device *bdev; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + + if (IS_ERR_OR_NULL(bdev)) { + DMERR("bdev get error"); + return PTR_ERR(bdev); + } + + device_size = i_size_read(bdev->bd_inode); + + /* fec metadata size is a power of 2 and PAGE_SIZE + * is a power of 2 as well. + */ + BUG_ON(FEC_BLOCK_SIZE > PAGE_SIZE); + /* 512 byte sector alignment */ + BUG_ON(((device_size - FEC_BLOCK_SIZE) % (1 << SECTOR_SHIFT)) != 0); + + err = read_block_dev(&payload, bdev, (device_size - + FEC_BLOCK_SIZE) / (1 << SECTOR_SHIFT), FEC_BLOCK_SIZE); + if (err) { + DMERR("Error while reading verity metadata"); + goto error; + } + + BUG_ON(sizeof(struct fec_header) > PAGE_SIZE); + memcpy(fec, page_address(payload.page_io[0]), + sizeof(*fec)); + + ecc->valid = true; + if (validate_fec_header(fec, device_size - FEC_BLOCK_SIZE)) { + /* Try the backup header */ + memcpy(fec, page_address(payload.page_io[0]) + FEC_BLOCK_SIZE + - sizeof(*fec) , + sizeof(*fec)); + if (validate_fec_header(fec, device_size - + sizeof(struct fec_header))) + ecc->valid = false; + } + + if (ecc->valid) + populate_fec_metadata(fec, ecc); + + for (i = 0; i < payload.number_of_pages; i++) + __free_page(payload.page_io[i]); + kfree(payload.page_io); + +error: + blkdev_put(bdev, FMODE_READ); + return err; +} +static void find_metadata_offset(struct fec_header *fec, + struct block_device *bdev, u64 *metadata_offset) +{ + u64 device_size; + + device_size = i_size_read(bdev->bd_inode); + + if (le32_to_cpu(fec->magic) == FEC_MAGIC) + *metadata_offset = le64_to_cpu(fec->inp_size) - + VERITY_METADATA_SIZE; + else + *metadata_offset = device_size - VERITY_METADATA_SIZE; +} + +static int find_size(dev_t dev, u64 *device_size) +{ + struct block_device *bdev; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + if (IS_ERR_OR_NULL(bdev)) { + DMERR("blkdev_get_by_dev failed"); + return PTR_ERR(bdev); + } + + *device_size = i_size_read(bdev->bd_inode); + *device_size >>= SECTOR_SHIFT; + + DMINFO("blkdev size in sectors: %llu", *device_size); + blkdev_put(bdev, FMODE_READ); + return 0; +} + +static int verify_header(struct android_metadata_header *header) +{ + int retval = -EINVAL; + + if (is_userdebug() && le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_DISABLE) + return VERITY_STATE_DISABLE; + + if (!(le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_NUMBER) || + (le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_DISABLE)) { + DMERR("Incorrect magic number"); + return retval; + } + + if (le32_to_cpu(header->protocol_version) != + VERITY_METADATA_VERSION) { + DMERR("Unsupported 
version %u", + le32_to_cpu(header->protocol_version)); + return retval; + } + + return 0; +} + +static int extract_metadata(dev_t dev, struct fec_header *fec, + struct android_metadata **metadata, + bool *verity_enabled) +{ + struct block_device *bdev; + struct android_metadata_header *header; + int i; + u32 table_length, copy_length, offset; + u64 metadata_offset; + struct bio_read payload; + int err = 0; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + + if (IS_ERR_OR_NULL(bdev)) { + DMERR("blkdev_get_by_dev failed"); + return -ENODEV; + } + + find_metadata_offset(fec, bdev, &metadata_offset); + + /* Verity metadata size is a power of 2 and PAGE_SIZE + * is a power of 2 as well. + * PAGE_SIZE is also a multiple of 512 bytes. + */ + if (VERITY_METADATA_SIZE > PAGE_SIZE) + BUG_ON(VERITY_METADATA_SIZE % PAGE_SIZE != 0); + /* 512 byte sector alignment */ + BUG_ON(metadata_offset % (1 << SECTOR_SHIFT) != 0); + + err = read_block_dev(&payload, bdev, metadata_offset / + (1 << SECTOR_SHIFT), VERITY_METADATA_SIZE); + if (err) { + DMERR("Error while reading verity metadata"); + goto blkdev_release; + } + + header = kzalloc(sizeof(*header), GFP_KERNEL); + if (!header) { + DMERR("kzalloc failed for header"); + err = -ENOMEM; + goto free_payload; + } + + memcpy(header, page_address(payload.page_io[0]), + sizeof(*header)); + + DMINFO("bio magic_number:%u protocol_version:%d table_length:%u", + le32_to_cpu(header->magic_number), + le32_to_cpu(header->protocol_version), + le32_to_cpu(header->table_length)); + + err = verify_header(header); + + if (err == VERITY_STATE_DISABLE) { + DMERR("Mounting root with verity disabled"); + *verity_enabled = false; + /* we would still have to read the metadata to figure out + * the data blocks size. Or may be could map the entire + * partition similar to mounting the device. + * + * Reset error as well as the verity_enabled flag is changed. 
+ */ + err = 0; + } else if (err) + goto free_header; + + *metadata = kzalloc(sizeof(**metadata), GFP_KERNEL); + if (!*metadata) { + DMERR("kzalloc for metadata failed"); + err = -ENOMEM; + goto free_header; + } + + (*metadata)->header = header; + table_length = le32_to_cpu(header->table_length); + + if (table_length == 0 || + table_length > (VERITY_METADATA_SIZE - + sizeof(struct android_metadata_header))) { + DMERR("table_length too long"); + err = -EINVAL; + goto free_metadata; + } + + (*metadata)->verity_table = kzalloc(table_length + 1, GFP_KERNEL); + + if (!(*metadata)->verity_table) { + DMERR("kzalloc verity_table failed"); + err = -ENOMEM; + goto free_metadata; + } + + if (sizeof(struct android_metadata_header) + + table_length <= PAGE_SIZE) { + memcpy((*metadata)->verity_table, + page_address(payload.page_io[0]) + + sizeof(struct android_metadata_header), + table_length); + } else { + copy_length = PAGE_SIZE - + sizeof(struct android_metadata_header); + memcpy((*metadata)->verity_table, + page_address(payload.page_io[0]) + + sizeof(struct android_metadata_header), + copy_length); + table_length -= copy_length; + offset = copy_length; + i = 1; + while (table_length != 0) { + if (table_length > PAGE_SIZE) { + memcpy((*metadata)->verity_table + offset, + page_address(payload.page_io[i]), + PAGE_SIZE); + offset += PAGE_SIZE; + table_length -= PAGE_SIZE; + } else { + memcpy((*metadata)->verity_table + offset, + page_address(payload.page_io[i]), + table_length); + table_length = 0; + } + i++; + } + } + (*metadata)->verity_table[table_length] = '\0'; + + DMINFO("verity_table: %s", (*metadata)->verity_table); + goto free_payload; + +free_metadata: + kfree(*metadata); +free_header: + kfree(header); +free_payload: + for (i = 0; i < payload.number_of_pages; i++) + if (payload.page_io[i]) + __free_page(payload.page_io[i]); + kfree(payload.page_io); +blkdev_release: + blkdev_put(bdev, FMODE_READ); + return err; +} + +/* helper functions to extract properties from dts */ +const char *find_dt_value(const char *name) +{ + struct device_node *firmware; + const char *value; + + firmware = of_find_node_by_path("/firmware/android"); + if (!firmware) + return NULL; + value = of_get_property(firmware, name, NULL); + of_node_put(firmware); + + return value; +} + +static int verity_mode(void) +{ + static const char enforcing[] = "enforcing"; + static const char verified_mode_prop[] = "veritymode"; + const char *value; + + value = find_dt_value(verified_mode_prop); + if (!value) + value = veritymode; + if (!strncmp(value, enforcing, sizeof(enforcing) - 1)) + return DM_VERITY_MODE_RESTART; + + return DM_VERITY_MODE_EIO; +} + +static int verify_verity_signature(char *key_id, + struct android_metadata *metadata) +{ + key_ref_t key_ref; + struct key *key; + struct public_key_signature *pks = NULL; + int retval = -EINVAL; + + key_ref = keyring_search(make_key_ref(system_trusted_keyring, 1), + &key_type_asymmetric, key_id); + + if (IS_ERR(key_ref)) { + DMERR("keyring: key not found"); + return -ENOKEY; + } + + key = key_ref_to_ptr(key_ref); + + pks = table_make_digest(HASH_ALGO_SHA256, + (const void *)metadata->verity_table, + le32_to_cpu(metadata->header->table_length)); + + if (IS_ERR(pks)) { + DMERR("hashing failed"); + goto error; + } + + retval = table_extract_mpi_array(pks, &metadata->header->signature[0], + RSANUMBYTES); + if (retval < 0) { + DMERR("Error extracting mpi %d", retval); + goto error; + } + + retval = verify_signature(key, pks); + mpi_free(pks->rsa.s); +error: + kfree(pks); + key_put(key); + 
+ return retval; +} + +static void handle_error(void) +{ + int mode = verity_mode(); + if (mode == DM_VERITY_MODE_RESTART) { + DMERR("triggering restart"); + kernel_restart("dm-verity device corrupted"); + } else { + DMERR("Mounting verity root failed"); + } +} + +static inline bool test_mult_overflow(sector_t a, u32 b) +{ + sector_t r = (sector_t)~0ULL; + + sector_div(r, b); + return a > r; +} + +static int add_as_linear_device(struct dm_target *ti, char *dev) +{ + /*Move to linear mapping defines*/ + char *linear_table_args[DM_LINEAR_ARGS] = {dev, + DM_LINEAR_TARGET_OFFSET}; + int err = 0; + + android_verity_target.dtr = dm_linear_dtr, + android_verity_target.map = dm_linear_map, + android_verity_target.status = dm_linear_status, + android_verity_target.prepare_ioctl = dm_linear_prepare_ioctl, + android_verity_target.iterate_devices = dm_linear_iterate_devices, + android_verity_target.io_hints = NULL; + + err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args); + + if (!err) { + DMINFO("Added android-verity as a linear target"); + target_added = true; + } else + DMERR("Failed to add android-verity as linear target"); + + return err; +} + +/* + * Target parameters: + * <key id> Key id of the public key in the system keyring. + * Verity metadata's signature would be verified against + * this. If the key id contains spaces, replace them + * with '#'. + * <block device> The block device for which dm-verity is being setup. + */ +static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv) +{ + dev_t uninitialized_var(dev); + struct android_metadata *metadata = NULL; + int err = 0, i, mode; + char *key_id, *table_ptr, dummy, *target_device, + *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS]; + /* One for specifying number of opt args and one for mode */ + sector_t data_sectors; + u32 data_block_size; + unsigned int no_of_args = VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS; + struct fec_header uninitialized_var(fec); + struct fec_ecc_metadata uninitialized_var(ecc); + char buf[FEC_ARG_LENGTH], *buf_ptr; + unsigned long long tmpll; + u64 uninitialized_var(device_size); + + if (argc == 1) { + /* Use the default keyid */ + if (default_verity_key_id()) + key_id = veritykeyid; + else if (!is_eng()) { + DMERR("veritykeyid= is not set"); + handle_error(); + return -EINVAL; + } + } else if (argc == 2) + key_id = argv[1]; + else { + DMERR("Incorrect number of arguments"); + handle_error(); + return -EINVAL; + } + + target_device = argv[0]; + + dev = name_to_dev_t(target_device); + if (!dev) { + DMERR("no dev found for %s", target_device); + handle_error(); + return -EINVAL; + } + + if (is_eng()) { + err = find_size(dev, &device_size); + if (err) { + DMERR("error finding bdev size"); + handle_error(); + return err; + } + + ti->len = device_size; + err = add_as_linear_device(ti, target_device); + if (err) { + handle_error(); + return err; + } + verity_enabled = false; + return 0; + } + + strreplace(key_id, '#', ' '); + + DMINFO("key:%s dev:%s", key_id, target_device); + + if (extract_fec_header(dev, &fec, &ecc)) { + DMERR("Error while extracting fec header"); + handle_error(); + return -EINVAL; + } + + err = extract_metadata(dev, &fec, &metadata, &verity_enabled); + + if (err) { + DMERR("Error while extracting metadata"); + handle_error(); + goto free_metadata; + } + + if (verity_enabled) { + err = verify_verity_signature(key_id, metadata); + + if (err) { + DMERR("Signature verification failed"); + handle_error(); + goto free_metadata; + } else + 
DMINFO("Signature verification success"); + } + + table_ptr = metadata->verity_table; + + for (i = 0; i < VERITY_TABLE_ARGS; i++) { + verity_table_args[i] = strsep(&table_ptr, " "); + if (verity_table_args[i] == NULL) + break; + } + + if (i != VERITY_TABLE_ARGS) { + DMERR("Verity table not in the expected format"); + err = -EINVAL; + handle_error(); + goto free_metadata; + } + + if (sscanf(verity_table_args[5], "%llu%c", &tmpll, &dummy) + != 1) { + DMERR("Verity table not in the expected format"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + if (tmpll > ULONG_MAX) { + DMERR("<num_data_blocks> too large. Forgot to turn on CONFIG_LBDAF?"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + data_sectors = tmpll; + + if (sscanf(verity_table_args[3], "%u%c", &data_block_size, &dummy) + != 1) { + DMERR("Verity table not in the expected format"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + if (test_mult_overflow(data_sectors, data_block_size >> + SECTOR_SHIFT)) { + DMERR("data_sectors too large"); + handle_error(); + err = -EOVERFLOW; + goto free_metadata; + } + + data_sectors *= data_block_size >> SECTOR_SHIFT; + DMINFO("Data sectors %llu", (unsigned long long)data_sectors); + + /* update target length */ + ti->len = data_sectors; + + /* Setup linear target and free */ + if (!verity_enabled) { + err = add_as_linear_device(ti, target_device); + goto free_metadata; + } + + /*substitute data_dev and hash_dev*/ + verity_table_args[1] = target_device; + verity_table_args[2] = target_device; + + mode = verity_mode(); + + if (ecc.valid && IS_BUILTIN(CONFIG_DM_VERITY_FEC)) { + if (mode) { + err = snprintf(buf, FEC_ARG_LENGTH, + "%u %s " VERITY_TABLE_OPT_FEC_FORMAT, + 1 + VERITY_TABLE_OPT_FEC_ARGS, + mode == DM_VERITY_MODE_RESTART ? + VERITY_TABLE_OPT_RESTART : + VERITY_TABLE_OPT_LOGGING, + target_device, + ecc.start / FEC_BLOCK_SIZE, ecc.blocks, + ecc.roots); + } else { + err = snprintf(buf, FEC_ARG_LENGTH, + "%u " VERITY_TABLE_OPT_FEC_FORMAT, + VERITY_TABLE_OPT_FEC_ARGS, target_device, + ecc.start / FEC_BLOCK_SIZE, ecc.blocks, + ecc.roots); + } + } else if (mode) { + err = snprintf(buf, FEC_ARG_LENGTH, + "2 " VERITY_TABLE_OPT_IGNZERO " %s", + mode == DM_VERITY_MODE_RESTART ? 
+ VERITY_TABLE_OPT_RESTART : VERITY_TABLE_OPT_LOGGING); + } else { + err = snprintf(buf, FEC_ARG_LENGTH, "1 %s", + "ignore_zero_blocks"); + } + + if (err < 0 || err >= FEC_ARG_LENGTH) + goto free_metadata; + + buf_ptr = buf; + + for (i = VERITY_TABLE_ARGS; i < (VERITY_TABLE_ARGS + + VERITY_TABLE_OPT_FEC_ARGS + 2); i++) { + verity_table_args[i] = strsep(&buf_ptr, " "); + if (verity_table_args[i] == NULL) { + no_of_args = i; + break; + } + } + + err = verity_ctr(ti, no_of_args, verity_table_args); + + if (err) + DMERR("android-verity failed to mount as verity target"); + else { + target_added = true; + DMINFO("android-verity mounted as verity target"); + } + +free_metadata: + if (metadata) { + kfree(metadata->header); + kfree(metadata->verity_table); + } + kfree(metadata); + return err; +} + +static int __init dm_android_verity_init(void) +{ + int r; + struct dentry *file; + + r = dm_register_target(&android_verity_target); + if (r < 0) + DMERR("register failed %d", r); + + /* Tracks the status of the last added target */ + debug_dir = debugfs_create_dir("android_verity", NULL); + + if (IS_ERR_OR_NULL(debug_dir)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + goto end; + } + + file = debugfs_create_bool("target_added", S_IRUGO, debug_dir, + &target_added); + + if (IS_ERR_OR_NULL(file)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + debugfs_remove_recursive(debug_dir); + goto end; + } + + file = debugfs_create_bool("verity_enabled", S_IRUGO, debug_dir, + &verity_enabled); + + if (IS_ERR_OR_NULL(file)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + debugfs_remove_recursive(debug_dir); + } + +end: + return r; +} + +static void __exit dm_android_verity_exit(void) +{ + if (!IS_ERR_OR_NULL(debug_dir)) + debugfs_remove_recursive(debug_dir); + + dm_unregister_target(&android_verity_target); +} + +module_init(dm_android_verity_init); +module_exit(dm_android_verity_exit); diff --git a/drivers/md/dm-android-verity.h b/drivers/md/dm-android-verity.h new file mode 100644 index 000000000000..0fcd54aaf5f6 --- /dev/null +++ b/drivers/md/dm-android-verity.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef DM_ANDROID_VERITY_H +#define DM_ANDROID_VERITY_H + +#include <crypto/sha.h> + +#define RSANUMBYTES 256 +#define VERITY_METADATA_MAGIC_NUMBER 0xb001b001 +#define VERITY_METADATA_MAGIC_DISABLE 0x46464f56 +#define VERITY_METADATA_VERSION 0 +#define VERITY_STATE_DISABLE 1 +#define DATA_BLOCK_SIZE (4 * 1024) +#define VERITY_METADATA_SIZE (8 * DATA_BLOCK_SIZE) +#define VERITY_TABLE_ARGS 10 +#define VERITY_COMMANDLINE_PARAM_LENGTH 20 +#define BUILD_VARIANT 20 + +/* + * <subject>:<sha1-id> is the format for the identifier. 
+ * subject can either be the Common Name(CN) + Organization Name(O) or + * just the CN if the it is prefixed with O + * From https://tools.ietf.org/html/rfc5280#appendix-A + * ub-organization-name-length INTEGER ::= 64 + * ub-common-name-length INTEGER ::= 64 + * + * http://lxr.free-electrons.com/source/crypto/asymmetric_keys/x509_cert_parser.c?v=3.9#L278 + * ctx->o_size + 2 + ctx->cn_size + 1 + * + 41 characters for ":" and sha1 id + * 64 + 2 + 64 + 1 + 1 + 40 (172) + * setting VERITY_DEFAULT_KEY_ID_LENGTH to 200 characters. + */ +#define VERITY_DEFAULT_KEY_ID_LENGTH 200 + +#define FEC_MAGIC 0xFECFECFE +#define FEC_BLOCK_SIZE (4 * 1024) +#define FEC_VERSION 0 +#define FEC_RSM 255 +#define FEC_ARG_LENGTH 300 + +#define VERITY_TABLE_OPT_RESTART "restart_on_corruption" +#define VERITY_TABLE_OPT_LOGGING "ignore_corruption" +#define VERITY_TABLE_OPT_IGNZERO "ignore_zero_blocks" + +#define VERITY_TABLE_OPT_FEC_FORMAT \ + "use_fec_from_device %s fec_start %llu fec_blocks %llu fec_roots %u ignore_zero_blocks" +#define VERITY_TABLE_OPT_FEC_ARGS 9 + +#define VERITY_DEBUG 0 + +#define DM_MSG_PREFIX "android-verity" + +#define DM_LINEAR_ARGS 2 +#define DM_LINEAR_TARGET_OFFSET "0" + +/* + * There can be two formats. + * if fec is present + * <data_blocks> <verity_tree> <verity_metdata_32K><fec_data><fec_data_4K> + * if fec is not present + * <data_blocks> <verity_tree> <verity_metdata_32K> + */ +/* TODO: rearrange structure to reduce memory holes + * depends on userspace change. + */ +struct fec_header { + __le32 magic; + __le32 version; + __le32 size; + __le32 roots; + __le32 fec_size; + __le64 inp_size; + u8 hash[SHA256_DIGEST_SIZE]; +}; + +struct android_metadata_header { + __le32 magic_number; + __le32 protocol_version; + char signature[RSANUMBYTES]; + __le32 table_length; +}; + +struct android_metadata { + struct android_metadata_header *header; + char *verity_table; +}; + +struct fec_ecc_metadata { + bool valid; + u32 roots; + u64 blocks; + u64 rounds; + u64 start; +}; + +struct bio_read { + struct page **page_io; + int number_of_pages; +}; + +extern struct target_type linear_target; + +extern void dm_linear_dtr(struct dm_target *ti); +extern int dm_linear_map(struct dm_target *ti, struct bio *bio); +extern void dm_linear_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen); +extern int dm_linear_prepare_ioctl(struct dm_target *ti, + struct block_device **bdev, fmode_t *mode); +extern int dm_linear_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data); +extern int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv); +#endif /* DM_ANDROID_VERITY_H */ diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 80a439543259..bc5e9a5b1f30 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1923,6 +1923,45 @@ void dm_interface_exit(void) dm_hash_exit(); } + +/** + * dm_ioctl_export - Permanently export a mapped device via the ioctl interface + * @md: Pointer to mapped_device + * @name: Buffer (size DM_NAME_LEN) for name + * @uuid: Buffer (size DM_UUID_LEN) for uuid or NULL if not desired + */ +int dm_ioctl_export(struct mapped_device *md, const char *name, + const char *uuid) +{ + int r = 0; + struct hash_cell *hc; + + if (!md) { + r = -ENXIO; + goto out; + } + + /* The name and uuid can only be set once. 
*/ + mutex_lock(&dm_hash_cells_mutex); + hc = dm_get_mdptr(md); + mutex_unlock(&dm_hash_cells_mutex); + if (hc) { + DMERR("%s: already exported", dm_device_name(md)); + r = -ENXIO; + goto out; + } + + r = dm_hash_insert(name, uuid, md); + if (r) { + DMERR("%s: could not bind to '%s'", dm_device_name(md), name); + goto out; + } + + /* Let udev know we've changed. */ + dm_kobject_uevent(md, KOBJ_CHANGE, dm_get_event_nr(md)); +out: + return r; +} /** * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers * @md: Pointer to mapped_device diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 05c35aacb3aa..8505a771de42 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -25,7 +25,7 @@ struct linear_c { /* * Construct a linear mapping: <dev_path> <offset> */ -static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) +int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct linear_c *lc; unsigned long long tmp; @@ -67,7 +67,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) return ret; } -static void linear_dtr(struct dm_target *ti) +void dm_linear_dtr(struct dm_target *ti) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -92,14 +92,14 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) linear_map_sector(ti, bio->bi_iter.bi_sector); } -static int linear_map(struct dm_target *ti, struct bio *bio) +int dm_linear_map(struct dm_target *ti, struct bio *bio) { linear_map_bio(ti, bio); return DM_MAPIO_REMAPPED; } -static void linear_status(struct dm_target *ti, status_type_t type, +void dm_linear_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -116,7 +116,7 @@ static void linear_status(struct dm_target *ti, status_type_t type, } } -static int linear_prepare_ioctl(struct dm_target *ti, +int dm_linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, fmode_t *mode) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -133,7 +133,7 @@ static int linear_prepare_ioctl(struct dm_target *ti, return 0; } -static int linear_iterate_devices(struct dm_target *ti, +int dm_linear_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct linear_c *lc = ti->private; @@ -145,12 +145,12 @@ static struct target_type linear_target = { .name = "linear", .version = {1, 2, 1}, .module = THIS_MODULE, - .ctr = linear_ctr, - .dtr = linear_dtr, - .map = linear_map, - .status = linear_status, - .prepare_ioctl = linear_prepare_ioctl, - .iterate_devices = linear_iterate_devices, + .ctr = dm_linear_ctr, + .dtr = dm_linear_dtr, + .map = dm_linear_map, + .status = dm_linear_status, + .prepare_ioctl = dm_linear_prepare_ioctl, + .iterate_devices = dm_linear_iterate_devices, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index cb5d0daf53bb..b3d78bba3a79 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -11,6 +11,7 @@ #include <linux/vmalloc.h> #include <linux/blkdev.h> #include <linux/namei.h> +#include <linux/mount.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/slab.h> diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index ad10d6d8ed28..1dd667b97530 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -442,6 +442,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, if 
(!verity_fec_is_enabled(v)) return -EOPNOTSUPP; + if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) { + DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name); + return -EIO; + } + + fio->level++; + if (type == DM_VERITY_BLOCK_TYPE_METADATA) block += v->data_blocks; @@ -456,9 +463,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, */ offset = block << v->data_dev_block_bits; - - res = offset; - div64_u64(res, v->fec->rounds << v->data_dev_block_bits); + res = div64_u64(offset, v->fec->rounds << v->data_dev_block_bits); /* * The base RS block we can feed to the interleaver to find out all @@ -475,7 +480,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, if (r < 0) { r = fec_decode_rsb(v, io, fio, rsb, offset, true); if (r < 0) - return r; + goto done; } if (dest) @@ -485,6 +490,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, r = verity_for_bv_block(v, io, iter, fec_bv_copy); } +done: + fio->level--; return r; } @@ -525,6 +532,7 @@ void verity_fec_init_io(struct dm_verity_io *io) memset(fio->bufs, 0, sizeof(fio->bufs)); fio->nbufs = 0; fio->output = NULL; + fio->level = 0; } /* @@ -680,7 +688,8 @@ static struct attribute *fec_attrs[] = { static struct kobj_type fec_ktype = { .sysfs_ops = &kobj_sysfs_ops, - .default_attrs = fec_attrs + .default_attrs = fec_attrs, + .release = dm_kobject_release }; /* diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h index 8c4bee052a73..b8e21cef3ad1 100644 --- a/drivers/md/dm-verity-fec.h +++ b/drivers/md/dm-verity-fec.h @@ -28,6 +28,9 @@ #define DM_VERITY_FEC_BUF_MAX \ (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS)) +/* maximum recursion level for verity_fec_decode */ +#define DM_VERITY_FEC_MAX_RECURSION 4 + #define DM_VERITY_OPT_FEC_DEV "use_fec_from_device" #define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks" #define DM_VERITY_OPT_FEC_START "fec_start" @@ -61,6 +64,7 @@ struct dm_verity_fec_io { unsigned nbufs; /* number of buffers allocated */ u8 *output; /* buffer for corrected output */ size_t output_pos; + unsigned level; /* recursion level */ }; #ifdef CONFIG_DM_VERITY_FEC diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 5c5d30cb6ec5..5214ed2c7507 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -551,7 +551,7 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) * Bio map function. It allocates dm_verity_io structure and bio vector and * fills them. Then it issues prefetches and the I/O. 
*/ -static int verity_map(struct dm_target *ti, struct bio *bio) +int verity_map(struct dm_target *ti, struct bio *bio) { struct dm_verity *v = ti->private; struct dm_verity_io *io; @@ -596,7 +596,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) /* * Status: V (valid) or C (corruption found) */ -static void verity_status(struct dm_target *ti, status_type_t type, +void verity_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { struct dm_verity *v = ti->private; @@ -656,7 +656,7 @@ static void verity_status(struct dm_target *ti, status_type_t type, } } -static int verity_prepare_ioctl(struct dm_target *ti, +int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, fmode_t *mode) { struct dm_verity *v = ti->private; @@ -669,7 +669,7 @@ static int verity_prepare_ioctl(struct dm_target *ti, return 0; } -static int verity_iterate_devices(struct dm_target *ti, +int verity_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct dm_verity *v = ti->private; @@ -677,7 +677,7 @@ static int verity_iterate_devices(struct dm_target *ti, return fn(ti, v->data_dev, v->data_start, ti->len, data); } -static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) +void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct dm_verity *v = ti->private; @@ -690,7 +690,7 @@ static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_min(limits, limits->logical_block_size); } -static void verity_dtr(struct dm_target *ti) +void verity_dtr(struct dm_target *ti) { struct dm_verity *v = ti->private; @@ -817,7 +817,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) * <digest> * <salt> Hex string or "-" if no salt. 
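 *
 * An illustrative 10-argument table, with hypothetical values:
 *
 *   1 /dev/sda34 /dev/sda34 4096 4096 262144 262145 sha256 <digest> <salt>
 *
 * android_verity_ctr() earlier in this patch tokenizes exactly such a line
 * out of the signed metadata (VERITY_TABLE_ARGS is 10) and hands it, plus
 * any optional arguments, to this constructor.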
*/ -static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) +int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) { struct dm_verity *v; struct dm_arg_set as; diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index fb419f422d73..75effca400a3 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -126,4 +126,14 @@ extern int verity_hash(struct dm_verity *v, struct shash_desc *desc, extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, sector_t block, u8 *digest, bool *is_zero); +extern void verity_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen); +extern int verity_prepare_ioctl(struct dm_target *ti, + struct block_device **bdev, fmode_t *mode); +extern int verity_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data); +extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits); +extern void verity_dtr(struct dm_target *ti); +extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv); +extern int verity_map(struct dm_target *ti, struct bio *bio); #endif /* DM_VERITY_H */ diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c index 9e68af87b86c..1529e2aa740c 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c @@ -105,6 +105,9 @@ static void msm_vfe40_config_irq(struct vfe_device *vfe_dev, case MSM_ISP_IRQ_ENABLE: vfe_dev->irq0_mask |= irq0_mask; vfe_dev->irq1_mask |= irq1_mask; + msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x30); + msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x34); + msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24); break; case MSM_ISP_IRQ_DISABLE: vfe_dev->irq0_mask &= ~irq0_mask; @@ -113,6 +116,9 @@ static void msm_vfe40_config_irq(struct vfe_device *vfe_dev, case MSM_ISP_IRQ_SET: vfe_dev->irq0_mask = irq0_mask; vfe_dev->irq1_mask = irq1_mask; + msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x30); + msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x34); + msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24); } msm_camera_io_w_mb(vfe_dev->irq0_mask, vfe_dev->vfe_base + 0x28); msm_camera_io_w_mb(vfe_dev->irq1_mask, vfe_dev->vfe_base + 0x2C); @@ -329,13 +335,6 @@ static void msm_vfe40_init_hardware_reg(struct vfe_device *vfe_dev) msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50); msm_vfe40_config_irq(vfe_dev, 0x800000E0, 0xFEFFFF7E, MSM_ISP_IRQ_ENABLE); - msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30); - msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x34); - msm_camera_io_w(1, vfe_dev->vfe_base + 0x24); - - msm_camera_io_w(0, vfe_dev->vfe_base + 0x30); - msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x34); - msm_camera_io_w(1, vfe_dev->vfe_base + 0x24); } static void msm_vfe40_clear_status_reg(struct vfe_device *vfe_dev) @@ -1742,10 +1741,6 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev, /* Keep only halt and restart mask */ msm_vfe40_config_irq(vfe_dev, (1 << 31), (1 << 8), MSM_ISP_IRQ_SET); - /*Clear IRQ Status */ - msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30); - msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34); - msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24); msm_isp_get_timestamp(&ts, vfe_dev); /* if any stream is waiting for update, signal complete */ @@ -1777,12 +1772,8 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev, static void msm_vfe40_axi_restart(struct vfe_device *vfe_dev, uint32_t 
blocking, uint32_t enable_camif) { - msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask, - MSM_ISP_IRQ_SET); - /* Clear IRQ Status */ - msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30); - msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34); - msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24); + msm_vfe40_config_irq(vfe_dev, 0x800000E0, 0xFEFFFF7E, + MSM_ISP_IRQ_ENABLE); msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318); /* Start AXI */ diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c index fb4f7a1dcc92..a9940927d426 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c @@ -74,6 +74,9 @@ static void msm_vfe44_config_irq(struct vfe_device *vfe_dev, case MSM_ISP_IRQ_ENABLE: vfe_dev->irq0_mask |= irq0_mask; vfe_dev->irq1_mask |= irq1_mask; + msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x30); + msm_camera_io_w(irq1_mask, vfe_dev->vfe_base + 0x34); + msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24); break; case MSM_ISP_IRQ_DISABLE: vfe_dev->irq0_mask &= ~irq0_mask; @@ -82,6 +85,9 @@ static void msm_vfe44_config_irq(struct vfe_device *vfe_dev, case MSM_ISP_IRQ_SET: vfe_dev->irq0_mask = irq0_mask; vfe_dev->irq1_mask = irq1_mask; + msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x30); + msm_camera_io_w(irq1_mask, vfe_dev->vfe_base + 0x34); + msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24); break; } msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x28); @@ -175,9 +181,6 @@ static void msm_vfe44_init_hardware_reg(struct vfe_device *vfe_dev) msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50); msm_vfe44_config_irq(vfe_dev, 0x800000E0, 0xFFFFFF7E, MSM_ISP_IRQ_ENABLE); - msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30); - msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34); - msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24); } @@ -1349,15 +1352,6 @@ static int msm_vfe44_axi_halt(struct vfe_device *vfe_dev, msm_vfe44_config_irq(vfe_dev, (1 << 31), (1 << 8), MSM_ISP_IRQ_SET); - /*Clear IRQ Status0, only leave reset irq mask*/ - msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30); - - /*Clear IRQ Status1, only leave halt irq mask*/ - msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34); - - /*push clear cmd*/ - msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24); - if (atomic_read(&vfe_dev->error_info.overflow_state) == OVERFLOW_DETECTED) pr_err_ratelimited("%s: VFE%d halt for recovery, blocking %d\n", @@ -1393,11 +1387,8 @@ static int msm_vfe44_axi_halt(struct vfe_device *vfe_dev, static void msm_vfe44_axi_restart(struct vfe_device *vfe_dev, uint32_t blocking, uint32_t enable_camif) { - msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask, - MSM_ISP_IRQ_SET); - msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30); - msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34); - msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24); + msm_vfe44_config_irq(vfe_dev, 0x800000E0, 0xFFFFFF7E, + MSM_ISP_IRQ_ENABLE); msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318); /* Start AXI */ diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c index d45b6ff0a7d0..d239c6069ad9 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c @@ -96,6 +96,9 @@ static void msm_vfe46_config_irq(struct vfe_device *vfe_dev, case MSM_ISP_IRQ_ENABLE: vfe_dev->irq0_mask |= irq0_mask; vfe_dev->irq1_mask |= irq1_mask; 
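/*
 * Judging by the comments on the writes removed from the init/halt/restart
 * paths below, 0x64/0x68 are the VFE46 IRQ status clear registers and 0x58
 * is the clear command: the three writes added just below discard any status
 * already pending for the bits being enabled, before the updated masks are
 * programmed at the end of this function.
 */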
+ msm_camera_io_w(irq0_mask, vfe_dev->vfe_base + 0x64); + msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x68); + msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58); break; case MSM_ISP_IRQ_DISABLE: vfe_dev->irq0_mask &= ~irq0_mask; @@ -104,6 +107,9 @@ static void msm_vfe46_config_irq(struct vfe_device *vfe_dev, case MSM_ISP_IRQ_SET: vfe_dev->irq0_mask = irq0_mask; vfe_dev->irq1_mask = irq1_mask; + msm_camera_io_w(irq1_mask, vfe_dev->vfe_base + 0x64); + msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x68); + msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58); break; } msm_camera_io_w_mb(vfe_dev->irq0_mask, @@ -204,9 +210,6 @@ static void msm_vfe46_init_hardware_reg(struct vfe_device *vfe_dev) /* IRQ_MASK/CLEAR */ msm_vfe46_config_irq(vfe_dev, 0x810000E0, 0xFFFFFF7E, MSM_ISP_IRQ_ENABLE); - msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64); - msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68); - msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58); } static void msm_vfe46_clear_status_reg(struct vfe_device *vfe_dev) @@ -1159,11 +1162,6 @@ static void msm_vfe46_update_camif_state(struct vfe_device *vfe_dev, /* testgen OFF*/ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN) msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0xAF4); - msm_camera_io_w(0, vfe_dev->vfe_base + 0x64); - msm_camera_io_w((1 << 0), vfe_dev->vfe_base + 0x68); - msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58); - msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, - vfe_dev->irq1_mask, MSM_ISP_IRQ_SET); } } @@ -1436,15 +1434,6 @@ static int msm_vfe46_axi_halt(struct vfe_device *vfe_dev, msm_vfe46_config_irq(vfe_dev, (1 << 31), (1 << 8), MSM_ISP_IRQ_SET); - /*Clear IRQ Status0, only leave reset irq mask*/ - msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64); - - /*Clear IRQ Status1, only leave halt irq mask*/ - msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68); - - /*push clear cmd*/ - msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58); - if (atomic_read(&vfe_dev->error_info.overflow_state) == OVERFLOW_DETECTED) pr_err_ratelimited("%s: VFE%d halt for recovery, blocking %d\n", @@ -1479,11 +1468,8 @@ static int msm_vfe46_axi_halt(struct vfe_device *vfe_dev, static void msm_vfe46_axi_restart(struct vfe_device *vfe_dev, uint32_t blocking, uint32_t enable_camif) { - msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask, - MSM_ISP_IRQ_SET); - msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64); - msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68); - msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58); + msm_vfe46_config_irq(vfe_dev, 0x810000E0, 0xFFFFFF7E, + MSM_ISP_IRQ_ENABLE); msm_camera_io_w_mb(0x20000, vfe_dev->vfe_base + 0x3CC); /* Start AXI */ diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c index 6d1ad8ef6804..c50c55a69fb5 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c @@ -156,6 +156,9 @@ void msm_vfe47_config_irq(struct vfe_device *vfe_dev, case MSM_ISP_IRQ_ENABLE: vfe_dev->irq0_mask |= irq0_mask; vfe_dev->irq1_mask |= irq1_mask; + msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x64); + msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x68); + msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58); break; case MSM_ISP_IRQ_DISABLE: vfe_dev->irq0_mask &= ~irq0_mask; @@ -164,6 +167,9 @@ void msm_vfe47_config_irq(struct vfe_device *vfe_dev, case MSM_ISP_IRQ_SET: vfe_dev->irq0_mask = irq0_mask; vfe_dev->irq1_mask = irq1_mask; + 
msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x64); + msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x68); + msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58); break; } msm_camera_io_w_mb(vfe_dev->irq0_mask, @@ -404,9 +410,6 @@ void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev) /* IRQ_MASK/CLEAR */ vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, 0x810000E0, 0xFFFFFF7E, MSM_ISP_IRQ_ENABLE); - msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64); - msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68); - msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58); } void msm_vfe47_clear_status_reg(struct vfe_device *vfe_dev) @@ -792,6 +795,8 @@ void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev, struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data; int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info); uint32_t comp_mask, comp_mask_index; + int i; + uint32_t overflow_mask = 0; comp_mask_index = stream_info->comp_mask_index[vfe_idx]; comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74); @@ -800,8 +805,11 @@ void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev, stream_composite_mask << (comp_mask_index * 8)); msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74); + for (i = 0; i < stream_info->num_planes; i++) + overflow_mask |= (1 << (stream_info->wm[vfe_idx][i] + 9)); + vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << (comp_mask_index + 25), 0, + 1 << (comp_mask_index + 25), overflow_mask, MSM_ISP_IRQ_ENABLE); } @@ -827,7 +835,8 @@ void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev, int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info); vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << (stream_info->wm[vfe_idx][0] + 8), 0, + 1 << (stream_info->wm[vfe_idx][0] + 8), + 1 << (stream_info->wm[vfe_idx][0] + 9), MSM_ISP_IRQ_ENABLE); } @@ -1309,7 +1318,7 @@ void msm_vfe47_cfg_camif(struct vfe_device *vfe_dev, msm_camera_io_w( subsample_cfg->first_line << 16 | subsample_cfg->last_line, - vfe_dev->vfe_base + 0xCE4); + vfe_dev->vfe_base + 0xCE8); val = msm_camera_io_r( vfe_dev->vfe_base + 0x47C); ISP_DBG("%s: camif raw crop enabled\n", __func__); @@ -1417,11 +1426,8 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev, val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C); if (update_state == ENABLE_CAMIF) { - msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x64); - msm_camera_io_w(0x81, vfe_dev->vfe_base + 0x68); - msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58); vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 0x15, 0x81, + 0x15, 0x91, MSM_ISP_IRQ_ENABLE); if ((vfe_dev->hvx_cmd > HVX_DISABLE) && @@ -1452,8 +1458,8 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev, /* For testgen always halt on camif boundary */ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN) update_state = DISABLE_CAMIF; - /* turn off camif violation and error irqs */ - vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, 0, 0x81, + /* turn off camif, violation and write master overwrite irq */ + vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, 0, 0x91, MSM_ISP_IRQ_DISABLE); val = msm_camera_io_r(vfe_dev->vfe_base + 0x464); /* disable danger signal */ @@ -1471,16 +1477,6 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev, if ((vfe_dev->hvx_cmd > HVX_DISABLE) && (vfe_dev->hvx_cmd <= HVX_ROUND_TRIP)) msm_vfe47_configure_hvx(vfe_dev, 0); - /* - * restore the irq that were disabled for camif stop and clear - * the camif error interrupts if generated during that period - */ - 
msm_camera_io_w(0, vfe_dev->vfe_base + 0x64); - msm_camera_io_w(1 << 0, vfe_dev->vfe_base + 0x68); - msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58); - vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - vfe_dev->irq0_mask, - vfe_dev->irq1_mask, MSM_ISP_IRQ_SET); } } @@ -1768,16 +1764,6 @@ int msm_vfe47_axi_halt(struct vfe_device *vfe_dev, (1 << 31), (1 << 8), MSM_ISP_IRQ_SET); - /*Clear IRQ Status0, only leave reset irq mask*/ - msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64); - - /*Clear IRQ Status1, only leave halt irq mask*/ - msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68); - - /*push clear cmd*/ - msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58); - - if (atomic_read(&vfe_dev->error_info.overflow_state) == OVERFLOW_DETECTED) pr_err_ratelimited("%s: VFE%d halt for recovery, blocking %d\n", @@ -1815,12 +1801,7 @@ void msm_vfe47_axi_restart(struct vfe_device *vfe_dev, uint32_t blocking, uint32_t enable_camif) { vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - vfe_dev->irq0_mask, vfe_dev->irq1_mask, - MSM_ISP_IRQ_SET); - msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64); - msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68); - msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58); - + 0x810000E0, 0xFFFFFF7E, MSM_ISP_IRQ_ENABLE); /* Start AXI */ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x400); @@ -1832,6 +1813,8 @@ void msm_vfe47_axi_restart(struct vfe_device *vfe_dev, vfe_dev->hw_info->vfe_ops.core_ops. update_camif_state(vfe_dev, ENABLE_CAMIF); } + vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, + 0x810000E0, 0xFFFFFF7E, MSM_ISP_IRQ_ENABLE); } uint32_t msm_vfe47_get_wm_mask( @@ -1957,40 +1940,40 @@ void msm_vfe47_stats_cfg_wm_irq_mask( switch (STATS_IDX(stream_info->stream_handle[vfe_idx])) { case STATS_COMP_IDX_AEC_BG: vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << 15, 0, MSM_ISP_IRQ_ENABLE); + 1 << 15, 1 << 24, MSM_ISP_IRQ_ENABLE); break; case STATS_COMP_IDX_HDR_BE: vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << 16, 0, MSM_ISP_IRQ_ENABLE); + 1 << 16, 1 << 16, MSM_ISP_IRQ_ENABLE); break; case STATS_COMP_IDX_BG: vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << 17, 0, MSM_ISP_IRQ_ENABLE); + 1 << 17, 1 << 17, MSM_ISP_IRQ_ENABLE); break; case STATS_COMP_IDX_BF: vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << 18, 1 << 26, + 1 << 18, 1 << 26 | 1 << 18, MSM_ISP_IRQ_ENABLE); break; case STATS_COMP_IDX_HDR_BHIST: vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << 19, 0, MSM_ISP_IRQ_ENABLE); + 1 << 19, 1 << 19, MSM_ISP_IRQ_ENABLE); break; case STATS_COMP_IDX_RS: vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << 20, 0, MSM_ISP_IRQ_ENABLE); + 1 << 20, 1 << 20, MSM_ISP_IRQ_ENABLE); break; case STATS_COMP_IDX_CS: vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << 21, 0, MSM_ISP_IRQ_ENABLE); + 1 << 21, 1 << 21, MSM_ISP_IRQ_ENABLE); break; case STATS_COMP_IDX_IHIST: vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << 22, 0, MSM_ISP_IRQ_ENABLE); + 1 << 22, 1 << 22, MSM_ISP_IRQ_ENABLE); break; case STATS_COMP_IDX_BHIST: vfe_dev->hw_info->vfe_ops.irq_ops.config_irq(vfe_dev, - 1 << 23, 0, MSM_ISP_IRQ_ENABLE); + 1 << 23, 1 << 23, MSM_ISP_IRQ_ENABLE); break; default: pr_err("%s: Invalid stats idx %d\n", __func__, diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index fbc2fee5a51d..fdc98436a105 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ 
b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -14,6 +14,7 @@ #include <asm/div64.h> #include "msm_isp_util.h" #include "msm_isp_axi_util.h" +#include "msm_isp48.h" #define HANDLE_TO_IDX(handle) (handle & 0xFF) #define ISP_SOF_DEBUG_COUNT 0 @@ -2017,22 +2018,25 @@ static void msm_isp_input_disable(struct vfe_device *vfe_dev) if (i != VFE_PIX_0 || ext_read) continue; /* halt camif */ - if (total_stream_count == 0) + if (total_stream_count == 0) { vfe_dev->hw_info->vfe_ops.core_ops. update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY); - else + } else { vfe_dev->hw_info->vfe_ops.core_ops. update_camif_state(vfe_dev, DISABLE_CAMIF); + } } - /* halt and reset hardware if all streams are disabled */ if (total_stream_count == 0) { vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1); + msm_isp_flush_tasklet(vfe_dev); vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 1); - vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev); - + if (msm_vfe_is_vfe48(vfe_dev)) + vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, + 0, 1); } + } /** @@ -2719,6 +2723,20 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev, for (i = 0; i < num_streams; i++) { stream_info = streams[i]; + msm_isp_update_intf_stream_cnt(stream_info, 0); + for (k = 0; k < stream_info->num_isp; k++) { + vfe_dev = stream_info->vfe_dev[k]; + update_vfes[vfe_dev->pdev->id] = vfe_dev; + } + } + for (k = 0; k < MAX_VFE; k++) { + if (!update_vfes[k]) + continue; + msm_isp_input_disable(update_vfes[k]); + } + + for (i = 0; i < num_streams; i++) { + stream_info = streams[i]; spin_lock_irqsave(&stream_info->lock, flags); /* * since we can get here from start axi stream error path due @@ -2743,12 +2761,10 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev, else vfe_dev->hw_info->vfe_ops.axi_ops. clear_wm_irq_mask(vfe_dev, stream_info); - update_vfes[vfe_dev->pdev->id] = vfe_dev; } init_completion(&stream_info->inactive_comp); stream_info->state = STOP_PENDING; spin_unlock_irqrestore(&stream_info->lock, flags); - msm_isp_update_intf_stream_cnt(stream_info, 0); } for (k = 0; k < MAX_VFE; k++) { @@ -2843,7 +2859,6 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev, if (!update_vfes[k]) continue; msm_isp_update_stream_bandwidth(update_vfes[k]); - msm_isp_input_disable(update_vfes[k]); } for (i = 0; i < num_streams; i++) { @@ -3777,8 +3792,9 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev, (~(pingpong_status >> stream_info->wm[vfe_idx][i]) & 0x1)) { spin_unlock_irqrestore(&stream_info->lock, flags); - pr_err("%s: Write master ping pong mismatch. Status: 0x%x\n", - __func__, pingpong_status); + pr_err("%s: Write master ping pong mismatch. 
Status: 0x%x %x\n", + __func__, pingpong_status, + stream_info->stream_src); msm_isp_halt_send_error(vfe_dev, ISP_EVENT_PING_PONG_MISMATCH); return; diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c index c9656e748f09..df95e5cb9b99 100644 --- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c +++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c @@ -1535,9 +1535,6 @@ static void msm_ispif_release(struct ispif_device *ispif) { BUG_ON(!ispif); - msm_ispif_reset(ispif); - msm_ispif_reset_hw(ispif); - msm_camera_enable_irq(ispif->irq, 0); ispif->ispif_state = ISPIF_POWER_DOWN; diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c index e0d6977b24a6..106d76aae3bb 100644 --- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c +++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c @@ -2344,21 +2344,19 @@ static int msm_cpp_cfg_frame(struct cpp_device *cpp_dev, return -EINVAL; } - if (!new_frame->partial_frame_indicator) { - if (cpp_frame_msg[new_frame->msg_len - 1] != - MSM_CPP_MSG_ID_TRAILER) { - pr_err("Invalid frame message\n"); - return -EINVAL; - } + if (cpp_frame_msg[new_frame->msg_len - 1] != + MSM_CPP_MSG_ID_TRAILER) { + pr_err("Invalid frame message\n"); + return -EINVAL; + } - if ((stripe_base + new_frame->num_strips * stripe_size + 1) != - new_frame->msg_len) { - pr_err("Invalid frame message,len=%d,expected=%d\n", - new_frame->msg_len, - (stripe_base + - new_frame->num_strips * stripe_size + 1)); - return -EINVAL; - } + if ((stripe_base + new_frame->num_strips * stripe_size + 1) != + new_frame->msg_len) { + pr_err("Invalid frame message,len=%d,expected=%d\n", + new_frame->msg_len, + (stripe_base + + new_frame->num_strips * stripe_size + 1)); + return -EINVAL; } if (cpp_dev->iommu_state != CPP_IOMMU_STATE_ATTACHED) { diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c index a18840b1a1a4..88a3b4b6f7ba 100644 --- a/drivers/media/platform/msm/vidc/hfi_response_handler.c +++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c @@ -112,6 +112,7 @@ static int hfi_process_sess_evt_seq_changed(u32 device_id, u8 *data_ptr; int prop_id; enum msm_vidc_pixel_depth luma_bit_depth, chroma_bit_depth; + struct hfi_colour_space *colour_info; if (sizeof(struct hfi_msg_event_notify_packet) > pkt->size) { dprintk(VIDC_ERR, @@ -205,6 +206,18 @@ static int hfi_process_sess_evt_seq_changed(u32 device_id, data_ptr += sizeof(struct hfi_pic_struct); break; + case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE: + data_ptr = data_ptr + sizeof(u32); + colour_info = + (struct hfi_colour_space *) data_ptr; + event_notify.colour_space = + colour_info->colour_space; + dprintk(VIDC_DBG, + "Colour space value is: %d\n", + colour_info->colour_space); + data_ptr += + sizeof(struct hfi_colour_space); + break; default: dprintk(VIDC_ERR, "%s cmd: %#x not supported\n", diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index b12eeddc678f..93e32ef4ac35 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -1172,6 +1172,7 @@ void *msm_vidc_open(int core_id, int session_type) inst->bit_depth = MSM_VIDC_BIT_DEPTH_8; inst->instant_bitrate = 0; inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE; + inst->colour_space = MSM_VIDC_BT601_6_525; for (i = SESSION_MSG_INDEX(SESSION_MSG_START); i 
<= SESSION_MSG_INDEX(SESSION_MSG_END); i++) { diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index d1cc08d53017..e612c6ed11c7 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -1200,29 +1200,46 @@ static void handle_event_change(enum hal_command_response cmd, void *data) * ptr[2] = flag to indicate bit depth or/and pic struct changed * ptr[3] = bit depth * ptr[4] = pic struct (progressive or interlaced) + * ptr[5] = colour space */ ptr = (u32 *)seq_changed_event.u.data; - ptr[2] = 0x0; - ptr[3] = inst->bit_depth; - ptr[4] = inst->pic_struct; - if (inst->bit_depth != event_notify->bit_depth) { - inst->bit_depth = event_notify->bit_depth; - ptr[2] |= V4L2_EVENT_BITDEPTH_FLAG; + if (ptr != NULL) { + ptr[2] = 0x0; ptr[3] = inst->bit_depth; - event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT; - dprintk(VIDC_DBG, - "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to bit-depth change\n"); - } - - if (inst->pic_struct != event_notify->pic_struct) { - inst->pic_struct = event_notify->pic_struct; - event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT; - ptr[2] |= V4L2_EVENT_PICSTRUCT_FLAG; ptr[4] = inst->pic_struct; - dprintk(VIDC_DBG, - "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to pic-struct change\n"); + ptr[5] = inst->colour_space; + + if (inst->bit_depth != event_notify->bit_depth) { + inst->bit_depth = event_notify->bit_depth; + ptr[2] |= V4L2_EVENT_BITDEPTH_FLAG; + ptr[3] = inst->bit_depth; + event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT; + dprintk(VIDC_DBG, + "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to bit-depth change\n"); + } + + if (inst->pic_struct != event_notify->pic_struct) { + inst->pic_struct = event_notify->pic_struct; + event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT; + ptr[2] |= V4L2_EVENT_PICSTRUCT_FLAG; + ptr[4] = inst->pic_struct; + dprintk(VIDC_DBG, + "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to pic-struct change\n"); + } + + if (inst->bit_depth == MSM_VIDC_BIT_DEPTH_10 + && inst->colour_space != + event_notify->colour_space) { + inst->colour_space = event_notify->colour_space; + event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT; + ptr[2] |= V4L2_EVENT_COLOUR_SPACE_FLAG; + ptr[5] = inst->colour_space; + dprintk(VIDC_DBG, + "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to colour space change\n"); + } + } if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) { @@ -3688,6 +3705,10 @@ static void log_frame(struct msm_vidc_inst *inst, struct vidc_frame_data *data, if (msm_comm_scale_clocks(inst->core)) dprintk(VIDC_WARN, "Failed to scale clocks. Performance might be impacted\n"); + + if (msm_comm_vote_bus(inst->core)) + dprintk(VIDC_WARN, + "Failed to scale bus. 
Performance might be impacted\n"); } static int request_seq_header(struct msm_vidc_inst *inst, diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c index efb90c69881f..011941c6d4eb 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c @@ -157,7 +157,7 @@ struct dentry *msm_vidc_debugfs_init_drv(void) struct dentry *f = debugfs_create_##__type(__name, S_IRUGO | S_IWUSR, \ dir, __value); \ if (IS_ERR_OR_NULL(f)) { \ - dprintk(VIDC_ERR, "Failed creating debugfs file '%pKd/%s'\n", \ + dprintk(VIDC_ERR, "Failed creating debugfs file '%pd/%s'\n", \ dir, __name); \ f = NULL; \ } \ @@ -349,7 +349,7 @@ struct dentry *msm_vidc_debugfs_init_inst(struct msm_vidc_inst *inst, dprintk(VIDC_ERR, "Invalid params, inst: %pK\n", inst); goto failed_create_dir; } - snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%pK", inst); + snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%p", inst); dir = debugfs_create_dir(debugfs_name, parent); if (!dir) { dprintk(VIDC_ERR, "Failed to create debugfs for msm_vidc\n"); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h index 161e94f99040..ffe4456570e3 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h @@ -297,6 +297,7 @@ struct msm_vidc_inst { u32 buffers_held_in_driver; atomic_t in_flush; u32 pic_struct; + u32 colour_space; }; extern struct msm_vidc_drv *vidc_driver; diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index 34ab36a4647b..aa566159c393 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -1357,6 +1357,7 @@ struct msm_vidc_cb_event { ion_phys_addr_t packet_buffer; ion_phys_addr_t extra_data_buffer; u32 pic_struct; + u32 colour_space; }; struct msm_vidc_cb_data_done { diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h index 23240746baf1..5e5ef6abc303 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h @@ -293,6 +293,8 @@ struct hfi_buffer_info { (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x007) #define HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT \ (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x009) +#define HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE \ + (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x00A) #define HFI_PROPERTY_CONFIG_VDEC_COMMON_START \ @@ -435,6 +437,10 @@ struct hfi_bitrate { u32 layer_id; }; +struct hfi_colour_space { + u32 colour_space; +}; + #define HFI_CAPABILITY_FRAME_WIDTH (HFI_COMMON_BASE + 0x1) #define HFI_CAPABILITY_FRAME_HEIGHT (HFI_COMMON_BASE + 0x2) #define HFI_CAPABILITY_MBS_PER_FRAME (HFI_COMMON_BASE + 0x3) diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 2764f43607c1..0e7d16fe84d4 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -1388,47 +1388,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, static long uvc_v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) { + struct uvc_fh *handle = file->private_data; union { struct uvc_xu_control_mapping xmap; struct uvc_xu_control_query xqry; } karg; void __user *up = compat_ptr(arg); - mm_segment_t old_fs; long ret; switch (cmd) { case UVCIOC_CTRL_MAP32: - cmd = UVCIOC_CTRL_MAP; ret = 
uvc_v4l2_get_xu_mapping(&karg.xmap, up); + if (ret) + return ret; + ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap); + if (ret) + return ret; + ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up); + if (ret) + return ret; + break; case UVCIOC_CTRL_QUERY32: - cmd = UVCIOC_CTRL_QUERY; ret = uvc_v4l2_get_xu_query(&karg.xqry, up); + if (ret) + return ret; + ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry); + if (ret) + return ret; + ret = uvc_v4l2_put_xu_query(&karg.xqry, up); + if (ret) + return ret; break; default: return -ENOIOCTLCMD; } - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = video_ioctl2(file, cmd, (unsigned long)&karg); - set_fs(old_fs); - - if (ret < 0) - return ret; - - switch (cmd) { - case UVCIOC_CTRL_MAP: - ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up); - break; - - case UVCIOC_CTRL_QUERY: - ret = uvc_v4l2_put_xu_query(&karg.xqry, up); - break; - } - return ret; } #endif diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 2da7fd7deacd..2f1c03783414 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -280,7 +280,8 @@ static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) { if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) || - copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format))) + copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) || + copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) return -EFAULT; return __put_v4l2_format32(&kp->format, &up->format); } diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 6515dfc2b805..55cba89dbdb8 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -394,7 +394,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p) gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay); gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, - GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay); + GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay); gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, GPMC_CONFIG6_CYCLE2CYCLESAMECSEN, p->cycle2cyclesamecsen); diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 68aa31ae553a..88e80ec772f6 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -33,6 +33,7 @@ #define LPSS_DEV_SIZE 0x200 #define LPSS_PRIV_OFFSET 0x200 #define LPSS_PRIV_SIZE 0x100 +#define LPSS_PRIV_REG_COUNT (LPSS_PRIV_SIZE / 4) #define LPSS_IDMA64_OFFSET 0x800 #define LPSS_IDMA64_SIZE 0x800 @@ -75,6 +76,7 @@ struct intel_lpss { const struct mfd_cell *cell; struct device *dev; void __iomem *priv; + u32 priv_ctx[LPSS_PRIV_REG_COUNT]; int devid; u32 caps; u32 active_ltr; @@ -485,6 +487,16 @@ EXPORT_SYMBOL_GPL(intel_lpss_prepare); int intel_lpss_suspend(struct device *dev) { + struct intel_lpss *lpss = dev_get_drvdata(dev); + unsigned int i; + + /* Save device context */ + for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) + lpss->priv_ctx[i] = readl(lpss->priv + i * 4); + + /* Put the device into reset state */ + writel(0, lpss->priv + LPSS_PRIV_RESETS); + return 0; } EXPORT_SYMBOL_GPL(intel_lpss_suspend); @@ -492,8 +504,13 @@ EXPORT_SYMBOL_GPL(intel_lpss_suspend); int intel_lpss_resume(struct device *dev) { struct intel_lpss *lpss = dev_get_drvdata(dev); + unsigned int i; - intel_lpss_init_dev(lpss); + intel_lpss_deassert_reset(lpss); + + /* 
Restore device context */ + for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) + writel(lpss->priv_ctx[i], lpss->priv + i * 4); return 0; } diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c index d9e15cf7c6c8..12d6ebb4ae5d 100644 --- a/drivers/mfd/intel_soc_pmic_core.c +++ b/drivers/mfd/intel_soc_pmic_core.c @@ -35,6 +35,7 @@ static struct gpiod_lookup_table panel_gpio_table = { .table = { /* Panel EN/DISABLE */ GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH), + { }, }, }; diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index b7b3e8ee64f2..c30290f33430 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -269,6 +269,8 @@ static int usbtll_omap_probe(struct platform_device *pdev) if (IS_ERR(tll->ch_clk[i])) dev_dbg(dev, "can't get clock : %s\n", clkname); + else + clk_prepare(tll->ch_clk[i]); } pm_runtime_put_sync(dev); @@ -301,9 +303,12 @@ static int usbtll_omap_remove(struct platform_device *pdev) tll_dev = NULL; spin_unlock(&tll_lock); - for (i = 0; i < tll->nch; i++) - if (!IS_ERR(tll->ch_clk[i])) + for (i = 0; i < tll->nch; i++) { + if (!IS_ERR(tll->ch_clk[i])) { + clk_unprepare(tll->ch_clk[i]); clk_put(tll->ch_clk[i]); + } + } pm_runtime_disable(&pdev->dev); return 0; @@ -420,7 +425,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata) if (IS_ERR(tll->ch_clk[i])) continue; - r = clk_prepare_enable(tll->ch_clk[i]); + r = clk_enable(tll->ch_clk[i]); if (r) { dev_err(tll_dev, "Error enabling ch %d clock: %d\n", i, r); @@ -448,7 +453,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata) for (i = 0; i < tll->nch; i++) { if (omap_usb_mode_needs_tll(pdata->port_mode[i])) { if (!IS_ERR(tll->ch_clk[i])) - clk_disable_unprepare(tll->ch_clk[i]); + clk_disable(tll->ch_clk[i]); } } diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c index 76add503b6b8..69ec7127102c 100644 --- a/drivers/misc/hdcp.c +++ b/drivers/misc/hdcp.c @@ -160,43 +160,46 @@ static const struct hdcp_msg_data hdcp_msg_lookup[HDCP2P2_MAX_MESSAGES] = { [AKE_INIT_MESSAGE_ID] = { 2, - { {0x69000, 8}, {0x69008, 3} }, + { {"rtx", 0x69000, 8}, {"TxCaps", 0x69008, 3} }, 0 }, [AKE_SEND_CERT_MESSAGE_ID] = { 3, - { {0x6900B, 522}, {0x69215, 8}, {0x6921D, 3} }, + { {"cert-rx", 0x6900B, 522}, {"rrx", 0x69215, 8}, + {"RxCaps", 0x6921D, 3} }, 0 }, [AKE_NO_STORED_KM_MESSAGE_ID] = { 1, - { {0x69220, 128} }, + { {"Ekpub_km", 0x69220, 128} }, 0 }, [AKE_STORED_KM_MESSAGE_ID] = { 2, - { {0x692A0, 16}, {0x692B0, 16} }, + { {"Ekh_km", 0x692A0, 16}, {"m", 0x692B0, 16} }, 0 }, [AKE_SEND_H_PRIME_MESSAGE_ID] = { 1, - { {0x692C0, 32} }, + { {"H'", 0x692C0, 32} }, (1 << 1) }, [AKE_SEND_PAIRING_INFO_MESSAGE_ID] = { 1, - { {0x692E0, 16} }, + { {"Ekh_km", 0x692E0, 16} }, (1 << 2) }, [LC_INIT_MESSAGE_ID] = { 1, - { {0x692F0, 8} }, + { {"rn", 0x692F0, 8} }, 0 }, [LC_SEND_L_PRIME_MESSAGE_ID] = { 1, - { {0x692F8, 32} }, + { {"L'", 0x692F8, 32} }, 0 }, [SKE_SEND_EKS_MESSAGE_ID] = { 2, - { {0x69318, 16}, {0x69328, 8} }, + { {"Edkey_ks", 0x69318, 16}, {"riv", 0x69328, 8} }, 0 }, [REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID] = { 4, - { {0x69330, 2}, {0x69332, 3}, {0x69335, 16}, {0x69345, 155} }, + { {"RxInfo", 0x69330, 2}, {"seq_num_V", 0x69332, 3}, + {"V'", 0x69335, 16}, {"ridlist", 0x69345, 155} }, (1 << 0) }, [REPEATER_AUTH_SEND_ACK_MESSAGE_ID] = { 1, - { {0x693E0, 16} }, + { {"V", 0x693E0, 16} }, 0 }, [REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID] = { 3, - { {0x693F0, 3}, {0x693F3, 2}, {0x693F5, 126} }, + { {"seq_num_M", 0x693F0, 3}, {"k", 0x693F3, 2}, + 
{"streamID_Type", 0x693F5, 126} }, 0 }, [REPEATER_AUTH_STREAM_READY_MESSAGE_ID] = { 1, - { {0x69473, 32} }, + { {"M'", 0x69473, 32} }, 0 } }; @@ -616,36 +619,59 @@ static int hdcp_lib_get_next_message(struct hdcp_lib_handle *handle, } } -static inline void hdcp_lib_wakeup_client(struct hdcp_lib_handle *handle, - struct hdmi_hdcp_wakeup_data *data) +static void hdcp_lib_wakeup_client(struct hdcp_lib_handle *handle, + struct hdmi_hdcp_wakeup_data *data) { - int rc = 0; + int rc = 0, i; - if (handle && handle->client_ops && handle->client_ops->wakeup && - data && (data->cmd != HDMI_HDCP_WKUP_CMD_INVALID)) { - data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE; + if (!handle || !handle->client_ops || !handle->client_ops->wakeup || + !data || (data->cmd == HDMI_HDCP_WKUP_CMD_INVALID)) + return; - if (data->cmd == HDMI_HDCP_WKUP_CMD_SEND_MESSAGE || - data->cmd == HDMI_HDCP_WKUP_CMD_RECV_MESSAGE || - data->cmd == HDMI_HDCP_WKUP_CMD_LINK_POLL) { - handle->last_msg = - hdcp_lib_get_next_message(handle, data); + data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE; - if (handle->last_msg > INVALID_MESSAGE_ID && - handle->last_msg < HDCP2P2_MAX_MESSAGES) - data->message_data = - &hdcp_msg_lookup[handle->last_msg]; - } + if (data->cmd == HDMI_HDCP_WKUP_CMD_SEND_MESSAGE || + data->cmd == HDMI_HDCP_WKUP_CMD_RECV_MESSAGE || + data->cmd == HDMI_HDCP_WKUP_CMD_LINK_POLL) { + handle->last_msg = hdcp_lib_get_next_message(handle, data); - rc = handle->client_ops->wakeup(data); - if (rc) - pr_err("error sending %s to client\n", - hdmi_hdcp_cmd_to_str(data->cmd)); + pr_debug("lib->client: %s (%s)\n", + hdmi_hdcp_cmd_to_str(data->cmd), + hdcp_lib_message_name(handle->last_msg)); + + if (handle->last_msg > INVALID_MESSAGE_ID && + handle->last_msg < HDCP2P2_MAX_MESSAGES) { + u32 msg_num, rx_status; + const struct hdcp_msg_part *msg; + + data->message_data = &hdcp_msg_lookup[handle->last_msg]; + + msg_num = data->message_data->num_messages; + msg = data->message_data->messages; + rx_status = data->message_data->rx_status; + + pr_debug("rxstatus 0x%x\n", rx_status); + pr_debug("%10s | %6s | %4s\n", "name", "offset", "len"); + + for (i = 0; i < msg_num; i++) + pr_debug("%10s | %6x | %4d\n", + msg[i].name, msg[i].offset, + msg[i].length); + } + } else { + pr_debug("lib->client: %s\n", + hdmi_hdcp_cmd_to_str(data->cmd)); } + + rc = handle->client_ops->wakeup(data); + if (rc) + pr_err("error sending %s to client\n", + hdmi_hdcp_cmd_to_str(data->cmd)); } static inline void hdcp_lib_send_message(struct hdcp_lib_handle *handle) { + char msg_name[50]; struct hdmi_hdcp_wakeup_data cdata = { HDMI_HDCP_WKUP_CMD_SEND_MESSAGE }; @@ -655,6 +681,13 @@ static inline void hdcp_lib_send_message(struct hdcp_lib_handle *handle) cdata.send_msg_len = handle->msglen; cdata.timeout = handle->hdcp_timeout; + snprintf(msg_name, sizeof(msg_name), "%s: ", + hdcp_lib_message_name((int)cdata.send_msg_buf[0])); + + print_hex_dump(KERN_DEBUG, msg_name, + DUMP_PREFIX_NONE, 16, 1, cdata.send_msg_buf, + cdata.send_msg_len, false); + hdcp_lib_wakeup_client(handle, &cdata); } @@ -1486,6 +1519,7 @@ static int hdcp_lib_check_valid_state(struct hdcp_lib_handle *handle) if (handle->wakeup_cmd == HDCP_LIB_WKUP_CMD_START) { if (!list_empty(&handle->worker.work_list)) { + pr_debug("error: queue not empty\n"); rc = -EBUSY; goto exit; } @@ -1543,9 +1577,8 @@ static int hdcp_lib_wakeup(struct hdcp_lib_wakeup_data *data) handle->wakeup_cmd = data->cmd; handle->timeout_left = data->timeout; - pr_debug("%s, timeout left: %dms, tethered %d\n", - 
hdcp_lib_cmd_to_str(handle->wakeup_cmd), - handle->timeout_left, handle->tethered); + pr_debug("client->lib: %s\n", + hdcp_lib_cmd_to_str(handle->wakeup_cmd)); rc = hdcp_lib_check_valid_state(handle); if (rc) @@ -1599,6 +1632,8 @@ static int hdcp_lib_wakeup(struct hdcp_lib_wakeup_data *data) break; case HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED: case HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED: + case HDCP_LIB_WKUP_CMD_LINK_FAILED: + handle->hdcp_state |= HDCP_STATE_ERROR; HDCP_LIB_EXECUTE(clean); break; case HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS: @@ -1825,7 +1860,7 @@ static void hdcp_lib_clean(struct hdcp_lib_handle *handle) if (!handle) { pr_err("invalid input\n"); return; - }; + } hdcp_lib_txmtr_deinit(handle); if (!handle->legacy_app) @@ -1859,6 +1894,7 @@ static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle) struct hdcp_rcvd_msg_rsp *rsp_buf; uint32_t msglen; char *msg = NULL; + char msg_name[50]; uint32_t message_id_bytes = 0; if (!handle || !handle->qseecom_handle || @@ -1907,8 +1943,11 @@ static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle) mutex_unlock(&handle->msg_lock); - pr_debug("msg received: %s from sink\n", - hdcp_lib_message_name((int)msg[0])); + snprintf(msg_name, sizeof(msg_name), "%s: ", + hdcp_lib_message_name((int)msg[0])); + + print_hex_dump(KERN_DEBUG, msg_name, + DUMP_PREFIX_NONE, 16, 1, msg, msglen, false); /* send the message to QSEECOM */ req_buf = (struct hdcp_rcvd_msg_req *)(handle->qseecom_handle->sbuf); @@ -1989,13 +2028,8 @@ static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle) handle->hdcp_timeout = rsp_buf->timeout; handle->msglen = rsp_buf->msglen; - if (!atomic_read(&handle->hdcp_off)) { - cdata.cmd = HDMI_HDCP_WKUP_CMD_SEND_MESSAGE; - cdata.send_msg_buf = handle->listener_buf; - cdata.send_msg_len = handle->msglen; - cdata.timeout = handle->hdcp_timeout; - } - + if (!atomic_read(&handle->hdcp_off)) + hdcp_lib_send_message(handle); exit: kzfree(msg); @@ -2026,6 +2060,16 @@ static void hdcp_lib_topology_work(struct kthread_work *work) return; } + if (atomic_read(&handle->hdcp_off)) { + pr_debug("invalid state: hdcp off\n"); + return; + } + + if (handle->hdcp_state & HDCP_STATE_ERROR) { + pr_debug("invalid state: hdcp error\n"); + return; + } + reinit_completion(&handle->topo_wait); timeout = wait_for_completion_timeout(&handle->topo_wait, HZ * 3); if (!timeout) { diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index cd0403f09267..e79c0371ee6f 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c @@ -417,8 +417,10 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl, dev = cl->dev; - if (dev->iamthif_state != MEI_IAMTHIF_READING) + if (dev->iamthif_state != MEI_IAMTHIF_READING) { + mei_irq_discard_msg(dev, mei_hdr); return 0; + } ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list); if (ret) diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 1a173d0af694..a77643954523 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -222,17 +222,23 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv); static void mei_cl_bus_event_work(struct work_struct *work) { struct mei_cl_device *cldev; + struct mei_device *bus; cldev = container_of(work, struct mei_cl_device, event_work); + bus = cldev->bus; + if (cldev->event_cb) cldev->event_cb(cldev, cldev->events, cldev->event_context); cldev->events = 0; /* Prepare for the next read */ - if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) + if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { + mutex_lock(&bus->device_lock); mei_cl_read_start(cldev->cl, 0, NULL); + 
mutex_unlock(&bus->device_lock); + } } /** @@ -296,6 +302,7 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev, unsigned long events_mask, mei_cldev_event_cb_t event_cb, void *context) { + struct mei_device *bus = cldev->bus; int ret; if (cldev->event_cb) @@ -308,15 +315,17 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev, INIT_WORK(&cldev->event_work, mei_cl_bus_event_work); if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { + mutex_lock(&bus->device_lock); ret = mei_cl_read_start(cldev->cl, 0, NULL); + mutex_unlock(&bus->device_lock); if (ret && ret != -EBUSY) return ret; } if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) { - mutex_lock(&cldev->cl->dev->device_lock); + mutex_lock(&bus->device_lock); ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0); - mutex_unlock(&cldev->cl->dev->device_lock); + mutex_unlock(&bus->device_lock); if (ret) return ret; } diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index a6c87c713193..958af84884b5 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1735,6 +1735,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) wake_up(&cl->wait); break; + case MEI_FOP_DISCONNECT_RSP: + mei_io_cb_free(cb); + mei_cl_set_disconnected(cl); + break; default: BUG_ON(0); } diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index e7b7aad0999b..fd8a9f057ea6 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -873,8 +873,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev, cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL); if (!cb) return -ENOMEM; - cl_dbg(dev, cl, "add disconnect response as first\n"); - list_add(&cb->list, &dev->ctrl_wr_list.list); + list_add_tail(&cb->list, &dev->ctrl_wr_list.list); } return 0; } diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 64b568a0268d..d1df797c7568 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -76,7 +76,6 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl, * @dev: mei device * @hdr: message header */ -static inline void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr) { /* @@ -184,10 +183,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, return -EMSGSIZE; ret = mei_hbm_cl_disconnect_rsp(dev, cl); - mei_cl_set_disconnected(cl); - mei_io_cb_free(cb); - mei_me_cl_put(cl->me_cl); - cl->me_cl = NULL; + list_move_tail(&cb->list, &cmpl_list->list); return ret; } diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 4250555d5e72..1b06e2fd6858 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -782,6 +782,8 @@ bool mei_hbuf_acquire(struct mei_device *dev); bool mei_write_is_idle(struct mei_device *dev); +void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr); + #if IS_ENABLED(CONFIG_DEBUG_FS) int mei_dbgfs_register(struct mei_device *dev, const char *name); void mei_dbgfs_deregister(struct mei_device *dev); diff --git a/drivers/misc/qcom/qdsp6v2/amrwb_in.c b/drivers/misc/qcom/qdsp6v2/amrwb_in.c index 4f94ed2673e6..5e9dbca420a7 100644 --- a/drivers/misc/qcom/qdsp6v2/amrwb_in.c +++ b/drivers/misc/qcom/qdsp6v2/amrwb_in.c @@ -310,7 +310,7 @@ static int amrwb_in_open(struct inode *inode, struct file *file) (void *)audio); if (!audio->ac) { - pr_err("%s:audio[%p]: Could not allocate memory for audio" + pr_err("%s:audio[%pK]: Could not allocate memory for audio" "client\n", __func__, audio); kfree(audio->enc_cfg); kfree(audio); diff 
--git a/drivers/misc/qcom/qdsp6v2/audio_aac.c b/drivers/misc/qcom/qdsp6v2/audio_aac.c index e49d91d74514..94d563a211ec 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_aac.c +++ b/drivers/misc/qcom/qdsp6v2/audio_aac.c @@ -2,7 +2,7 @@ * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation - * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -221,10 +221,10 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } default: { - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); if (rc) - pr_err("%s[%p]:Failed in utils_ioctl: %d\n", + pr_err("%s[%pK]:Failed in utils_ioctl: %d\n", __func__, audio, rc); } } @@ -328,10 +328,10 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd, break; } default: { - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_compat_ioctl(file, cmd, arg); if (rc) - pr_err("%s[%p]:Failed in utils_ioctl: %d\n", + pr_err("%s[%pK]:Failed in utils_ioctl: %d\n", __func__, audio, rc); } } diff --git a/drivers/misc/qcom/qdsp6v2/audio_alac.c b/drivers/misc/qcom/qdsp6v2/audio_alac.c index 3de204c1ebc8..f25c8ae47b4c 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_alac.c +++ b/drivers/misc/qcom/qdsp6v2/audio_alac.c @@ -52,7 +52,7 @@ static long audio_ioctl_shared(struct file *file, unsigned int cmd, __func__, audio->pcm_cfg.channel_count); } - pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ diff --git a/drivers/misc/qcom/qdsp6v2/audio_amrnb.c b/drivers/misc/qcom/qdsp6v2/audio_amrnb.c index 1625adb82be9..78bcdb74af0e 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_amrnb.c +++ b/drivers/misc/qcom/qdsp6v2/audio_amrnb.c @@ -2,7 +2,7 @@ * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation - * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -33,7 +33,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case AUDIO_START: { - pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ @@ -62,7 +62,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } default: - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); } return rc; diff --git a/drivers/misc/qcom/qdsp6v2/audio_amrwb.c b/drivers/misc/qcom/qdsp6v2/audio_amrwb.c index c7ff607414a5..2283cf26bda9 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_amrwb.c +++ b/drivers/misc/qcom/qdsp6v2/audio_amrwb.c @@ -2,7 +2,7 @@ * * Copyright (C) 2008 Google, Inc. 
* Copyright (C) 2008 HTC Corporation - * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -34,7 +34,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case AUDIO_START: { - pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ @@ -65,7 +65,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } default: - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); } return rc; diff --git a/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c b/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c index bfd730017d41..727a5369c2a9 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c +++ b/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c @@ -55,7 +55,7 @@ static long audio_ioctl_shared(struct file *file, unsigned int cmd, switch (cmd) { case AUDIO_START: { - pr_err("%s[%p]: AUDIO_START session_id[%d]\n", __func__, + pr_err("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ @@ -162,7 +162,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, break; } default: { - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); break; } @@ -278,7 +278,7 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd, break; } default: { - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_compat_ioctl(file, cmd, arg); break; } diff --git a/drivers/misc/qcom/qdsp6v2/audio_ape.c b/drivers/misc/qcom/qdsp6v2/audio_ape.c index 670ec555b8c6..d7d550c40dff 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_ape.c +++ b/drivers/misc/qcom/qdsp6v2/audio_ape.c @@ -39,7 +39,7 @@ static long audio_ioctl_shared(struct file *file, unsigned int cmd, case AUDIO_START: { struct asm_ape_cfg ape_cfg; struct msm_audio_ape_config *ape_config; - pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ @@ -133,7 +133,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } default: { - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); if (rc) pr_err("Failed in utils_ioctl: %d\n", rc); @@ -231,7 +231,7 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd, break; } default: { - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_compat_ioctl(file, cmd, arg); if (rc) pr_err("Failed in utils_ioctl: %d\n", rc); diff --git a/drivers/misc/qcom/qdsp6v2/audio_evrc.c b/drivers/misc/qcom/qdsp6v2/audio_evrc.c index 08ca94e62059..5a89f4e25a27 100644 
--- a/drivers/misc/qcom/qdsp6v2/audio_evrc.c +++ b/drivers/misc/qcom/qdsp6v2/audio_evrc.c @@ -2,7 +2,7 @@ * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation - * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -34,7 +34,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case AUDIO_START: { - pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ @@ -65,7 +65,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } default: - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); } return rc; diff --git a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c index 3632fc2b961b..940fd08654d2 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c +++ b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c @@ -99,7 +99,7 @@ static void audio_effects_event_handler(uint32_t opcode, uint32_t token, struct q6audio_effects *effects; if (!payload || !priv) { - pr_err("%s: invalid data to handle events, payload: %p, priv: %p\n", + pr_err("%s: invalid data to handle events, payload: %pK, priv: %pK\n", __func__, payload, priv); return; } @@ -705,7 +705,7 @@ static int audio_effects_release(struct inode *inode, struct file *file) __func__); rc = q6asm_cmd(effects->ac, CMD_CLOSE); if (rc < 0) - pr_err("%s[%p]:Failed to close the session rc=%d\n", + pr_err("%s[%pK]:Failed to close the session rc=%d\n", __func__, effects, rc); effects->opened = 0; effects->started = 0; diff --git a/drivers/misc/qcom/qdsp6v2/audio_mp3.c b/drivers/misc/qcom/qdsp6v2/audio_mp3.c index 83e300721c8e..fa5132e83ff4 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_mp3.c +++ b/drivers/misc/qcom/qdsp6v2/audio_mp3.c @@ -2,7 +2,7 @@ * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation - * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -33,7 +33,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) int rc = 0; switch (cmd) { case AUDIO_START: { - pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ @@ -69,7 +69,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } default: - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); } return rc; diff --git a/drivers/misc/qcom/qdsp6v2/audio_qcelp.c b/drivers/misc/qcom/qdsp6v2/audio_qcelp.c index 653aee9c8eff..508a95b7bf79 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_qcelp.c +++ b/drivers/misc/qcom/qdsp6v2/audio_qcelp.c @@ -2,7 +2,7 @@ * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation - * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -36,7 +36,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case AUDIO_START: { - pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ @@ -67,7 +67,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } default: - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); } return rc; diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils.c b/drivers/misc/qcom/qdsp6v2/audio_utils.c index 840597314a5f..15d82d126df7 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_utils.c +++ b/drivers/misc/qcom/qdsp6v2/audio_utils.c @@ -757,7 +757,7 @@ ssize_t audio_in_read(struct file *file, count -= bytes_to_copy; buf += bytes_to_copy; } else { - pr_err("%s:session id %d: short read data[%p] bytesavail[%d]bytesrequest[%zd]\n", + pr_err("%s:session id %d: short read data[%pK] bytesavail[%d]bytesrequest[%zd]\n", __func__, audio->ac->session, data, size, count); @@ -896,7 +896,7 @@ ssize_t audio_in_write(struct file *file, buf += xfer; } mutex_unlock(&audio->write_lock); - pr_debug("%s:session id %d: eos_condition 0x%x buf[0x%p] start[0x%p]\n", + pr_debug("%s:session id %d: eos_condition 0x%x buf[0x%pK] start[0x%pK]\n", __func__, audio->ac->session, nflags, buf, start); if (nflags & AUD_EOS_SET) { diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c index 567c948b0efe..c963280e5bf5 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c +++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c @@ -83,7 +83,7 @@ int insert_eos_buf(struct q6audio_aio *audio, struct audio_aio_buffer_node *buf_node) { struct dec_meta_out *eos_buf = buf_node->kvaddr; - pr_debug("%s[%p]:insert_eos_buf\n", __func__, audio); + pr_debug("%s[%pK]:insert_eos_buf\n", __func__, audio); eos_buf->num_of_frames = 0xFFFFFFFF; 
eos_buf->meta_out_dsp[0].offset_to_frame = 0x0; eos_buf->meta_out_dsp[0].nflags = AUDIO_DEC_EOS_SET; @@ -131,14 +131,14 @@ static int audio_aio_ion_lookup_vaddr(struct q6audio_aio *audio, void *addr, } if (match_count > 1) { - pr_err("%s[%p]:multiple hits for vaddr %p, len %ld\n", + pr_err("%s[%pK]:multiple hits for vaddr %pK, len %ld\n", __func__, audio, addr, len); list_for_each_entry(region_elt, &audio->ion_region_queue, list) { if (addr >= region_elt->vaddr && addr < region_elt->vaddr + region_elt->len && addr + len <= region_elt->vaddr + region_elt->len) - pr_err("\t%s[%p]:%p, %ld --> %pa\n", + pr_err("\t%s[%pK]:%pK, %ld --> %pK\n", __func__, audio, region_elt->vaddr, region_elt->len, @@ -158,7 +158,7 @@ static phys_addr_t audio_aio_ion_fixup(struct q6audio_aio *audio, void *addr, ret = audio_aio_ion_lookup_vaddr(audio, addr, len, &region); if (ret) { - pr_err("%s[%p]:lookup (%p, %ld) failed\n", + pr_err("%s[%pK]:lookup (%pK, %ld) failed\n", __func__, audio, addr, len); return 0; } @@ -166,7 +166,7 @@ static phys_addr_t audio_aio_ion_fixup(struct q6audio_aio *audio, void *addr, region->ref_cnt++; else region->ref_cnt--; - pr_debug("%s[%p]:found region %p ref_cnt %d\n", + pr_debug("%s[%pK]:found region %pK ref_cnt %d\n", __func__, audio, region, region->ref_cnt); paddr = region->paddr + (addr - region->vaddr); /* provide kernel virtual address for accessing meta information */ @@ -179,26 +179,26 @@ static int audio_aio_pause(struct q6audio_aio *audio) { int rc = -EINVAL; - pr_debug("%s[%p], enabled = %d\n", __func__, audio, + pr_debug("%s[%pK], enabled = %d\n", __func__, audio, audio->enabled); if (audio->enabled) { rc = q6asm_cmd(audio->ac, CMD_PAUSE); if (rc < 0) - pr_err("%s[%p]: pause cmd failed rc=%d\n", + pr_err("%s[%pK]: pause cmd failed rc=%d\n", __func__, audio, rc); if (rc == 0) { /* Send suspend only if pause was successful */ rc = q6asm_cmd(audio->ac, CMD_SUSPEND); if (rc < 0) - pr_err("%s[%p]: suspend cmd failed rc=%d\n", + pr_err("%s[%pK]: suspend cmd failed rc=%d\n", __func__, audio, rc); } else - pr_err("%s[%p]: not sending suspend since pause failed\n", + pr_err("%s[%pK]: not sending suspend since pause failed\n", __func__, audio); } else - pr_err("%s[%p]: Driver not enabled\n", __func__, audio); + pr_err("%s[%pK]: Driver not enabled\n", __func__, audio); return rc; } @@ -212,7 +212,7 @@ static int audio_aio_flush(struct q6audio_aio *audio) if (!(audio->drv_status & ADRV_STATUS_PAUSE)) { rc = audio_aio_pause(audio); if (rc < 0) - pr_err("%s[%p}: pause cmd failed rc=%d\n", + pr_err("%s[%pK}: pause cmd failed rc=%d\n", __func__, audio, rc); else @@ -220,13 +220,13 @@ static int audio_aio_flush(struct q6audio_aio *audio) } rc = q6asm_cmd(audio->ac, CMD_FLUSH); if (rc < 0) - pr_err("%s[%p]: flush cmd failed rc=%d\n", + pr_err("%s[%pK]: flush cmd failed rc=%d\n", __func__, audio, rc); /* Not in stop state, reenable the stream */ if (audio->stopped == 0) { rc = audio_aio_enable(audio); if (rc) - pr_err("%s[%p]:audio re-enable failed\n", + pr_err("%s[%pK]:audio re-enable failed\n", __func__, audio); else { audio->enabled = 1; @@ -235,9 +235,9 @@ static int audio_aio_flush(struct q6audio_aio *audio) } } } - pr_debug("%s[%p]:in_bytes %d\n", + pr_debug("%s[%pK]:in_bytes %d\n", __func__, audio, atomic_read(&audio->in_bytes)); - pr_debug("%s[%p]:in_samples %d\n", + pr_debug("%s[%pK]:in_samples %d\n", __func__, audio, atomic_read(&audio->in_samples)); atomic_set(&audio->in_bytes, 0); atomic_set(&audio->in_samples, 0); @@ -250,7 +250,7 @@ static int audio_aio_outport_flush(struct 
q6audio_aio *audio) rc = q6asm_cmd(audio->ac, CMD_OUT_FLUSH); if (rc < 0) - pr_err("%s[%p}: output port flush cmd failed rc=%d\n", + pr_err("%s[%pK}: output port flush cmd failed rc=%d\n", __func__, audio, rc); return rc; } @@ -278,19 +278,19 @@ void audio_aio_async_write_ack(struct q6audio_aio *audio, uint32_t token, if (token == used_buf->token) { list_del(&used_buf->list); spin_unlock_irqrestore(&audio->dsp_lock, flags); - pr_debug("%s[%p]:consumed buffer\n", __func__, audio); + pr_debug("%s[%pK]:consumed buffer\n", __func__, audio); event_payload.aio_buf = used_buf->buf; audio_aio_post_event(audio, AUDIO_EVENT_WRITE_DONE, event_payload); kfree(used_buf); if (list_empty(&audio->out_queue) && (audio->drv_status & ADRV_STATUS_FSYNC)) { - pr_debug("%s[%p]: list is empty, reached EOS in Tunnel\n", + pr_debug("%s[%pK]: list is empty, reached EOS in Tunnel\n", __func__, audio); wake_up(&audio->write_wait); } } else { - pr_err("%s[%p]:expected=%x ret=%x\n", + pr_err("%s[%pK]:expected=%x ret=%x\n", __func__, audio, used_buf->token, token); spin_unlock_irqrestore(&audio->dsp_lock, flags); } @@ -304,13 +304,13 @@ void audio_aio_async_out_flush(struct q6audio_aio *audio) union msm_audio_event_payload payload; unsigned long flags; - pr_debug("%s[%p}\n", __func__, audio); + pr_debug("%s[%pK}\n", __func__, audio); /* EOS followed by flush, EOS response not guranteed, free EOS i/p buffer */ spin_lock_irqsave(&audio->dsp_lock, flags); if (audio->eos_flag && (audio->eos_write_payload.aio_buf.buf_addr)) { - pr_debug("%s[%p]: EOS followed by flush received,acknowledge"\ + pr_debug("%s[%pK]: EOS followed by flush received,acknowledge" " eos i/p buffer immediately\n", __func__, audio); audio_aio_post_event(audio, AUDIO_EVENT_WRITE_DONE, audio->eos_write_payload); @@ -324,7 +324,7 @@ void audio_aio_async_out_flush(struct q6audio_aio *audio) payload.aio_buf = buf_node->buf; audio_aio_post_event(audio, AUDIO_EVENT_WRITE_DONE, payload); kfree(buf_node); - pr_debug("%s[%p]: Propagate WRITE_DONE during flush\n", + pr_debug("%s[%pK]: Propagate WRITE_DONE during flush\n", __func__, audio); } } @@ -335,14 +335,14 @@ void audio_aio_async_in_flush(struct q6audio_aio *audio) struct list_head *ptr, *next; union msm_audio_event_payload payload; - pr_debug("%s[%p]\n", __func__, audio); + pr_debug("%s[%pK]\n", __func__, audio); list_for_each_safe(ptr, next, &audio->in_queue) { buf_node = list_entry(ptr, struct audio_aio_buffer_node, list); list_del(&buf_node->list); /* Forcefull send o/p eos buffer after flush, if no eos response * received by dsp even after sending eos command */ if ((audio->eos_rsp != 1) && audio->eos_flag) { - pr_debug("%s[%p]: send eos on o/p buffer during flush\n", + pr_debug("%s[%pK]: send eos on o/p buffer during flush\n", __func__, audio); payload.aio_buf = buf_node->buf; payload.aio_buf.data_len = @@ -355,7 +355,7 @@ void audio_aio_async_in_flush(struct q6audio_aio *audio) } audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE, payload); kfree(buf_node); - pr_debug("%s[%p]: Propagate READ_DONE during flush\n", + pr_debug("%s[%pK]: Propagate READ_DONE during flush\n", __func__, audio); } } @@ -373,19 +373,19 @@ int audio_aio_disable(struct q6audio_aio *audio) if (audio->opened) { audio->enabled = 0; audio->opened = 0; - pr_debug("%s[%p]: inbytes[%d] insamples[%d]\n", __func__, + pr_debug("%s[%pK]: inbytes[%d] insamples[%d]\n", __func__, audio, atomic_read(&audio->in_bytes), atomic_read(&audio->in_samples)); /* Close the session */ rc = q6asm_cmd(audio->ac, CMD_CLOSE); if (rc < 0) - 
pr_err("%s[%p]:Failed to close the session rc=%d\n", + pr_err("%s[%pK]:Failed to close the session rc=%d\n", __func__, audio, rc); audio->stopped = 1; wake_up(&audio->write_wait); wake_up(&audio->cmd_wait); } - pr_debug("%s[%p]:enabled[%d]\n", __func__, audio, audio->enabled); + pr_debug("%s[%pK]:enabled[%d]\n", __func__, audio, audio->enabled); return rc; } @@ -434,16 +434,16 @@ static void audio_aio_unmap_ion_region(struct q6audio_aio *audio) struct list_head *ptr, *next; int rc = -EINVAL; - pr_debug("%s[%p]:\n", __func__, audio); + pr_debug("%s[%pK]:\n", __func__, audio); list_for_each_safe(ptr, next, &audio->ion_region_queue) { region = list_entry(ptr, struct audio_aio_ion_region, list); if (region != NULL) { - pr_debug("%s[%p]: phy_address = 0x%pa\n", + pr_debug("%s[%pK]: phy_address = 0x%pK\n", __func__, audio, ®ion->paddr); rc = q6asm_memory_unmap(audio->ac, region->paddr, IN); if (rc < 0) - pr_err("%s[%p]: memory unmap failed\n", + pr_err("%s[%pK]: memory unmap failed\n", __func__, audio); } } @@ -460,20 +460,20 @@ static void audio_aio_listner(u32 evt_id, union auddev_evt_data *evt_payload, switch (evt_id) { case AUDDEV_EVT_STREAM_VOL_CHG: audio->volume = evt_payload->session_vol; - pr_debug("%s[%p]: AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d, enabled = %d\n", + pr_debug("%s[%pK]: AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d, enabled = %d\n", __func__, audio, audio->volume, audio->enabled); if (audio->enabled == 1) { if (audio->ac) { rc = q6asm_set_volume(audio->ac, audio->volume); if (rc < 0) { - pr_err("%s[%p]: Send Volume command failed rc=%d\n", + pr_err("%s[%pK]: Send Volume command failed rc=%d\n", __func__, audio, rc); } } } break; default: - pr_err("%s[%p]:ERROR:wrong event\n", __func__, audio); + pr_err("%s[%pK]:ERROR:wrong event\n", __func__, audio); break; } } @@ -490,7 +490,7 @@ int register_volume_listener(struct q6audio_aio *audio) audio_aio_listner, (void *)audio); if (rc < 0) { - pr_err("%s[%p]: Event listener failed\n", __func__, audio); + pr_err("%s[%pK]: Event listener failed\n", __func__, audio); rc = -EACCES; } return rc; @@ -508,7 +508,7 @@ int enable_volume_ramp(struct q6audio_aio *audio) if (audio->ac == NULL) return -EINVAL; - pr_debug("%s[%p]\n", __func__, audio); + pr_debug("%s[%pK]\n", __func__, audio); softpause.enable = SOFT_PAUSE_ENABLE; softpause.period = SOFT_PAUSE_PERIOD; softpause.step = SOFT_PAUSE_STEP; @@ -568,7 +568,7 @@ int enable_volume_ramp(struct q6audio_aio *audio) int audio_aio_release(struct inode *inode, struct file *file) { struct q6audio_aio *audio = file->private_data; - pr_debug("%s[%p]\n", __func__, audio); + pr_debug("%s[%pK]\n", __func__, audio); mutex_lock(&audio->lock); mutex_lock(&audio->read_lock); mutex_lock(&audio->write_lock); @@ -628,56 +628,56 @@ int audio_aio_fsync(struct file *file, loff_t start, loff_t end, int datasync) audio->drv_status |= ADRV_STATUS_FSYNC; mutex_unlock(&audio->lock); - pr_debug("%s[%p]:\n", __func__, audio); + pr_debug("%s[%pK]:\n", __func__, audio); audio->eos_rsp = 0; - pr_debug("%s[%p]Wait for write done from DSP\n", __func__, audio); + pr_debug("%s[%pK]Wait for write done from DSP\n", __func__, audio); rc = wait_event_interruptible(audio->write_wait, (list_empty(&audio->out_queue)) || audio->wflush || audio->stopped); if (audio->stopped || audio->wflush) { - pr_debug("%s[%p]: Audio Flushed or Stopped,this is not EOS\n" + pr_debug("%s[%pK]: Audio Flushed or Stopped,this is not EOS\n" , __func__, audio); audio->wflush = 0; rc = -EBUSY; } if (rc < 0) { - pr_err("%s[%p]: wait event for list_empty 
failed, rc = %d\n", + pr_err("%s[%pK]: wait event for list_empty failed, rc = %d\n", __func__, audio, rc); goto done; } rc = q6asm_cmd(audio->ac, CMD_EOS); - pr_debug("%s[%p]: EOS cmd sent to DSP\n", __func__, audio); + pr_debug("%s[%pK]: EOS cmd sent to DSP\n", __func__, audio); if (rc < 0) - pr_err("%s[%p]: q6asm_cmd failed, rc = %d", + pr_err("%s[%pK]: q6asm_cmd failed, rc = %d", __func__, audio, rc); - pr_debug("%s[%p]: wait for RENDERED_EOS from DSP\n" + pr_debug("%s[%pK]: wait for RENDERED_EOS from DSP\n" , __func__, audio); rc = wait_event_interruptible(audio->write_wait, (audio->eos_rsp || audio->wflush || audio->stopped)); if (rc < 0) { - pr_err("%s[%p]: wait event for eos_rsp failed, rc = %d\n", + pr_err("%s[%pK]: wait event for eos_rsp failed, rc = %d\n", __func__, audio, rc); goto done; } if (audio->stopped || audio->wflush) { audio->wflush = 0; - pr_debug("%s[%p]: Audio Flushed or Stopped,this is not EOS\n" + pr_debug("%s[%pK]: Audio Flushed or Stopped,this is not EOS\n" , __func__, audio); rc = -EBUSY; } if (audio->eos_rsp == 1) - pr_debug("%s[%p]: EOS\n", __func__, audio); + pr_debug("%s[%pK]: EOS\n", __func__, audio); done: @@ -748,21 +748,21 @@ static long audio_aio_process_event_req_common(struct q6audio_aio *audio, usr_evt->event_payload = drv_evt->payload; list_add_tail(&drv_evt->list, &audio->free_event_queue); } else { - pr_err("%s[%p]:Unexpected path\n", __func__, audio); + pr_err("%s[%pK]:Unexpected path\n", __func__, audio); spin_unlock_irqrestore(&audio->event_queue_lock, flags); return -EPERM; } spin_unlock_irqrestore(&audio->event_queue_lock, flags); if (drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) { - pr_debug("%s[%p]:posted AUDIO_EVENT_WRITE_DONE to user\n", + pr_debug("%s[%pK]:posted AUDIO_EVENT_WRITE_DONE to user\n", __func__, audio); mutex_lock(&audio->write_lock); audio_aio_ion_fixup(audio, drv_evt->payload.aio_buf.buf_addr, drv_evt->payload.aio_buf.buf_len, 0, 0); mutex_unlock(&audio->write_lock); } else if (drv_evt->event_type == AUDIO_EVENT_READ_DONE) { - pr_debug("%s[%p]:posted AUDIO_EVENT_READ_DONE to user\n", + pr_debug("%s[%pK]:posted AUDIO_EVENT_READ_DONE to user\n", __func__, audio); mutex_lock(&audio->read_lock); audio_aio_ion_fixup(audio, drv_evt->payload.aio_buf.buf_addr, @@ -774,7 +774,7 @@ static long audio_aio_process_event_req_common(struct q6audio_aio *audio, * Once EOS indicated */ if (audio->eos_rsp && !list_empty(&audio->in_queue)) { - pr_debug("%s[%p]:Send flush command to release read buffers"\ + pr_debug("%s[%pK]:Send flush command to release read buffers" " held up in DSP\n", __func__, audio); mutex_lock(&audio->lock); audio_aio_flush(audio); @@ -917,7 +917,7 @@ static int audio_aio_ion_check(struct q6audio_aio *audio, list_for_each_entry(region_elt, &audio->ion_region_queue, list) { if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || OVERLAPS(region_elt, &t)) { - pr_err("%s[%p]:region (vaddr %p len %ld) clashes with registered region (vaddr %p paddr %pa len %ld)\n", + pr_err("%s[%pK]:region (vaddr %pK len %ld) clashes with registered region (vaddr %pK paddr %pK len %ld)\n", __func__, audio, vaddr, len, region_elt->vaddr, ®ion_elt->paddr, region_elt->len); @@ -939,7 +939,7 @@ static int audio_aio_ion_add(struct q6audio_aio *audio, unsigned long ionflag; void *kvaddr = NULL; - pr_debug("%s[%p]:\n", __func__, audio); + pr_debug("%s[%pK]:\n", __func__, audio); region = kmalloc(sizeof(*region), GFP_KERNEL); if (!region) { @@ -968,14 +968,14 @@ static int audio_aio_ion_add(struct q6audio_aio *audio, region->kvaddr = kvaddr; 
region->len = len; region->ref_cnt = 0; - pr_debug("%s[%p]:add region paddr %pa vaddr %p, len %lu kvaddr %p\n", + pr_debug("%s[%pK]:add region paddr %pK vaddr %pK, len %lu kvaddr %pK\n", __func__, audio, &region->paddr, region->vaddr, region->len, region->kvaddr); list_add_tail(&region->list, &audio->ion_region_queue); rc = q6asm_memory_map(audio->ac, paddr, IN, len, 1); if (rc < 0) { - pr_err("%s[%p]: memory map failed\n", __func__, audio); + pr_err("%s[%pK]: memory map failed\n", __func__, audio); goto mmap_error; } else { goto end; @@ -997,7 +997,7 @@ static int audio_aio_ion_remove(struct q6audio_aio *audio, struct list_head *ptr, *next; int rc = -EINVAL; - pr_debug("%s[%p]:info fd %d vaddr %p\n", + pr_debug("%s[%pK]:info fd %d vaddr %pK\n", __func__, audio, info->fd, info->vaddr); list_for_each_safe(ptr, next, &audio->ion_region_queue) { @@ -1006,17 +1006,17 @@ static int audio_aio_ion_remove(struct q6audio_aio *audio, if ((region->fd == info->fd) && (region->vaddr == info->vaddr)) { if (region->ref_cnt) { - pr_debug("%s[%p]:region %p in use ref_cnt %d\n", + pr_debug("%s[%pK]:region %pK in use ref_cnt %d\n", __func__, audio, region, region->ref_cnt); break; } - pr_debug("%s[%p]:remove region fd %d vaddr %p\n", + pr_debug("%s[%pK]:remove region fd %d vaddr %pK\n", __func__, audio, info->fd, info->vaddr); rc = q6asm_memory_unmap(audio->ac, region->paddr, IN); if (rc < 0) - pr_err("%s[%p]: memory unmap failed\n", + pr_err("%s[%pK]: memory unmap failed\n", __func__, audio); list_del(&region->list); @@ -1039,15 +1039,15 @@ static int audio_aio_async_write(struct q6audio_aio *audio, struct audio_aio_write_param param; if (!audio || !buf_node) { - pr_err("%s NULL pointer audio=[0x%p], buf_node=[0x%p]\n", + pr_err("%s NULL pointer audio=[0x%pK], buf_node=[0x%pK]\n", __func__, audio, buf_node); return -EINVAL; } - pr_debug("%s[%p]: Send write buff %p phy %pa len %d meta_enable = %d\n", + pr_debug("%s[%pK]: Send write buff %pK phy %pK len %d meta_enable = %d\n", __func__, audio, buf_node, &buf_node->paddr, buf_node->buf.data_len, audio->buf_cfg.meta_info_enable); - pr_debug("%s[%p]: flags = 0x%x\n", __func__, audio, + pr_debug("%s[%pK]: flags = 0x%x\n", __func__, audio, buf_node->meta_info.meta_in.nflags); ac = audio->ac; @@ -1076,7 +1076,7 @@ static int audio_aio_async_write(struct q6audio_aio *audio, buf_node->token = ac->session; rc = q6asm_async_write(ac, &param); if (rc < 0) - pr_err("%s[%p]:failed\n", __func__, audio); + pr_err("%s[%pK]:failed\n", __func__, audio); return rc; } @@ -1095,8 +1095,6 @@ void audio_aio_post_event(struct q6audio_aio *audio, int type, } else { e_node = kmalloc(sizeof(struct audio_aio_event), GFP_ATOMIC); if (!e_node) { - pr_err("%s[%p]:No mem to post event %d\n", - __func__, audio, type); spin_unlock_irqrestore(&audio->event_queue_lock, flags); return; } @@ -1117,7 +1115,7 @@ static int audio_aio_async_read(struct q6audio_aio *audio, struct audio_aio_read_param param; int rc; - pr_debug("%s[%p]: Send read buff %p phy %pa len %d\n", + pr_debug("%s[%pK]: Send read buff %pK phy %pK len %d\n", __func__, audio, buf_node, &buf_node->paddr, buf_node->buf.buf_len); ac = audio->ac; @@ -1131,7 +1129,7 @@ static int audio_aio_async_read(struct q6audio_aio *audio, buf_node->token = ac->session; rc = q6asm_async_read(ac, &param); if (rc < 0) - pr_err("%s[%p]:failed\n", __func__, audio); + pr_err("%s[%pK]:failed\n", __func__, audio); return rc; } @@ -1140,7 +1138,7 @@ static int audio_aio_buf_add_shared(struct q6audio_aio *audio, u32 dir, { unsigned long flags; int ret = 0; - 
pr_debug("%s[%p]:node %p dir %x buf_addr %p buf_len %d data_len %d\n", + pr_debug("%s[%pK]:node %pK dir %x buf_addr %pK buf_len %d data_len %d\n", __func__, audio, buf_node, dir, buf_node->buf.buf_addr, buf_node->buf.buf_len, buf_node->buf.data_len); buf_node->paddr = audio_aio_ion_fixup(audio, buf_node->buf.buf_addr, @@ -1165,7 +1163,7 @@ static int audio_aio_buf_add_shared(struct q6audio_aio *audio, u32 dir, } else if (buf_node->meta_info.meta_in.nflags & AUDIO_DEC_EOS_SET) { if (!audio->wflush) { - pr_debug("%s[%p]:Send EOS cmd at i/p\n", + pr_debug("%s[%pK]:Send EOS cmd at i/p\n", __func__, audio); /* Driver will forcefully post writedone event * once eos ack recived from DSP @@ -1211,7 +1209,7 @@ static int audio_aio_buf_add_shared(struct q6audio_aio *audio, u32 dir, event_payload.aio_buf = buf_node->buf; event_payload.aio_buf.data_len = insert_eos_buf(audio, buf_node); - pr_debug("%s[%p]: propagate READ_DONE as EOS done\n",\ + pr_debug("%s[%pK]: propagate READ_DONE as EOS done\n", __func__, audio); audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE, event_payload); @@ -1280,7 +1278,8 @@ void audio_aio_ioport_reset(struct q6audio_aio *audio) * abort due to flush */ if (audio->drv_status & ADRV_STATUS_FSYNC) { - pr_debug("%s[%p]:fsync in progress\n", __func__, audio); + pr_debug("%s[%pK]:fsync in progress\n", + __func__, audio); audio->drv_ops.out_flush(audio); } else audio->drv_ops.out_flush(audio); @@ -1307,13 +1306,13 @@ int audio_aio_open(struct q6audio_aio *audio, struct file *file) /* Only AIO interface */ if (file->f_flags & O_NONBLOCK) { - pr_debug("%s[%p]:set to aio interface\n", __func__, audio); + pr_debug("%s[%pK]:set to aio interface\n", __func__, audio); audio->drv_status |= ADRV_STATUS_AIO_INTF; audio->drv_ops.out_flush = audio_aio_async_out_flush; audio->drv_ops.in_flush = audio_aio_async_in_flush; q6asm_set_io_mode(audio->ac, ASYNC_IO_MODE); } else { - pr_err("%s[%p]:SIO interface not supported\n", + pr_err("%s[%pK]:SIO interface not supported\n", __func__, audio); rc = -EACCES; goto fail; @@ -1346,7 +1345,7 @@ int audio_aio_open(struct q6audio_aio *audio, struct file *file) if (e_node) list_add_tail(&e_node->list, &audio->free_event_queue); else { - pr_err("%s[%p]:event pkt alloc failed\n", + pr_err("%s[%pK]:event pkt alloc failed\n", __func__, audio); rc = -ENOMEM; goto cleanup; @@ -1358,7 +1357,7 @@ int audio_aio_open(struct q6audio_aio *audio, struct file *file) rc = -ENOMEM; goto cleanup; } - pr_debug("Ion client create in audio_aio_open %p", audio->client); + pr_debug("Ion client create in audio_aio_open %pK", audio->client); rc = register_volume_listener(audio); if (rc < 0) @@ -1392,11 +1391,11 @@ static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd, break; } case AUDIO_OUTPORT_FLUSH: { - pr_debug("%s[%p]:AUDIO_OUTPORT_FLUSH\n", __func__, audio); + pr_debug("%s[%pK]:AUDIO_OUTPORT_FLUSH\n", __func__, audio); mutex_lock(&audio->read_lock); rc = audio_aio_outport_flush(audio); if (rc < 0) { - pr_err("%s[%p]: AUDIO_OUTPORT_FLUSH failed\n", + pr_err("%s[%pK]: AUDIO_OUTPORT_FLUSH failed\n", __func__, audio); rc = -EINTR; } @@ -1404,13 +1403,13 @@ static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd, break; } case AUDIO_STOP: { - pr_debug("%s[%p]: AUDIO_STOP session_id[%d]\n", __func__, + pr_debug("%s[%pK]: AUDIO_STOP session_id[%d]\n", __func__, audio, audio->ac->session); mutex_lock(&audio->lock); audio->stopped = 1; rc = audio_aio_flush(audio); if (rc < 0) { - pr_err("%s[%p]:Audio Stop procedure failed rc=%d\n", + 
pr_err("%s[%pK]:Audio Stop procedure failed rc=%d\n", __func__, audio, rc); mutex_unlock(&audio->lock); break; @@ -1418,7 +1417,7 @@ static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd, audio->enabled = 0; audio->drv_status &= ~ADRV_STATUS_PAUSE; if (audio->drv_status & ADRV_STATUS_FSYNC) { - pr_debug("%s[%p] Waking up the audio_aio_fsync\n", + pr_debug("%s[%pK] Waking up the audio_aio_fsync\n", __func__, audio); wake_up(&audio->write_wait); } @@ -1426,12 +1425,12 @@ static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd, break; } case AUDIO_PAUSE: { - pr_debug("%s[%p]:AUDIO_PAUSE %ld\n", __func__, audio, arg); + pr_debug("%s[%pK]:AUDIO_PAUSE %ld\n", __func__, audio, arg); mutex_lock(&audio->lock); if (arg == 1) { rc = audio_aio_pause(audio); if (rc < 0) { - pr_err("%s[%p]: pause FAILED rc=%d\n", + pr_err("%s[%pK]: pause FAILED rc=%d\n", __func__, audio, rc); mutex_unlock(&audio->lock); break; @@ -1441,7 +1440,7 @@ static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd, if (audio->drv_status & ADRV_STATUS_PAUSE) { rc = audio_aio_enable(audio); if (rc) - pr_err("%s[%p]: audio enable failed\n", + pr_err("%s[%pK]: audio enable failed\n", __func__, audio); else { audio->drv_status &= ~ADRV_STATUS_PAUSE; @@ -1453,13 +1452,13 @@ static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd, break; } case AUDIO_FLUSH: { - pr_debug("%s[%p]: AUDIO_FLUSH sessionid[%d]\n", __func__, + pr_debug("%s[%pK]: AUDIO_FLUSH sessionid[%d]\n", __func__, audio, audio->ac->session); mutex_lock(&audio->lock); audio->rflush = 1; audio->wflush = 1; if (audio->drv_status & ADRV_STATUS_FSYNC) { - pr_debug("%s[%p] Waking up the audio_aio_fsync\n", + pr_debug("%s[%pK] Waking up the audio_aio_fsync\n", __func__, audio); wake_up(&audio->write_wait); } @@ -1468,7 +1467,7 @@ static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd, /* Flush input / Output buffer in software*/ audio_aio_ioport_reset(audio); if (rc < 0) { - pr_err("%s[%p]:AUDIO_FLUSH interrupted\n", + pr_err("%s[%pK]:AUDIO_FLUSH interrupted\n", __func__, audio); rc = -EINTR; } else { @@ -1498,12 +1497,12 @@ static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd, case AUDIO_PM_AWAKE: { if ((audio->audio_ws_mgr == NULL) || (audio->miscdevice == NULL)) { - pr_err("%s[%p]: invalid ws_mgr or miscdevice", + pr_err("%s[%pK]: invalid ws_mgr or miscdevice", __func__, audio); rc = -EACCES; break; } - pr_debug("%s[%p]:AUDIO_PM_AWAKE\n", __func__, audio); + pr_debug("%s[%pK]:AUDIO_PM_AWAKE\n", __func__, audio); mutex_lock(&audio->lock); if (!audio->wakelock_voted) { audio->wakelock_voted = true; @@ -1518,12 +1517,12 @@ static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd, case AUDIO_PM_RELAX: { if ((audio->audio_ws_mgr == NULL) || (audio->miscdevice == NULL)) { - pr_err("%s[%p]: invalid ws_mgr or miscdevice", + pr_err("%s[%pK]: invalid ws_mgr or miscdevice", __func__, audio); rc = -EACCES; break; } - pr_debug("%s[%p]:AUDIO_PM_RELAX\n", __func__, audio); + pr_debug("%s[%pK]:AUDIO_PM_RELAX\n", __func__, audio); mutex_lock(&audio->lock); if (audio->wakelock_voted) { audio->wakelock_voted = false; @@ -1582,7 +1581,7 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd, break; } case AUDIO_GET_EVENT: { - pr_debug("%s[%p]:AUDIO_GET_EVENT\n", __func__, audio); + pr_debug("%s[%pK]:AUDIO_GET_EVENT\n", __func__, audio); if (mutex_trylock(&audio->get_event_lock)) { rc = audio_aio_process_event_req(audio, (void __user *)arg); @@ -1622,7 +1621,7 @@ static long 
audio_aio_ioctl(struct file *file, unsigned int cmd, memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = audio->str_cfg.buffer_size; cfg.buffer_count = audio->str_cfg.buffer_count; - pr_debug("%s[%p]:GET STREAM CFG %d %d\n", + pr_debug("%s[%pK]:GET STREAM CFG %d %d\n", __func__, audio, cfg.buffer_size, cfg.buffer_count); if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) { pr_err( @@ -1635,7 +1634,7 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd, } case AUDIO_SET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; - pr_debug("%s[%p]:SET STREAM CONFIG\n", __func__, audio); + pr_debug("%s[%pK]:SET STREAM CONFIG\n", __func__, audio); mutex_lock(&audio->lock); if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { pr_err( @@ -1665,7 +1664,7 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd, } case AUDIO_SET_CONFIG: { struct msm_audio_config config; - pr_err("%s[%p]:AUDIO_SET_CONFIG\n", __func__, audio); + pr_err("%s[%pK]:AUDIO_SET_CONFIG\n", __func__, audio); mutex_lock(&audio->lock); if (copy_from_user(&config, (void *)arg, sizeof(config))) { pr_err( @@ -1676,7 +1675,7 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd, break; } if (audio->feedback != NON_TUNNEL_MODE) { - pr_err("%s[%p]:Not sufficient permission to change the playback mode\n", + pr_err("%s[%pK]:Not sufficient permission to change the playback mode\n", __func__, audio); rc = -EACCES; mutex_unlock(&audio->lock); @@ -1716,14 +1715,14 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd, } audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; - pr_debug("%s[%p]:session id %d: Set-buf-cfg: meta[%d]", + pr_debug("%s[%pK]:session id %d: Set-buf-cfg: meta[%d]", __func__, audio, audio->ac->session, cfg.meta_info_enable); mutex_unlock(&audio->lock); break; } case AUDIO_GET_BUF_CFG: { - pr_debug("%s[%p]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n", + pr_debug("%s[%pK]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n", __func__, audio, audio->ac->session, audio->buf_cfg.meta_info_enable, audio->buf_cfg.frames_per_buf); @@ -1741,7 +1740,7 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd, } case AUDIO_REGISTER_ION: { struct msm_audio_ion_info info; - pr_debug("%s[%p]:AUDIO_REGISTER_ION\n", __func__, audio); + pr_debug("%s[%pK]:AUDIO_REGISTER_ION\n", __func__, audio); mutex_lock(&audio->lock); if (copy_from_user(&info, (void *)arg, sizeof(info))) { pr_err( @@ -1761,7 +1760,7 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd, case AUDIO_DEREGISTER_ION: { struct msm_audio_ion_info info; mutex_lock(&audio->lock); - pr_debug("%s[%p]:AUDIO_DEREGISTER_ION\n", __func__, audio); + pr_debug("%s[%pK]:AUDIO_DEREGISTER_ION\n", __func__, audio); if (copy_from_user(&info, (void *)arg, sizeof(info))) { pr_err( "%s: copy_from_user for AUDIO_DEREGISTER_ION failed\n", @@ -1881,7 +1880,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, break; } case AUDIO_GET_EVENT_32: { - pr_debug("%s[%p]:AUDIO_GET_EVENT\n", __func__, audio); + pr_debug("%s[%pK]:AUDIO_GET_EVENT\n", __func__, audio); if (mutex_trylock(&audio->get_event_lock)) { rc = audio_aio_process_event_req_compat(audio, (void __user *)arg); @@ -1921,7 +1920,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = audio->str_cfg.buffer_size; cfg.buffer_count = audio->str_cfg.buffer_count; - pr_debug("%s[%p]:GET STREAM CFG %d %d\n", + pr_debug("%s[%pK]:GET STREAM CFG %d %d\n", __func__, audio, 
cfg.buffer_size, cfg.buffer_count); if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) { pr_err("%s: copy_to_user for AUDIO_GET_STREAM_CONFIG_32 failed\n", @@ -1934,7 +1933,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, case AUDIO_SET_STREAM_CONFIG_32: { struct msm_audio_stream_config32 cfg_32; struct msm_audio_stream_config cfg; - pr_debug("%s[%p]:SET STREAM CONFIG\n", __func__, audio); + pr_debug("%s[%pK]:SET STREAM CONFIG\n", __func__, audio); mutex_lock(&audio->lock); if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) { pr_err("%s: copy_from_user for AUDIO_SET_STREAM_CONFIG_32 failed\n", @@ -1978,13 +1977,13 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, mutex_lock(&audio->lock); if (audio->feedback != NON_TUNNEL_MODE) { - pr_err("%s[%p]:Not sufficient permission to change the playback mode\n", + pr_err("%s[%pK]:Not sufficient permission to change the playback mode\n", __func__, audio); rc = -EACCES; mutex_unlock(&audio->lock); break; } - pr_err("%s[%p]:AUDIO_SET_CONFIG\n", __func__, audio); + pr_err("%s[%pK]:AUDIO_SET_CONFIG\n", __func__, audio); if (copy_from_user(&config_32, (void *)arg, sizeof(config_32))) { pr_err("%s: copy_from_user for AUDIO_SET_CONFIG_32 failed\n", @@ -2038,7 +2037,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, } audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; - pr_debug("%s[%p]:session id %d: Set-buf-cfg: meta[%d]", + pr_debug("%s[%pK]:session id %d: Set-buf-cfg: meta[%d]", __func__, audio, audio->ac->session, cfg.meta_info_enable); mutex_unlock(&audio->lock); @@ -2046,7 +2045,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, } case AUDIO_GET_BUF_CFG_32: { struct msm_audio_buf_cfg32 cfg_32; - pr_debug("%s[%p]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n", + pr_debug("%s[%pK]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n", __func__, audio, audio->ac->session, audio->buf_cfg.meta_info_enable, audio->buf_cfg.frames_per_buf); @@ -2067,7 +2066,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, case AUDIO_REGISTER_ION_32: { struct msm_audio_ion_info32 info_32; struct msm_audio_ion_info info; - pr_debug("%s[%p]:AUDIO_REGISTER_ION\n", __func__, audio); + pr_debug("%s[%pK]:AUDIO_REGISTER_ION\n", __func__, audio); mutex_lock(&audio->lock); if (copy_from_user(&info_32, (void *)arg, sizeof(info_32))) { pr_err("%s: copy_from_user for AUDIO_REGISTER_ION_32 failed\n", @@ -2089,7 +2088,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, struct msm_audio_ion_info32 info_32; struct msm_audio_ion_info info; mutex_lock(&audio->lock); - pr_debug("%s[%p]:AUDIO_DEREGISTER_ION\n", __func__, audio); + pr_debug("%s[%pK]:AUDIO_DEREGISTER_ION\n", __func__, audio); if (copy_from_user(&info_32, (void *)arg, sizeof(info_32))) { pr_err("%s: copy_from_user for AUDIO_DEREGISTER_ION_32 failed\n", __func__); diff --git a/drivers/misc/qcom/qdsp6v2/audio_wma.c b/drivers/misc/qcom/qdsp6v2/audio_wma.c index 74f678da925a..b7dfdf23bec7 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_wma.c +++ b/drivers/misc/qcom/qdsp6v2/audio_wma.c @@ -40,7 +40,7 @@ static long audio_ioctl_shared(struct file *file, unsigned int cmd, case AUDIO_START: { struct asm_wma_cfg wma_cfg; struct msm_audio_wma_config_v2 *wma_config; - pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* 
Configure PCM output block */ @@ -122,7 +122,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } default: { - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); if (rc) pr_err("Failed in utils_ioctl: %d\n", rc); @@ -211,7 +211,7 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd, break; } default: { - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_compat_ioctl(file, cmd, arg); if (rc) pr_err("Failed in utils_ioctl: %d\n", rc); diff --git a/drivers/misc/qcom/qdsp6v2/audio_wmapro.c b/drivers/misc/qcom/qdsp6v2/audio_wmapro.c index 21ad33b7fd5d..d37a5789391c 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_wmapro.c +++ b/drivers/misc/qcom/qdsp6v2/audio_wmapro.c @@ -173,7 +173,7 @@ static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } default: { - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); if (rc) pr_err("Failed in utils_ioctl: %d\n", rc); @@ -283,7 +283,7 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd, break; } default: { - pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_compat_ioctl(file, cmd, arg); if (rc) pr_err("Failed in utils_ioctl: %d\n", rc); diff --git a/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c b/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c index 6e82c8051886..09b83f354406 100644 --- a/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c +++ b/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c @@ -54,18 +54,18 @@ void audio_aio_cb(uint32_t opcode, uint32_t token, switch (opcode) { case ASM_DATA_EVENT_WRITE_DONE_V2: - pr_debug("%s[%p]:ASM_DATA_EVENT_WRITE_DONE token = 0x%x\n", + pr_debug("%s[%pK]:ASM_DATA_EVENT_WRITE_DONE token = 0x%x\n", __func__, audio, token); audio_aio_async_write_ack(audio, token, payload); break; case ASM_DATA_EVENT_READ_DONE_V2: - pr_debug("%s[%p]:ASM_DATA_EVENT_READ_DONE token = 0x%x\n", + pr_debug("%s[%pK]:ASM_DATA_EVENT_READ_DONE token = 0x%x\n", __func__, audio, token); audio_aio_async_read_ack(audio, token, payload); break; case ASM_DATA_EVENT_RENDERED_EOS: /* EOS Handle */ - pr_debug("%s[%p]:ASM_DATA_CMDRSP_EOS\n", __func__, audio); + pr_debug("%s[%pK]:ASM_DATA_CMDRSP_EOS\n", __func__, audio); if (audio->feedback) { /* Non-Tunnel mode */ audio->eos_rsp = 1; /* propagate input EOS i/p buffer, @@ -87,16 +87,16 @@ void audio_aio_cb(uint32_t opcode, uint32_t token, break; case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2: case ASM_STREAM_CMD_SET_ENCDEC_PARAM: - pr_debug("%s[%p]:payload0[%x] payloa1d[%x]opcode= 0x%x\n", + pr_debug("%s[%pK]:payload0[%x] payloa1d[%x]opcode= 0x%x\n", __func__, audio, payload[0], payload[1], opcode); break; case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY: case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY: - pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0]-sr = %d, payload[1]-chl = %d, payload[2] = %d, payload[3] = %d\n", + pr_debug("%s[%pK]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0]-sr = %d, payload[1]-chl = %d, payload[2] = %d, payload[3] = %d\n", __func__, audio, payload[0], payload[1], payload[2], payload[3]); - pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, sr(prev) = %d, chl(prev) = %d,", + pr_debug("%s[%pK]: 
ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, sr(prev) = %d, chl(prev) = %d,", __func__, audio, audio->pcm_cfg.sample_rate, audio->pcm_cfg.channel_count); @@ -130,7 +130,7 @@ void extract_meta_out_info(struct q6audio_aio *audio, else memset(&buf_node->meta_info.meta_in, 0, sizeof(struct dec_meta_in)); - pr_debug("%s[%p]:i/p: msw_ts 0x%d lsw_ts 0x%d nflags 0x%8x\n", + pr_debug("%s[%pK]:i/p: msw_ts %d lsw_ts %d nflags 0x%8x\n", __func__, audio, buf_node->meta_info.meta_in.ntimestamp.highpart, buf_node->meta_info.meta_in.ntimestamp.lowpart, @@ -145,7 +145,7 @@ void extract_meta_out_info(struct q6audio_aio *audio, meta_data->meta_out_dsp[0].lsw_ts; meta_data->meta_out_dsp[0].lsw_ts = temp; - pr_debug("%s[%p]:o/p: msw_ts 0x%d lsw_ts 0x%d nflags 0x%8x, num_frames = %d\n", + pr_debug("%s[%pK]:o/p: msw_ts %d lsw_ts %d nflags 0x%8x, num_frames = %d\n", __func__, audio, ((struct dec_meta_out *)buf_node->kvaddr)->\ meta_out_dsp[0].msw_ts, @@ -201,7 +201,7 @@ void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token, = payload[9]; event_payload.aio_buf.data_len = payload[4]\ + payload[5] + sizeof(struct dec_meta_out); - pr_debug("%s[%p]:nr of frames 0x%8x len=%d\n", + pr_debug("%s[%pK]:nr of frames 0x%8x len=%d\n", __func__, audio, filled_buf->meta_info.meta_out.num_of_frames, event_payload.aio_buf.data_len); @@ -213,7 +213,7 @@ void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token, event_payload); kfree(filled_buf); } else { - pr_err("%s[%p]:expected=%x ret=%x\n", + pr_err("%s[%pK]:expected=%x ret=%x\n", __func__, audio, filled_buf->token, token); spin_unlock_irqrestore(&audio->dsp_lock, flags); } diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.c b/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.c index 30274fd4b725..334e705ca8f1 100644 --- a/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.c +++ b/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.c @@ -208,7 +208,7 @@ static int q6usm_us_client_buf_free(unsigned int dir, rc = q6usm_memory_unmap(port->phys, dir, usc->session, *((uint32_t *)port->ext)); - pr_debug("%s: data[%p]phys[%llx][%p]\n", __func__, + pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__, (void *)port->data, (u64)port->phys, (void *)&port->phys); msm_audio_ion_free(port->client, port->handle); @@ -248,7 +248,7 @@ int q6usm_us_param_buf_free(unsigned int dir, rc = q6usm_memory_unmap(port->param_phys, dir, usc->session, *((uint32_t *)port->param_buf_mem_handle)); - pr_debug("%s: data[%p]phys[%llx][%p]\n", __func__, + pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__, (void *)port->param_buf, (u64)port->param_phys, (void *)&port->param_phys); @@ -362,7 +362,7 @@ struct us_client *q6usm_us_client_alloc( spin_lock_init(&usc->port[lcnt].dsp_lock); usc->port[lcnt].ext = (void *)p_mem_handle++; usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++; - pr_err("%s: usc->port[%d].ext=%p;\n", + pr_err("%s: usc->port[%d].ext=%pK;\n", __func__, lcnt, usc->port[lcnt].ext); } atomic_set(&usc->cmd_state, 0); @@ -417,7 +417,7 @@ int q6usm_us_client_buf_alloc(unsigned int dir, port->buf_cnt = bufcnt; port->buf_size = bufsz; - pr_debug("%s: data[%p]; phys[%llx]; [%p]\n", __func__, + pr_debug("%s: data[%pK]; phys[%llx]; [%pK]\n", __func__, (void *)port->data, (u64)port->phys, (void *)&port->phys); @@ -482,7 +482,7 @@ int q6usm_us_param_buf_alloc(unsigned int dir, } port->param_buf_size = bufsz; - pr_debug("%s: param_buf[%p]; param_phys[%llx]; [%p]\n", __func__, + pr_debug("%s: param_buf[%pK]; param_phys[%llx]; [%pK]\n", __func__, (void *)port->param_buf, (u64)port->param_phys, 
(void *)&port->param_phys); @@ -1335,7 +1335,7 @@ int q6usm_set_us_detection(struct us_client *usc, if ((usc == NULL) || (detect_info_size == 0) || (detect_info == NULL)) { - pr_err("%s: wrong input: usc=0x%p, inf_size=%d; info=0x%p", + pr_err("%s: wrong input: usc=0x%pK, inf_size=%d; info=0x%pK", __func__, usc, detect_info_size, diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c b/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c index 7572374cc524..3bb95f50bc13 100644 --- a/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c +++ b/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c @@ -23,6 +23,7 @@ #include <linux/time.h> #include <linux/kmemleak.h> #include <linux/wakelock.h> +#include <linux/mutex.h> #include <sound/apr_audio.h> #include <linux/qdsp6v2/usf.h> #include "q6usm.h" @@ -135,6 +136,8 @@ struct usf_type { uint16_t conflicting_event_filters; /* The requested buttons bitmap */ uint16_t req_buttons_bitmap; + /* Mutex for exclusive operations (all public APIs) */ + struct mutex mutex; }; struct usf_input_dev_type { @@ -1403,9 +1406,22 @@ static int __usf_set_stream_param(struct usf_xx_type *usf_xx, int dir) { struct us_client *usc = usf_xx->usc; - struct us_port_data *port = &usc->port[dir]; + struct us_port_data *port; int rc = 0; + if (usc == NULL) { + pr_err("%s: usc is null\n", + __func__); + return -EFAULT; + } + + port = &usc->port[dir]; + if (port == NULL) { + pr_err("%s: port is null\n", + __func__); + return -EFAULT; + } + if (port->param_buf == NULL) { pr_err("%s: parameter buffer is null\n", __func__); @@ -1538,10 +1554,12 @@ static int usf_get_stream_param(struct usf_xx_type *usf_xx, return __usf_get_stream_param(usf_xx, &get_stream_param, dir); } /* usf_get_stream_param */ -static long usf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long __usf_ioctl(struct usf_type *usf, + unsigned int cmd, + unsigned long arg) { + int rc = 0; - struct usf_type *usf = file->private_data; struct usf_xx_type *usf_xx = NULL; switch (cmd) { @@ -1704,6 +1722,18 @@ static long usf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) release_xx(usf_xx); return rc; +} /* __usf_ioctl */ + +static long usf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct usf_type *usf = file->private_data; + int rc = 0; + + mutex_lock(&usf->mutex); + rc = __usf_ioctl(usf, cmd, arg); + mutex_unlock(&usf->mutex); + + return rc; } /* usf_ioctl */ #ifdef CONFIG_COMPAT @@ -2147,12 +2177,11 @@ static int usf_get_stream_param32(struct usf_xx_type *usf_xx, return __usf_get_stream_param(usf_xx, &get_stream_param, dir); } /* usf_get_stream_param32 */ -static long usf_compat_ioctl(struct file *file, +static long __usf_compat_ioctl(struct usf_type *usf, unsigned int cmd, unsigned long arg) { int rc = 0; - struct usf_type *usf = file->private_data; struct usf_xx_type *usf_xx = NULL; switch (cmd) { @@ -2160,7 +2189,7 @@ static long usf_compat_ioctl(struct file *file, case US_START_RX: case US_STOP_TX: case US_STOP_RX: { - return usf_ioctl(file, cmd, arg); + return __usf_ioctl(usf, cmd, arg); } case US_SET_TX_INFO32: { @@ -2269,6 +2298,20 @@ static long usf_compat_ioctl(struct file *file, release_xx(usf_xx); return rc; +} /* __usf_compat_ioctl */ + +static long usf_compat_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct usf_type *usf = file->private_data; + int rc = 0; + + mutex_lock(&usf->mutex); + rc = __usf_compat_ioctl(usf, cmd, arg); + mutex_unlock(&usf->mutex); + + return rc; } /* usf_compat_ioctl */ #endif /* CONFIG_COMPAT */ @@ -2277,13 +2320,17 @@ 
static int usf_mmap(struct file *file, struct vm_area_struct *vms) struct usf_type *usf = file->private_data; int dir = OUT; struct usf_xx_type *usf_xx = &usf->usf_tx; + int rc = 0; + mutex_lock(&usf->mutex); if (vms->vm_flags & USF_VM_WRITE) { /* RX buf mapping */ dir = IN; usf_xx = &usf->usf_rx; } + rc = q6usm_get_virtual_address(dir, usf_xx->usc, vms); + mutex_unlock(&usf->mutex); - return q6usm_get_virtual_address(dir, usf_xx->usc, vms); + return rc; } static uint16_t add_opened_dev(int minor) @@ -2336,6 +2383,8 @@ static int usf_open(struct inode *inode, struct file *file) usf->usf_tx.us_detect_type = USF_US_DETECT_UNDEF; usf->usf_rx.us_detect_type = USF_US_DETECT_UNDEF; + mutex_init(&usf->mutex); + pr_debug("%s:usf in open\n", __func__); return 0; } @@ -2346,6 +2395,7 @@ static int usf_release(struct inode *inode, struct file *file) pr_debug("%s: release entry\n", __func__); + mutex_lock(&usf->mutex); usf_release_input(usf); usf_disable(&usf->usf_tx); @@ -2354,6 +2404,8 @@ static int usf_release(struct inode *inode, struct file *file) s_opened_devs[usf->dev_ind] = 0; wakeup_source_trash(&usf_wakeup_source); + mutex_unlock(&usf->mutex); + mutex_destroy(&usf->mutex); kfree(usf); pr_debug("%s: release exit\n", __func__); return 0; diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c b/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c index 76bcc83e1c5e..a4d63f0c0d1a 100644 --- a/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c +++ b/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2013, 2016 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -170,7 +170,7 @@ static int usfcdev_connect(struct input_handler *handler, struct input_dev *dev, } usfc_handle->dev = dev; ret = input_register_handle(usfc_handle); - pr_debug("%s: name=[%s]; ind=%d; dev=0x%p\n", + pr_debug("%s: name=[%s]; ind=%d; dev=0x%pK\n", __func__, dev->name, ind, @@ -259,7 +259,7 @@ bool usfcdev_register( bool rc = false; if ((event_type_ind >= MAX_EVENT_TYPE_NUM) || !match_cb) { - pr_err("%s: wrong input: event_type_ind=%d; match_cb=0x%p\n", + pr_err("%s: wrong input: event_type_ind=%d; match_cb=0x%pK\n", __func__, event_type_ind, match_cb); diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 862d72cb86cf..ff838ebefba6 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -1065,6 +1065,10 @@ static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc, } /* Populate the structure for sending scm call to load image */ svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle); + if (IS_ERR_OR_NULL(svc->sb_virt)) { + pr_err("ION memory mapping for listener shared buffer failed\n"); + return -ENOMEM; + } svc->sb_phys = (phys_addr_t)pa; if (qseecom.qsee_version < QSEE_VERSION_40) { @@ -1522,6 +1526,10 @@ static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data, /* Populate the structure for sending scm call to load image */ data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, data->client.ihandle); + if (IS_ERR_OR_NULL(data->client.sb_virt)) { + pr_err("ION memory mapping for client shared buf failed\n"); + return -ENOMEM; + } data->client.sb_phys = (phys_addr_t)pa; data->client.sb_length = req.sb_len; data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base; @@ -4203,6 +4211,11 @@ int 
qseecom_start_app(struct qseecom_handle **handle, /* Populate the structure for sending scm call to load image */ data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, data->client.ihandle); + if (IS_ERR_OR_NULL(data->client.sb_virt)) { + pr_err("ION memory mapping for client shared buf failed\n"); + ret = -ENOMEM; + goto err; + } data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt; data->client.sb_phys = (phys_addr_t)pa; (*handle)->dev = (void *)data; diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c deleted file mode 100644 index 185c69c9738a..000000000000 --- a/drivers/misc/uid_stat.c +++ /dev/null @@ -1,153 +0,0 @@ -/* drivers/misc/uid_stat.c - * - * Copyright (C) 2008 - 2009 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include <linux/atomic.h> - -#include <linux/err.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/list.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/stat.h> -#include <linux/uid_stat.h> -#include <net/activity_stats.h> - -static DEFINE_SPINLOCK(uid_lock); -static LIST_HEAD(uid_list); -static struct proc_dir_entry *parent; - -struct uid_stat { - struct list_head link; - uid_t uid; - atomic_t tcp_rcv; - atomic_t tcp_snd; -}; - -static struct uid_stat *find_uid_stat(uid_t uid) { - struct uid_stat *entry; - - list_for_each_entry(entry, &uid_list, link) { - if (entry->uid == uid) { - return entry; - } - } - return NULL; -} - -static int uid_stat_atomic_int_show(struct seq_file *m, void *v) -{ - unsigned int bytes; - atomic_t *counter = m->private; - - bytes = (unsigned int) (atomic_read(counter) + INT_MIN); - seq_printf(m, "%u\n", bytes); - return seq_has_overflowed(m) ? -ENOSPC : 0; -} - -static int uid_stat_read_atomic_int_open(struct inode *inode, struct file *file) -{ - return single_open(file, uid_stat_atomic_int_show, PDE_DATA(inode)); -} - -static const struct file_operations uid_stat_read_atomic_int_fops = { - .open = uid_stat_read_atomic_int_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* Create a new entry for tracking the specified uid. */ -static struct uid_stat *create_stat(uid_t uid) { - struct uid_stat *new_uid; - /* Create the uid stat struct and append it to the list. */ - new_uid = kmalloc(sizeof(struct uid_stat), GFP_ATOMIC); - if (!new_uid) - return NULL; - - new_uid->uid = uid; - /* Counters start at INT_MIN, so we can track 4GB of network traffic. */ - atomic_set(&new_uid->tcp_rcv, INT_MIN); - atomic_set(&new_uid->tcp_snd, INT_MIN); - - list_add_tail(&new_uid->link, &uid_list); - return new_uid; -} - -static void create_stat_proc(struct uid_stat *new_uid) -{ - char uid_s[32]; - struct proc_dir_entry *entry; - sprintf(uid_s, "%d", new_uid->uid); - entry = proc_mkdir(uid_s, parent); - - /* Keep reference to uid_stat so we know what uid to read stats from. 
*/ - proc_create_data("tcp_snd", S_IRUGO, entry, - &uid_stat_read_atomic_int_fops, &new_uid->tcp_snd); - - proc_create_data("tcp_rcv", S_IRUGO, entry, - &uid_stat_read_atomic_int_fops, &new_uid->tcp_rcv); -} - -static struct uid_stat *find_or_create_uid_stat(uid_t uid) -{ - struct uid_stat *entry; - unsigned long flags; - spin_lock_irqsave(&uid_lock, flags); - entry = find_uid_stat(uid); - if (entry) { - spin_unlock_irqrestore(&uid_lock, flags); - return entry; - } - entry = create_stat(uid); - spin_unlock_irqrestore(&uid_lock, flags); - if (entry) - create_stat_proc(entry); - return entry; -} - -int uid_stat_tcp_snd(uid_t uid, int size) { - struct uid_stat *entry; - activity_stats_update(); - entry = find_or_create_uid_stat(uid); - if (!entry) - return -1; - atomic_add(size, &entry->tcp_snd); - return 0; -} - -int uid_stat_tcp_rcv(uid_t uid, int size) { - struct uid_stat *entry; - activity_stats_update(); - entry = find_or_create_uid_stat(uid); - if (!entry) - return -1; - atomic_add(size, &entry->tcp_rcv); - return 0; -} - -static int __init uid_stat_init(void) -{ - parent = proc_mkdir("uid_stat", NULL); - if (!parent) { - pr_err("uid_stat: failed to create proc entry\n"); - return -1; - } - return 0; -} - -__initcall(uid_stat_init); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 7547463928d6..6eee4aa0e574 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -4407,11 +4407,12 @@ static const struct mmc_fixup blk_fixups[] = add_quirk_mmc, MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD), /* - * Some Micron MMC cards needs longer data read timeout than - * indicated in CSD. + * Some MMC cards need longer data read timeout than indicated in CSD. */ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), + MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_LONG_READ_TIME), /* * Some Samsung MMC cards need longer data read timeout than diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 3762f698e1ee..828d2b85f6e4 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1970,7 +1970,7 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) /* * Some cards require longer data read timeout than indicated in CSD. * Address this by setting the read timeout to a "reasonably high" - * value. For the cards tested, 300ms has proven enough. If necessary, + * value. For the cards tested, 600ms has proven enough. If necessary, * this value can be increased if other problematic cards require this. * Certain Hynix 5.x cards giving read timeout even with 300ms. * Increasing further to max value (4s). diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 5489f243a682..89288bd1eaa4 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -348,6 +348,9 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) } } +/* Minimum partition switch timeout in milliseconds */ +#define MMC_MIN_PART_SWITCH_TIME 300 + /* * Decode extended CSD. 
*/ @@ -412,6 +415,10 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) /* EXT_CSD value is in units of 10ms, but we store in ms */ card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; + /* Some eMMC set the value too low so set a minimum */ + if (card->ext_csd.part_time && + card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) + card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; /* Sleep / awake timeout in 100ns units */ if (sa_shift > 0 && sa_shift <= 0x17) diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index a5cda926d38e..8aea3fa6938b 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -233,7 +233,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { .chip = &sdhci_acpi_chip_int, .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | - MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY, + MMC_CAP_WAIT_WHILE_BUSY, .caps2 = MMC_CAP2_HC_ERASE_SZ, .flags = SDHCI_ACPI_RUNTIME_PM, .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, @@ -248,7 +248,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD | - MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY, + MMC_CAP_WAIT_WHILE_BUSY, .flags = SDHCI_ACPI_RUNTIME_PM, .pm_caps = MMC_PM_KEEP_POWER, .probe_slot = sdhci_acpi_sdio_probe_slot, @@ -260,7 +260,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | SDHCI_QUIRK2_STOP_WITH_TC, - .caps = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY, + .caps = MMC_CAP_WAIT_WHILE_BUSY, .probe_slot = sdhci_acpi_sd_probe_slot, }; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 610154836d79..5ebe6eb6b89e 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -361,7 +361,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | - MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY; slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; slot->hw_reset = sdhci_pci_int_hw_reset; @@ -377,15 +376,13 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | - MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY; return 0; } static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) { - slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST | - MMC_CAP_WAIT_WHILE_BUSY; + slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; slot->cd_con_id = NULL; slot->cd_idx = 0; slot->cd_override_level = true; diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 5b9834cf2820..4dd0391d2942 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -426,8 +426,25 @@ retry: pnum, vol_id, lnum); err = -EBADMSG; } else { - err = -EINVAL; - ubi_ro_mode(ubi); + /* + * Ending up here in the non-Fastmap case + * is a clear bug as the VID header had to + * be present at scan time to have it referenced. + * With fastmap the story is more complicated. + * Fastmap has the mapping info without the need + * of a full scan. So the LEB could have been + * unmapped, Fastmap cannot know this and keeps + * the LEB referenced. 
+ * This is valid and works as the layer above UBI + * has to do bookkeeping about used/referenced + * LEBs in any case. + */ + if (ubi->fast_attach) { + err = -EBADMSG; + } else { + err = -EINVAL; + ubi_ro_mode(ubi); + } } } goto out_free; @@ -558,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; struct ubi_volume *vol = ubi->volumes[idx]; struct ubi_vid_hdr *vid_hdr; + uint32_t crc; vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); if (!vid_hdr) @@ -582,14 +600,8 @@ retry: goto out_put; } - vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); - err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); - if (err) { - up_read(&ubi->fm_eba_sem); - goto write_error; - } + ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC); - data_size = offset + len; mutex_lock(&ubi->buf_mutex); memset(ubi->peb_buf + offset, 0xFF, len); @@ -604,6 +616,19 @@ retry: memcpy(ubi->peb_buf + offset, buf, len); + data_size = offset + len; + crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size); + vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); + vid_hdr->copy_flag = 1; + vid_hdr->data_size = cpu_to_be32(data_size); + vid_hdr->data_crc = cpu_to_be32(crc); + err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); + if (err) { + mutex_unlock(&ubi->buf_mutex); + up_read(&ubi->fm_eba_sem); + goto write_error; + } + err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); if (err) { mutex_unlock(&ubi->buf_mutex); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 263b439e21a8..990898b9dc72 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, ubi_msg(ubi, "fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); ubi->fm_disabled = 0; + ubi->fast_attach = 1; ubi_free_vid_hdr(ubi, vh); kfree(ech); diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index fdb1931f66ed..bdb885d9d3fc 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -466,6 +466,7 @@ struct ubi_debug_info { * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes * @fm_work: fastmap work queue * @fm_work_scheduled: non-zero if fastmap work was scheduled + * @fast_attach: non-zero if UBI was attached by fastmap * * @used: RB-tree of used physical eraseblocks * @erroneous: RB-tree of erroneous used physical eraseblocks @@ -574,6 +575,7 @@ struct ubi_device { size_t fm_size; struct work_struct fm_work; int fm_work_scheduled; + int fast_attach; /* Wear-leveling sub-system's stuff */ struct rb_root used; diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 141c2a42d7ed..910c12e2638e 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -696,11 +696,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu) /* allow change of MTU according to the CANFD ability of the device */ switch (new_mtu) { case CAN_MTU: + /* 'CANFD-only' controllers can not switch to CAN_MTU */ + if (priv->ctrlmode_static & CAN_CTRLMODE_FD) + return -EINVAL; + priv->ctrlmode &= ~CAN_CTRLMODE_FD; break; case CANFD_MTU: - if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD)) + /* check for potential CANFD ability */ + if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) && + !(priv->ctrlmode_static & CAN_CTRLMODE_FD)) return -EINVAL; priv->ctrlmode |= CAN_CTRLMODE_FD; @@ -782,6 +788,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { = { .len = sizeof(struct can_bittiming_const) }, }; 
+static int can_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + bool is_can_fd = false; + + /* Make sure that valid CAN FD configurations always consist of + * - nominal/arbitration bittiming + * - data bittiming + * - control mode with CAN_CTRLMODE_FD set + */ + + if (data[IFLA_CAN_CTRLMODE]) { + struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); + + is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD; + } + + if (is_can_fd) { + if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING]) + return -EOPNOTSUPP; + } + + if (data[IFLA_CAN_DATA_BITTIMING]) { + if (!is_can_fd || !data[IFLA_CAN_BITTIMING]) + return -EOPNOTSUPP; + } + + return 0; +} + static int can_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { @@ -813,19 +848,31 @@ static int can_changelink(struct net_device *dev, if (data[IFLA_CAN_CTRLMODE]) { struct can_ctrlmode *cm; + u32 ctrlstatic; + u32 maskedflags; /* Do not allow changing controller mode while running */ if (dev->flags & IFF_UP) return -EBUSY; cm = nla_data(data[IFLA_CAN_CTRLMODE]); + ctrlstatic = priv->ctrlmode_static; + maskedflags = cm->flags & cm->mask; + + /* check whether provided bits are allowed to be passed */ + if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic)) + return -EOPNOTSUPP; + + /* do not check for static fd-non-iso if 'fd' is disabled */ + if (!(maskedflags & CAN_CTRLMODE_FD)) + ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO; - /* check whether changed bits are allowed to be modified */ - if (cm->mask & ~priv->ctrlmode_supported) + /* make sure static options are provided by configuration */ + if ((maskedflags & ctrlstatic) != ctrlstatic) return -EOPNOTSUPP; /* clear bits to be modified and copy the flag values */ priv->ctrlmode &= ~cm->mask; - priv->ctrlmode |= (cm->flags & cm->mask); + priv->ctrlmode |= maskedflags; /* CAN_CTRLMODE_FD can only be set when driver supports FD */ if (priv->ctrlmode & CAN_CTRLMODE_FD) @@ -966,6 +1013,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = { .maxtype = IFLA_CAN_MAX, .policy = can_policy, .setup = can_setup, + .validate = can_validate, .newlink = can_newlink, .changelink = can_changelink, .get_size = can_get_size, diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 39cf911f7a1e..195f15edb32e 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -955,7 +955,7 @@ static struct net_device *alloc_m_can_dev(void) priv->can.do_get_berr_counter = m_can_get_berr_counter; /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */ - priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO; + can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index bd377a6b067d..df54475d163b 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -86,9 +86,14 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) while (!cur_buf->skb && next != rxq->read_idx) { struct alx_rfd *rfd = &rxq->rfd[cur]; - skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); + skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp); if (!skb) break; + + /* Workround for the HW RX DMA overflow issue */ + if (((unsigned long)skb->data & 0xfff) == 0xfc0) + skb_reserve(skb, 64); + dma = dma_map_single(&alx->hw.pdev->dev, skb->data, alx->rxbuf_size, DMA_FROM_DEVICE); 
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 169059c92f80..8d54e7b41bbf 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2405,9 +2405,9 @@ static int macb_init(struct platform_device *pdev) if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) val = GEM_BIT(RGMII); else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && - (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) val = MACB_BIT(RMII); - else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) val = MACB_BIT(MII); if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) @@ -2738,7 +2738,7 @@ static int at91ether_init(struct platform_device *pdev) } static const struct macb_config at91sam9260_config = { - .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII, + .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .clk_init = macb_clk_init, .init = macb_init, }; @@ -2751,21 +2751,22 @@ static const struct macb_config pc302gem_config = { }; static const struct macb_config sama5d2_config = { - .caps = 0, + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, }; static const struct macb_config sama5d3_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE + | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, }; static const struct macb_config sama5d4_config = { - .caps = 0, + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .dma_burst_length = 4, .clk_init = macb_clk_init, .init = macb_init, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d83b0db77821..3f385ab94988 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -398,7 +398,7 @@ /* Capability mask bits */ #define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 -#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 +#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 2b34622a4bfe..3920c3eb6006 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4475,7 +4475,7 @@ static int rocker_port_obj_add(struct net_device *dev, fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); err = rocker_port_fib_ipv4(rocker_port, trans, htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, 0); + fib4->fi, fib4->tb_id, 0); break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = rocker_port_fdb_add(rocker_port, trans, @@ -4547,7 +4547,7 @@ static int rocker_port_obj_del(struct net_device *dev, fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); err = rocker_port_fib_ipv4(rocker_port, NULL, htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, + fib4->fi, fib4->tb_id, ROCKER_OP_FLAG_REMOVE); break; case SWITCHDEV_OBJ_ID_PORT_FDB: diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e6a084a6be12..cbe9a330117a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -619,6 +619,17 @@ fail: return rc; } +static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) +{ + struct 
efx_channel *channel; + struct efx_tx_queue *tx_queue; + + /* All our existing PIO buffers went away */ + efx_for_each_channel(channel, efx) + efx_for_each_channel_tx_queue(tx_queue, channel) + tx_queue->piobuf = NULL; +} + #else /* !EFX_USE_PIO */ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) @@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx) { } +static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) +{ +} + #endif /* EFX_USE_PIO */ static void efx_ef10_remove(struct efx_nic *efx) @@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) nic_data->must_realloc_vis = true; nic_data->must_restore_filters = true; nic_data->must_restore_piobufs = true; + efx_ef10_forget_old_piobufs(efx); nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; /* Driver-created vswitches and vports must be re-created */ diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 58efdec12f30..69e31e2a68fc 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -310,15 +310,15 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) /* Need Geneve and inner Ethernet header to be present */ if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN))) - goto error; + goto drop; /* Return packets with reserved bits set */ geneveh = geneve_hdr(skb); if (unlikely(geneveh->ver != GENEVE_VER)) - goto error; + goto drop; if (unlikely(geneveh->proto_type != htons(ETH_P_TEB))) - goto error; + goto drop; opts_len = geneveh->opt_len * 4; if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, @@ -336,10 +336,6 @@ drop: /* Consume bad packet */ kfree_skb(skb); return 0; - -error: - /* Let the UDP layer deal with the skb */ - return 1; } static struct socket *geneve_create_sock(struct net *net, bool ipv6, @@ -998,6 +994,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) return geneve_xmit_skb(skb, dev, info); } +static int geneve_change_mtu(struct net_device *dev, int new_mtu) +{ + /* GENEVE overhead is not fixed, so we can't enforce a more + * precise max MTU. + */ + if (new_mtu < 68 || new_mtu > IP_MAX_MTU) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); @@ -1042,7 +1049,7 @@ static const struct net_device_ops geneve_netdev_ops = { .ndo_stop = geneve_stop, .ndo_start_xmit = geneve_xmit, .ndo_get_stats64 = ip_tunnel_get_stats64, - .ndo_change_mtu = eth_change_mtu, + .ndo_change_mtu = geneve_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_fill_metadata_dst = geneve_fill_metadata_dst, @@ -1349,11 +1356,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, err = geneve_configure(net, dev, &geneve_remote_unspec, 0, 0, 0, htons(dst_port), true); - if (err) { - free_netdev(dev); - return ERR_PTR(err); - } + if (err) + goto err; + + /* openvswitch users expect packet sizes to be unrestricted, + * so set the largest MTU we can. 
+ */ + err = geneve_change_mtu(dev, IP_MAX_MTU); + if (err) + goto err; + return dev; + + err: + free_netdev(dev); + return ERR_PTR(err); } EXPORT_SYMBOL_GPL(geneve_dev_create_fb); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 59fefca74263..a5f392ae30d5 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -969,7 +969,7 @@ static void team_port_disable(struct team *team, NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ NETIF_F_HIGHDMA | NETIF_F_LRO) -static void __team_compute_features(struct team *team) +static void ___team_compute_features(struct team *team) { struct team_port *port; u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; @@ -993,15 +993,20 @@ static void __team_compute_features(struct team *team) team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; +} +static void __team_compute_features(struct team *team) +{ + ___team_compute_features(team); netdev_change_features(team->dev); } static void team_compute_features(struct team *team) { mutex_lock(&team->lock); - __team_compute_features(team); + ___team_compute_features(team); mutex_unlock(&team->lock); + netdev_change_features(team->dev); } static int team_port_enter(struct team *team, struct team_port *port) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 4b15d9ee5a54..935e0b45e151 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -567,11 +567,13 @@ static void tun_detach_all(struct net_device *dev) for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); BUG_ON(!tfile); + tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; tfile->socket.sk->sk_data_ready(tfile->socket.sk); RCU_INIT_POINTER(tfile->tun, NULL); --tun->numqueues; } list_for_each_entry(tfile, &tun->disabled, next) { + tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; tfile->socket.sk->sk_data_ready(tfile->socket.sk); RCU_INIT_POINTER(tfile->tun, NULL); } @@ -627,6 +629,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte goto out; } tfile->queue_index = tun->numqueues; + tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; @@ -1412,9 +1415,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, if (!iov_iter_count(to)) return 0; - if (tun->dev->reg_state != NETREG_REGISTERED) - return -EIO; - /* Read frames from queue */ skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0, &peeked, &off, &err); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index bd9acff1eb7b..7fbd8f044207 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -66,7 +66,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, * buffer. */ if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) { - offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32); + offset = ((rx->remaining + 1) & 0xfffe); rx->header = get_unaligned_le32(skb->data + offset); offset = 0; diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index b11fe09552bf..e0e94b855bbe 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -809,6 +809,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ if (cdc_ncm_init(dev)) goto error2; + /* Some firmwares need a pause here or they will silently fail + * to set up the interface properly. 
This value was decided + * empirically on a Sierra Wireless MC7455 running 02.08.02.00 + * firmware. + */ + usleep_range(10000, 20000); + /* configure data interface */ temp = usb_set_interface(dev->udev, iface_no, data_altsetting); if (temp) { diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3c0df70e2f53..003780901628 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1254,7 +1254,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) /* Need Vxlan and inner Ethernet header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) - goto error; + goto drop; vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1); flags = ntohl(vxh->vx_flags); @@ -1344,13 +1344,7 @@ drop: bad_flags: netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", ntohl(vxh->vx_flags), ntohl(vxh->vx_vni)); - -error: - if (tun_dst) - dst_release((struct dst_entry *)tun_dst); - - /* Return non vxlan pkt */ - return 1; + goto drop; } static int arp_reduce(struct net_device *dev, struct sk_buff *skb) @@ -2370,29 +2364,43 @@ static void vxlan_set_multicast_list(struct net_device *dev) { } -static int vxlan_change_mtu(struct net_device *dev, int new_mtu) +static int __vxlan_change_mtu(struct net_device *dev, + struct net_device *lowerdev, + struct vxlan_rdst *dst, int new_mtu, bool strict) { - struct vxlan_dev *vxlan = netdev_priv(dev); - struct vxlan_rdst *dst = &vxlan->default_dst; - struct net_device *lowerdev; - int max_mtu; + int max_mtu = IP_MAX_MTU; - lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex); - if (lowerdev == NULL) - return eth_change_mtu(dev, new_mtu); + if (lowerdev) + max_mtu = lowerdev->mtu; if (dst->remote_ip.sa.sa_family == AF_INET6) - max_mtu = lowerdev->mtu - VXLAN6_HEADROOM; + max_mtu -= VXLAN6_HEADROOM; else - max_mtu = lowerdev->mtu - VXLAN_HEADROOM; + max_mtu -= VXLAN_HEADROOM; - if (new_mtu < 68 || new_mtu > max_mtu) + if (new_mtu < 68) return -EINVAL; + if (new_mtu > max_mtu) { + if (strict) + return -EINVAL; + + new_mtu = max_mtu; + } + dev->mtu = new_mtu; return 0; } +static int vxlan_change_mtu(struct net_device *dev, int new_mtu) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_rdst *dst = &vxlan->default_dst; + struct net_device *lowerdev = __dev_get_by_index(vxlan->net, + dst->remote_ifindex); + return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true); +} + static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, struct ip_tunnel_info *info, __be16 sport, __be16 dport) @@ -2768,6 +2776,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, int err; bool use_ipv6 = false; __be16 default_port = vxlan->cfg.dst_port; + struct net_device *lowerdev = NULL; vxlan->net = src_net; @@ -2788,9 +2797,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, } if (conf->remote_ifindex) { - struct net_device *lowerdev - = __dev_get_by_index(src_net, conf->remote_ifindex); - + lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); dst->remote_ifindex = conf->remote_ifindex; if (!lowerdev) { @@ -2814,6 +2821,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, needed_headroom = lowerdev->hard_header_len; } + if (conf->mtu) { + err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false); + if (err) + return err; + } + if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) needed_headroom += VXLAN6_HEADROOM; else @@ -2991,6 +3004,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if 
(data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; + if (tb[IFLA_MTU]) + conf.mtu = nla_get_u32(tb[IFLA_MTU]); + err = vxlan_dev_configure(src_net, dev, &conf); switch (err) { case -ENODEV: diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 0947cc271e69..531de256d58d 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1681,6 +1681,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) goto err_hif_stop; } + ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1; + + INIT_LIST_HEAD(&ar->arvifs); + /* we don't care about HTT in UTF mode */ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { status = ath10k_htt_setup(&ar->htt); @@ -1694,10 +1698,6 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) if (status) goto err_hif_stop; - ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1; - - INIT_LIST_HEAD(&ar->arvifs); - return 0; err_hif_stop: diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 6cc1aa3449c8..1a88a24ffeac 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1986,7 +1986,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file, goto out; } - if (filter && (filter != ar->debug.pktlog_filter)) { + if (filter == ar->debug.pktlog_filter) { + ret = count; + goto out; + } + + if (filter) { ret = ath10k_wmi_pdev_pktlog_enable(ar, filter); if (ret) { ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 95a55405ebf0..1e1bef349487 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4456,7 +4456,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err_vdev_delete; } - if (ar->cfg_tx_chainmask) { + /* Configuring number of spatial stream for monitor interface is causing + * target assert in qca9888 and qca6174. + */ + if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); vdev_param = ar->wmi.vdev_param->nss; @@ -6416,7 +6419,13 @@ ath10k_mac_update_rx_channel(struct ath10k *ar, def = &vifs[0].new_ctx->def; ar->rx_channel = def->chan; - } else if (ctx && ath10k_mac_num_chanctxs(ar) == 0) { + } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) || + (ctx && (ar->state == ATH10K_STATE_RESTARTED))) { + /* During driver restart due to firmware assert, since mac80211 + * already has valid channel context for given radio, channel + * context iteration return num_chanctx > 0. So fix rx_channel + * when restart is in progress. 
+ */ ar->rx_channel = ctx->def.chan; } else { ar->rx_channel = NULL; diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c index 803030fd17d3..6a2a16856763 100644 --- a/drivers/net/wireless/ath/ath5k/led.c +++ b/drivers/net/wireless/ath/ath5k/led.c @@ -77,7 +77,7 @@ static const struct pci_device_id ath5k_led_devices[] = { /* HP Compaq CQ60-206US (ddreggors@jumptv.com) */ { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) }, /* HP Compaq C700 (nitrousnrg@gmail.com) */ - { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) }, + { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) }, /* LiteOn AR5BXB63 (magooz@salug.it) */ { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) }, /* IBM-specific AR5212 (all others) */ diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 2e2b92ba96b8..1bdeacf7b257 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -49,6 +49,10 @@ int ath9k_led_blink; module_param_named(blink, ath9k_led_blink, int, 0444); MODULE_PARM_DESC(blink, "Enable LED blink on activity"); +static int ath9k_led_active_high = -1; +module_param_named(led_active_high, ath9k_led_active_high, int, 0444); +MODULE_PARM_DESC(led_active_high, "Invert LED polarity"); + static int ath9k_btcoex_enable; module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); @@ -600,6 +604,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, if (ret) return ret; + if (ath9k_led_active_high != -1) + ah->config.led_active_high = ath9k_led_active_high == 1; + /* * Enable WLAN/BT RX Antenna diversity only when: * diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index e6fef1be9977..7cdaf40c3057 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -28,6 +28,16 @@ static const struct pci_device_id ath_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ + +#ifdef CONFIG_ATH9K_PCOEM + /* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0029, + PCI_VENDOR_ID_ATHEROS, + 0x2096), + .driver_data = ATH9K_PCI_LED_ACT_HI }, +#endif + { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ #ifdef CONFIG_ATH9K_PCOEM diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 64046e0bd0a2..c3853a63b083 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -141,7 +141,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp) unmask_halp ? 
WIL6210_IMC_MISC : WIL6210_IMC_MISC_NO_HALP); } -static void wil6210_unmask_halp(struct wil6210_priv *wil) +void wil6210_unmask_halp(struct wil6210_priv *wil) { wil_dbg_irq(wil, "%s()\n", __func__); @@ -149,7 +149,7 @@ static void wil6210_unmask_halp(struct wil6210_priv *wil) BIT_DMA_EP_MISC_ICR_HALP); } -static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) +void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) { wil_dbg_irq(wil, "%s()\n", __func__); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 5285ebc8b9af..94e5b67abd59 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -875,19 +875,29 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) flush_workqueue(wil->wq_service); flush_workqueue(wil->wmi_wq); + wil6210_unmask_irq_pseudo(wil); + wil6210_unmask_halp(wil); + wil_halp_vote(wil); + wil_bl_crash_info(wil, false); rc = wil_target_reset(wil); + /* wil_target_reset clears the HALP IRQ, need to set it again. + * Call wil_halp_unvote to clear the HALP reference counter + * and unmask the HALP interrupt before setting it again + */ + wil_halp_unvote(wil); + wil_halp_vote(wil); wil_rx_fini(wil); if (rc) { wil_bl_crash_info(wil, true); - return rc; + goto out; } rc = wil_get_bl_info(wil); if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */ rc = 0; if (rc) - return rc; + goto out; wil_set_oob_mode(wil, oob_mode); if (load_fw) { @@ -899,14 +909,19 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) /* Loading f/w from the file */ rc = wil_request_firmware(wil, WIL_FW_NAME, true); if (rc) - return rc; + goto out; rc = wil_request_firmware(wil, WIL_FW2_NAME, true); if (rc) - return rc; + goto out; /* Mark FW as loaded from host */ wil_s(wil, RGF_USER_USAGE_6, 1); + /* Clear the HALP while in BL, before clearing all the IRQs + * and running the FW. 
+ */ + wil_halp_unvote(wil); + /* clear any interrupts which on-card-firmware * may have set */ @@ -917,6 +932,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); wil_release_cpu(wil); + } else { + /* Allow XTAL off when going down */ + wil_halp_unvote(wil); } /* init after reset */ @@ -955,6 +973,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) } return rc; + +out: + wil_halp_unvote(wil); + return rc; } void wil_fw_error_recovery(struct wil6210_priv *wil) diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index a19dba5b9e5f..8961b4ce4898 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -837,6 +837,7 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil); void wil_disable_irq(struct wil6210_priv *wil); void wil_enable_irq(struct wil6210_priv *wil); void wil6210_mask_halp(struct wil6210_priv *wil); +void wil6210_unmask_halp(struct wil6210_priv *wil); /* P2P */ bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request); @@ -902,6 +903,8 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil); void wil_rx_handle(struct wil6210_priv *wil, int *quota); void wil6210_unmask_irq_rx(struct wil6210_priv *wil); +void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil); + int wil_iftype_nl2wmi(enum nl80211_iftype type); int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index c00a7daaa4bc..0cd95120bc78 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2723,6 +2723,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || !info->attrs[HWSIM_ATTR_FLAGS] || !info->attrs[HWSIM_ATTR_COOKIE] || + !info->attrs[HWSIM_ATTR_SIGNAL] || !info->attrs[HWSIM_ATTR_TX_INFO]) goto out; diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 0517a4f2d3f2..7a40d8dffa36 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1660,9 +1660,9 @@ void rtl_watchdog_wq_callback(void *data) if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) - rtl_lps_enter(hw); - else rtl_lps_leave(hw); + else + rtl_lps_enter(hw); } rtlpriv->link_info.num_rx_inperiod = 0; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index f2b9d11adc9e..e85f1652ce55 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -1203,7 +1203,6 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, /* Force GNT_BT to low */ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); - btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { /* tell firmware "no antenna inverse" */ @@ -1211,19 +1210,25 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, h2c_parameter[1] = 1; /* ext switch type */ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); } else { /* tell firmware "antenna inverse" */ h2c_parameter[0] = 1; 
h2c_parameter[1] = 1; /* ext switch type */ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); } } /* ext switch setting */ if (use_ext_switch) { /* fixed internal switch S1->WiFi, S0->BT */ - btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + else + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); + switch (antpos_type) { case BTC_ANT_WIFI_AT_MAIN: /* ext switch main at wifi */ diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index b2791c893417..babd1490f20c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -965,13 +965,38 @@ void exhalbtc_set_chip_type(u8 chip_type) } } -void exhalbtc_set_ant_num(u8 type, u8 ant_num) +void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num) { if (BT_COEX_ANT_TYPE_PG == type) { gl_bt_coexist.board_info.pg_ant_num = ant_num; gl_bt_coexist.board_info.btdm_ant_num = ant_num; + /* The antenna position: + * Main (default) or Aux for pgAntNum=2 && btdmAntNum=1. + * The antenna position should be determined by the + * auto-detect mechanism. + * The following assumes Main, + * and must be modified + * once the auto-detect mechanism is ready. + */ + if ((gl_bt_coexist.board_info.pg_ant_num == 2) && + (gl_bt_coexist.board_info.btdm_ant_num == 1)) + gl_bt_coexist.board_info.btdm_ant_pos = + BTC_ANTENNA_AT_MAIN_PORT; + else + gl_bt_coexist.board_info.btdm_ant_pos = + BTC_ANTENNA_AT_MAIN_PORT; } else if (BT_COEX_ANT_TYPE_ANTDIV == type) { gl_bt_coexist.board_info.btdm_ant_num = ant_num; + gl_bt_coexist.board_info.btdm_ant_pos = + BTC_ANTENNA_AT_MAIN_PORT; } else if (type == BT_COEX_ANT_TYPE_DETECTED) { gl_bt_coexist.board_info.btdm_ant_num = ant_num; + if (rtlpriv->cfg->mod_params->ant_sel == 1) + gl_bt_coexist.board_info.btdm_ant_pos = + BTC_ANTENNA_AT_AUX_PORT; + else + gl_bt_coexist.board_info.btdm_ant_pos = + BTC_ANTENNA_AT_MAIN_PORT; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index 0a903ea179ef..f41ca57dd8a7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -535,7 +535,7 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version); void exhalbtc_update_min_bt_rssi(char bt_rssi); void exhalbtc_set_bt_exist(bool bt_exist); void exhalbtc_set_chip_type(u8 chip_type); -void exhalbtc_set_ant_num(u8 type, u8 ant_num); +void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num); void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist); void exhalbtc_signal_compensation(struct btc_coexist *btcoexist, u8 *rssi_wifi, u8 *rssi_bt); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index b9b0cb7af8ea..d3fd9211b3a4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c @@ -72,7 +72,10 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv) __func__, bt_type); exhalbtc_set_chip_type(bt_type); - exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num); + if (rtlpriv->cfg->mod_params->ant_sel == 1) + 
exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_DETECTED, 1); + else + exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num); } void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv) diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 7f471bff435c..5b4048041147 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1573,7 +1573,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) true, HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); - kfree_skb(skb); + dev_kfree_skb_irq(skb); ring->idx = (ring->idx + 1) % ring->entries; } ring->idx = 0; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index c983d2fe147f..5a3df9198ddf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2684,6 +2684,7 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, bool auto_load_fail, u8 *hwinfo) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; u8 value; u32 tmpu_32; @@ -2702,6 +2703,10 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; } + /* override ant_num / ant_path */ + if (mod_params->ant_sel) + rtlpriv->btcoexist.btc_info.ant_num = + (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); } void rtl8723be_bt_reg_init(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index a78eaeda0008..2101793438ed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -273,6 +273,7 @@ static struct rtl_mod_params rtl8723be_mod_params = { .msi_support = false, .disable_watchdog = false, .debug = DBG_EMERG, + .ant_sel = 0, }; static struct rtl_hal_cfg rtl8723be_hal_cfg = { @@ -394,6 +395,7 @@ module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, bool, 0444); +module_param_named(ant_sel, rtl8723be_mod_params.ant_sel, int, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); @@ -402,6 +404,7 @@ MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); +MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 4544752a2ba8..b6faf624480e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2252,6 +2252,9 @@ struct rtl_mod_params { /* default 0: 1 means do not disable interrupts */ bool int_clear; + + /* select antenna */ + int ant_sel; }; struct rtl_hal_usbint_cfg { diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c index e92f2639af2c..9fd3c6af0a61 100644 --- 
a/drivers/net/wireless/ti/wlcore/init.c +++ b/drivers/net/wireless/ti/wlcore/init.c @@ -549,6 +549,11 @@ static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; + /* Disable filtering */ + ret = wl1271_acx_group_address_tbl(wl, wlvif, false, NULL, 0); + if (ret < 0) + return ret; + ret = wl1271_acx_ap_max_tx_retry(wl, wlvif); if (ret < 0) return ret; diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 72a2c1969646..28da6242eb84 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) EXPORT_SYMBOL_GPL(of_irq_to_resource); /** - * of_irq_get - Decode a node's IRQ and return it as a Linux irq number + * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number * @dev: pointer to device tree node - * @index: zero-based index of the irq - * - * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain - * is not yet created. + * @index: zero-based index of the IRQ * + * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or + * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case + * of any other failure. */ int of_irq_get(struct device_node *dev, int index) { @@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index) EXPORT_SYMBOL_GPL(of_irq_get); /** - * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number + * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number * @dev: pointer to device tree node - * @name: irq name + * @name: IRQ name * - * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain - * is not yet created, or error code in case of any other failure. + * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or + * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case + * of any other failure. */ int of_irq_get_byname(struct device_node *dev, const char *name) { diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 7aafb5fb9336..9757cf9037a2 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -179,9 +179,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, u16 orig_cmd; struct pci_bus_region region, inverted_region; - if (dev->non_compliant_bars) - return 0; - mask = type ? PCI_ROM_ADDRESS_MASK : ~0; /* No printks while decoding is disabled! 
*/ @@ -322,6 +319,9 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) { unsigned int pos, reg; + if (dev->non_compliant_bars) + return; + for (pos = 0; pos < howmany; pos++) { struct resource *res = &dev->resource[pos]; reg = PCI_BASE_ADDRESS_0 + (pos << 2); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 3d8019eb3d84..181b35879ebd 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1191,9 +1191,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) const struct mtk_desc_pin *pin; chained_irq_enter(chip, desc); - for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) { + for (eint_num = 0; + eint_num < pctl->devdata->ap_num; + eint_num += 32, reg += 4) { status = readl(reg); - reg += 4; while (status) { offset = __ffs(status); index = eint_num + offset; diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c index 82dc109f7ed4..3149a877c51f 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c @@ -107,6 +107,7 @@ struct exynos5440_pmx_func { * @nr_groups: number of pin groups available. * @pmx_functions: list of pin functions parsed from device tree. * @nr_functions: number of pin functions available. + * @range: gpio range to register with pinctrl */ struct exynos5440_pinctrl_priv_data { void __iomem *reg_base; @@ -117,6 +118,7 @@ struct exynos5440_pinctrl_priv_data { unsigned int nr_groups; const struct exynos5440_pmx_func *pmx_functions; unsigned int nr_functions; + struct pinctrl_gpio_range range; }; /** @@ -742,7 +744,6 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev, struct pinctrl_desc *ctrldesc; struct pinctrl_dev *pctl_dev; struct pinctrl_pin_desc *pindesc, *pdesc; - struct pinctrl_gpio_range grange; char *pin_names; int pin, ret; @@ -794,12 +795,12 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev, return PTR_ERR(pctl_dev); } - grange.name = "exynos5440-pctrl-gpio-range"; - grange.id = 0; - grange.base = 0; - grange.npins = EXYNOS5440_MAX_PINS; - grange.gc = priv->gc; - pinctrl_add_gpio_range(pctl_dev, &grange); + priv->range.name = "exynos5440-pctrl-gpio-range"; + priv->range.id = 0; + priv->range.base = 0; + priv->range.npins = EXYNOS5440_MAX_PINS; + priv->range.gc = priv->gc; + pinctrl_add_gpio_range(pctl_dev, &priv->range); return 0; } diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c index d18308344431..293371b88ab9 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c @@ -127,6 +127,7 @@ enum ipa3_usb_state { IPA_USB_SUSPEND_REQUESTED, IPA_USB_SUSPEND_IN_PROGRESS, IPA_USB_SUSPENDED, + IPA_USB_SUSPENDED_NO_RWAKEUP, IPA_USB_RESUME_IN_PROGRESS }; @@ -152,6 +153,12 @@ struct finish_suspend_work_context { u32 ul_clnt_hdl; }; +struct ipa3_usb_teth_prot_conn_params { + u32 usb_to_ipa_clnt_hdl; + u32 ipa_to_usb_clnt_hdl; + struct ipa_usb_teth_prot_params params; +}; + /** * Transport type - could be either data tethering or DPL * Each transport has it's own RM resources and statuses @@ -163,6 +170,7 @@ struct ipa3_usb_transport_type_ctx { enum ipa3_usb_state state; struct finish_suspend_work_context finish_suspend_work; struct ipa_usb_xdci_chan_params ch_params; + struct ipa3_usb_teth_prot_conn_params teth_conn_params; }; struct 
ipa3_usb_smmu_reg_map { @@ -189,14 +197,15 @@ struct ipa3_usb_context { }; enum ipa3_usb_op { - IPA_USB_INIT_TETH_PROT, - IPA_USB_REQUEST_CHANNEL, - IPA_USB_CONNECT, - IPA_USB_DISCONNECT, - IPA_USB_RELEASE_CHANNEL, - IPA_USB_DEINIT_TETH_PROT, - IPA_USB_SUSPEND, - IPA_USB_RESUME + IPA_USB_OP_INIT_TETH_PROT, + IPA_USB_OP_REQUEST_CHANNEL, + IPA_USB_OP_CONNECT, + IPA_USB_OP_DISCONNECT, + IPA_USB_OP_RELEASE_CHANNEL, + IPA_USB_OP_DEINIT_TETH_PROT, + IPA_USB_OP_SUSPEND, + IPA_USB_OP_SUSPEND_NO_RWAKEUP, + IPA_USB_OP_RESUME }; struct ipa3_usb_status_dbg_info { @@ -228,22 +237,24 @@ struct ipa3_usb_context *ipa3_usb_ctx; static char *ipa3_usb_op_to_string(enum ipa3_usb_op op) { switch (op) { - case IPA_USB_INIT_TETH_PROT: - return "IPA_USB_INIT_TETH_PROT"; - case IPA_USB_REQUEST_CHANNEL: - return "IPA_USB_REQUEST_CHANNEL"; - case IPA_USB_CONNECT: - return "IPA_USB_CONNECT"; - case IPA_USB_DISCONNECT: - return "IPA_USB_DISCONNECT"; - case IPA_USB_RELEASE_CHANNEL: - return "IPA_USB_RELEASE_CHANNEL"; - case IPA_USB_DEINIT_TETH_PROT: - return "IPA_USB_DEINIT_TETH_PROT"; - case IPA_USB_SUSPEND: - return "IPA_USB_SUSPEND"; - case IPA_USB_RESUME: - return "IPA_USB_RESUME"; + case IPA_USB_OP_INIT_TETH_PROT: + return "IPA_USB_OP_INIT_TETH_PROT"; + case IPA_USB_OP_REQUEST_CHANNEL: + return "IPA_USB_OP_REQUEST_CHANNEL"; + case IPA_USB_OP_CONNECT: + return "IPA_USB_OP_CONNECT"; + case IPA_USB_OP_DISCONNECT: + return "IPA_USB_OP_DISCONNECT"; + case IPA_USB_OP_RELEASE_CHANNEL: + return "IPA_USB_OP_RELEASE_CHANNEL"; + case IPA_USB_OP_DEINIT_TETH_PROT: + return "IPA_USB_OP_DEINIT_TETH_PROT"; + case IPA_USB_OP_SUSPEND: + return "IPA_USB_OP_SUSPEND"; + case IPA_USB_OP_SUSPEND_NO_RWAKEUP: + return "IPA_USB_OP_SUSPEND_NO_RWAKEUP"; + case IPA_USB_OP_RESUME: + return "IPA_USB_OP_RESUME"; } return "UNSUPPORTED"; @@ -266,6 +277,8 @@ static char *ipa3_usb_state_to_string(enum ipa3_usb_state state) return "IPA_USB_SUSPEND_IN_PROGRESS"; case IPA_USB_SUSPENDED: return "IPA_USB_SUSPENDED"; + case IPA_USB_SUSPENDED_NO_RWAKEUP: + return "IPA_USB_SUSPENDED_NO_RWAKEUP"; case IPA_USB_RESUME_IN_PROGRESS: return "IPA_USB_RESUME_IN_PROGRESS"; } @@ -312,6 +325,7 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit, if (state == IPA_USB_INITIALIZED || state == IPA_USB_STOPPED || state == IPA_USB_RESUME_IN_PROGRESS || + state == IPA_USB_SUSPENDED_NO_RWAKEUP || /* * In case of failure during suspend request * handling, state is reverted to connected. 
@@ -327,7 +341,8 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit, case IPA_USB_STOPPED: if (state == IPA_USB_SUSPEND_IN_PROGRESS || state == IPA_USB_CONNECTED || - state == IPA_USB_SUSPENDED) + state == IPA_USB_SUSPENDED || + state == IPA_USB_SUSPENDED_NO_RWAKEUP) state_legal = true; break; case IPA_USB_SUSPEND_REQUESTED: @@ -354,6 +369,10 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit, (err_permit && state == IPA_USB_RESUME_IN_PROGRESS)) state_legal = true; break; + case IPA_USB_SUSPENDED_NO_RWAKEUP: + if (state == IPA_USB_CONNECTED) + state_legal = true; + break; case IPA_USB_RESUME_IN_PROGRESS: if (state == IPA_USB_SUSPEND_IN_PROGRESS || state == IPA_USB_SUSPENDED) @@ -418,32 +437,33 @@ static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op, spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); state = ipa3_usb_ctx->ttype_ctx[ttype].state; switch (op) { - case IPA_USB_INIT_TETH_PROT: + case IPA_USB_OP_INIT_TETH_PROT: if (state == IPA_USB_INVALID || (!is_dpl && state == IPA_USB_INITIALIZED)) is_legal = true; break; - case IPA_USB_REQUEST_CHANNEL: + case IPA_USB_OP_REQUEST_CHANNEL: if (state == IPA_USB_INITIALIZED) is_legal = true; break; - case IPA_USB_CONNECT: + case IPA_USB_OP_CONNECT: if (state == IPA_USB_INITIALIZED || state == IPA_USB_STOPPED) is_legal = true; break; - case IPA_USB_DISCONNECT: + case IPA_USB_OP_DISCONNECT: if (state == IPA_USB_CONNECTED || state == IPA_USB_SUSPEND_IN_PROGRESS || - state == IPA_USB_SUSPENDED) + state == IPA_USB_SUSPENDED || + state == IPA_USB_SUSPENDED_NO_RWAKEUP) is_legal = true; break; - case IPA_USB_RELEASE_CHANNEL: + case IPA_USB_OP_RELEASE_CHANNEL: /* when releasing 1st channel state will be changed already */ if (state == IPA_USB_STOPPED || (!is_dpl && state == IPA_USB_INITIALIZED)) is_legal = true; break; - case IPA_USB_DEINIT_TETH_PROT: + case IPA_USB_OP_DEINIT_TETH_PROT: /* * For data tethering we should allow deinit an inited protocol * always. E.g. rmnet is inited and rndis is connected. 
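The switch statements above and below encode, per operation, which ipa3_usb_state values are legal, and this patch threads the new IPA_USB_SUSPENDED_NO_RWAKEUP state through each of them. As a rough sketch of the same idea (reduced, made-up enums and names; not the driver's code), the rules for a few of the operations could equivalently be kept in a lookup table, which makes it easier to see which states every new operation has to account for:

/* Hypothetical, table-driven variant of the op-legality check (sketch only). */
#include <linux/types.h>

enum demo_state { ST_CONNECTED, ST_SUSPENDED, ST_SUSPENDED_NO_RWAKEUP, ST_MAX };
enum demo_op { OP_DISCONNECT, OP_SUSPEND, OP_RESUME, OP_MAX };

static const bool demo_legal[OP_MAX][ST_MAX] = {
	/*                  CONNECTED  SUSPENDED  SUSPENDED_NO_RWAKEUP */
	[OP_DISCONNECT] = { true,      true,      true  },
	[OP_SUSPEND]    = { true,      false,     false },
	[OP_RESUME]     = { false,     true,      true  },
};

static bool demo_check_legal_op(enum demo_op op, enum demo_state state)
{
	return demo_legal[op][state];
}

The driver itself keeps the open-coded switches; the table is only meant to summarise the transitions this change adds.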
@@ -453,13 +473,18 @@ static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op, if (!is_dpl || state == IPA_USB_INITIALIZED) is_legal = true; break; - case IPA_USB_SUSPEND: + case IPA_USB_OP_SUSPEND: if (state == IPA_USB_CONNECTED) is_legal = true; break; - case IPA_USB_RESUME: + case IPA_USB_OP_SUSPEND_NO_RWAKEUP: + if (state == IPA_USB_CONNECTED) + is_legal = true; + break; + case IPA_USB_OP_RESUME: if (state == IPA_USB_SUSPENDED || - state == IPA_USB_SUSPEND_IN_PROGRESS) + state == IPA_USB_SUSPEND_IN_PROGRESS || + state == IPA_USB_SUSPENDED_NO_RWAKEUP) is_legal = true; break; default: @@ -638,6 +663,7 @@ static int ipa3_usb_cons_request_resource_cb_do( ipa3_usb_ctx->ttype_ctx[ttype].state)); switch (ipa3_usb_ctx->ttype_ctx[ttype].state) { case IPA_USB_CONNECTED: + case IPA_USB_SUSPENDED_NO_RWAKEUP: rm_ctx->cons_state = IPA_USB_CONS_GRANTED; result = 0; break; @@ -717,6 +743,7 @@ static int ipa3_usb_cons_release_resource_cb_do( break; case IPA_USB_STOPPED: case IPA_USB_RESUME_IN_PROGRESS: + case IPA_USB_SUSPENDED_NO_RWAKEUP: if (rm_ctx->cons_requested) rm_ctx->cons_requested = false; break; @@ -886,7 +913,7 @@ int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot, ttype = IPA3_USB_GET_TTYPE(teth_prot); - if (!ipa3_usb_check_legal_op(IPA_USB_INIT_TETH_PROT, ttype)) { + if (!ipa3_usb_check_legal_op(IPA_USB_OP_INIT_TETH_PROT, ttype)) { IPA_USB_ERR("Illegal operation.\n"); result = -EPERM; goto bad_params; @@ -1204,7 +1231,7 @@ static int ipa3_usb_request_xdci_channel( ttype = IPA3_USB_GET_TTYPE(params->teth_prot); - if (!ipa3_usb_check_legal_op(IPA_USB_REQUEST_CHANNEL, ttype)) { + if (!ipa3_usb_check_legal_op(IPA_USB_OP_REQUEST_CHANNEL, ttype)) { IPA_USB_ERR("Illegal operation\n"); return -EPERM; } @@ -1347,7 +1374,7 @@ static int ipa3_usb_release_xdci_channel(u32 clnt_hdl, return -EINVAL; } - if (!ipa3_usb_check_legal_op(IPA_USB_RELEASE_CHANNEL, ttype)) { + if (!ipa3_usb_check_legal_op(IPA_USB_OP_RELEASE_CHANNEL, ttype)) { IPA_USB_ERR("Illegal operation.\n"); return -EPERM; } @@ -1511,81 +1538,79 @@ static int ipa3_usb_connect_dpl(void) return 0; } -static int ipa3_usb_connect_teth_prot( - struct ipa_usb_xdci_connect_params_internal *params, - enum ipa3_usb_transport_type ttype) +static int ipa3_usb_connect_teth_prot(enum ipa_usb_teth_prot teth_prot) { int result; struct teth_bridge_connect_params teth_bridge_params; + struct ipa3_usb_teth_prot_conn_params *teth_conn_params; + enum ipa3_usb_transport_type ttype; - IPA_USB_DBG("connecting protocol = %d\n", - params->teth_prot); - switch (params->teth_prot) { + IPA_USB_DBG("connecting protocol = %s\n", + ipa3_usb_teth_prot_to_string(teth_prot)); + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + teth_conn_params = &(ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params); + + switch (teth_prot) { case IPA_USB_RNDIS: if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].state == IPA_USB_TETH_PROT_CONNECTED) { IPA_USB_DBG("%s is already connected.\n", - ipa3_usb_teth_prot_to_string( - params->teth_prot)); + ipa3_usb_teth_prot_to_string(teth_prot)); break; } ipa3_usb_ctx->ttype_ctx[ttype].user_data = ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].user_data; result = rndis_ipa_pipe_connect_notify( - params->usb_to_ipa_clnt_hdl, - params->ipa_to_usb_clnt_hdl, - params->teth_prot_params.max_xfer_size_bytes_to_dev, - params->teth_prot_params.max_packet_number_to_dev, - params->teth_prot_params.max_xfer_size_bytes_to_host, + teth_conn_params->usb_to_ipa_clnt_hdl, + teth_conn_params->ipa_to_usb_clnt_hdl, + teth_conn_params->params.max_xfer_size_bytes_to_dev, + 
teth_conn_params->params.max_packet_number_to_dev, + teth_conn_params->params.max_xfer_size_bytes_to_host, ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS]. teth_prot_params.rndis.private); if (result) { IPA_USB_ERR("failed to connect %s.\n", - ipa3_usb_teth_prot_to_string( - params->teth_prot)); + ipa3_usb_teth_prot_to_string(teth_prot)); ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; return result; } ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].state = IPA_USB_TETH_PROT_CONNECTED; IPA_USB_DBG("%s is connected.\n", - ipa3_usb_teth_prot_to_string( - params->teth_prot)); + ipa3_usb_teth_prot_to_string(teth_prot)); break; case IPA_USB_ECM: if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].state == IPA_USB_TETH_PROT_CONNECTED) { IPA_USB_DBG("%s is already connected.\n", - ipa3_usb_teth_prot_to_string( - params->teth_prot)); + ipa3_usb_teth_prot_to_string(teth_prot)); break; } ipa3_usb_ctx->ttype_ctx[ttype].user_data = ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].user_data; - result = ecm_ipa_connect(params->usb_to_ipa_clnt_hdl, - params->ipa_to_usb_clnt_hdl, + result = ecm_ipa_connect(teth_conn_params->usb_to_ipa_clnt_hdl, + teth_conn_params->ipa_to_usb_clnt_hdl, ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM]. teth_prot_params.ecm.private); if (result) { IPA_USB_ERR("failed to connect %s.\n", - ipa3_usb_teth_prot_to_string( - params->teth_prot)); + ipa3_usb_teth_prot_to_string(teth_prot)); ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; return result; } ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].state = IPA_USB_TETH_PROT_CONNECTED; IPA_USB_DBG("%s is connected.\n", - ipa3_usb_teth_prot_to_string( - params->teth_prot)); + ipa3_usb_teth_prot_to_string(teth_prot)); break; case IPA_USB_RMNET: case IPA_USB_MBIM: - if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state == + if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state == IPA_USB_TETH_PROT_CONNECTED) { IPA_USB_DBG("%s is already connected.\n", - ipa3_usb_teth_prot_to_string( - params->teth_prot)); + ipa3_usb_teth_prot_to_string(teth_prot)); break; } result = ipa3_usb_init_teth_bridge(); @@ -1593,14 +1618,14 @@ static int ipa3_usb_connect_teth_prot( return result; ipa3_usb_ctx->ttype_ctx[ttype].user_data = - ipa3_usb_ctx->teth_prot_ctx[params->teth_prot]. + ipa3_usb_ctx->teth_prot_ctx[teth_prot]. user_data; teth_bridge_params.ipa_usb_pipe_hdl = - params->ipa_to_usb_clnt_hdl; + teth_conn_params->ipa_to_usb_clnt_hdl; teth_bridge_params.usb_ipa_pipe_hdl = - params->usb_to_ipa_clnt_hdl; + teth_conn_params->usb_to_ipa_clnt_hdl; teth_bridge_params.tethering_mode = - (params->teth_prot == IPA_USB_RMNET) ? + (teth_prot == IPA_USB_RMNET) ? 
(TETH_TETHERING_MODE_RMNET):(TETH_TETHERING_MODE_MBIM); teth_bridge_params.client_type = IPA_CLIENT_USB_PROD; result = ipa3_usb_connect_teth_bridge(&teth_bridge_params); @@ -1608,27 +1633,23 @@ static int ipa3_usb_connect_teth_prot( ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL; return result; } - ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state = + ipa3_usb_ctx->teth_prot_ctx[teth_prot].state = IPA_USB_TETH_PROT_CONNECTED; ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY); IPA_USB_DBG("%s (%s) is connected.\n", - ipa3_usb_teth_prot_to_string( - params->teth_prot), - ipa3_usb_teth_bridge_prot_to_string( - params->teth_prot)); + ipa3_usb_teth_prot_to_string(teth_prot), + ipa3_usb_teth_bridge_prot_to_string(teth_prot)); break; case IPA_USB_DIAG: if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state == IPA_USB_TETH_PROT_CONNECTED) { IPA_USB_DBG("%s is already connected.\n", - ipa3_usb_teth_prot_to_string( - params->teth_prot)); + ipa3_usb_teth_prot_to_string(teth_prot)); break; } ipa3_usb_ctx->ttype_ctx[ttype].user_data = - ipa3_usb_ctx->teth_prot_ctx[params->teth_prot]. - user_data; + ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data; result = ipa3_usb_connect_dpl(); if (result) { IPA_USB_ERR("Failed connecting DPL result=%d\n", @@ -1640,8 +1661,7 @@ static int ipa3_usb_connect_teth_prot( IPA_USB_TETH_PROT_CONNECTED; ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY); IPA_USB_DBG("%s is connected.\n", - ipa3_usb_teth_prot_to_string( - params->teth_prot)); + ipa3_usb_teth_prot_to_string(teth_prot)); break; default: IPA_USB_ERR("Invalid tethering protocol\n"); @@ -1775,11 +1795,19 @@ static int ipa3_usb_xdci_connect_internal( ttype = (params->teth_prot == IPA_USB_DIAG) ? IPA_USB_TRANSPORT_DPL : IPA_USB_TRANSPORT_TETH; - if (!ipa3_usb_check_legal_op(IPA_USB_CONNECT, ttype)) { + if (!ipa3_usb_check_legal_op(IPA_USB_OP_CONNECT, ttype)) { IPA_USB_ERR("Illegal operation.\n"); return -EPERM; } + ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params.ipa_to_usb_clnt_hdl + = params->ipa_to_usb_clnt_hdl; + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) + ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params. 
+ usb_to_ipa_clnt_hdl = params->usb_to_ipa_clnt_hdl; + ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params.params + = params->teth_prot_params; + /* Set EE xDCI specific scratch */ result = ipa3_set_usb_max_packet_size(params->max_pkt_size); if (result) { @@ -1816,7 +1844,7 @@ static int ipa3_usb_xdci_connect_internal( if (params->teth_prot != IPA_USB_DIAG) { /* Start UL channel */ - result = ipa3_xdci_connect(params->usb_to_ipa_clnt_hdl, + result = ipa3_xdci_start(params->usb_to_ipa_clnt_hdl, params->usb_to_ipa_xferrscidx, params->usb_to_ipa_xferrscidx_valid); if (result) { @@ -1826,7 +1854,7 @@ static int ipa3_usb_xdci_connect_internal( } /* Start DL/DPL channel */ - result = ipa3_xdci_connect(params->ipa_to_usb_clnt_hdl, + result = ipa3_xdci_start(params->ipa_to_usb_clnt_hdl, params->ipa_to_usb_xferrscidx, params->ipa_to_usb_xferrscidx_valid); if (result) { @@ -1835,7 +1863,7 @@ static int ipa3_usb_xdci_connect_internal( } /* Connect tethering protocol */ - result = ipa3_usb_connect_teth_prot(params, ttype); + result = ipa3_usb_connect_teth_prot(params->teth_prot); if (result) { IPA_USB_ERR("failed to connect teth protocol\n"); goto connect_teth_prot_fail; @@ -2164,6 +2192,70 @@ static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot) return 0; } +/* Assumes lock already acquired */ +static int ipa_usb_xdci_dismiss_channels(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = 0; + enum ipa3_usb_transport_type ttype; + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + IPA_USB_DBG_LOW("entry\n"); + + /* Reset DL channel */ + result = ipa3_reset_gsi_channel(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset DL channel.\n"); + return result; + } + + /* Reset DL event ring */ + result = ipa3_reset_gsi_event_ring(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset DL event ring.\n"); + return result; + } + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + /* Reset UL channel */ + result = ipa3_reset_gsi_channel(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset UL channel.\n"); + return result; + } + + /* Reset UL event ring */ + result = ipa3_reset_gsi_event_ring(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to reset UL event ring.\n"); + return result; + } + } + + /* Change state to STOPPED */ + if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype)) + IPA_USB_ERR("failed to change state to stopped\n"); + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, ttype); + if (result) { + IPA_USB_ERR("failed to release UL channel.\n"); + return result; + } + } + + result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, ttype); + if (result) { + IPA_USB_ERR("failed to release DL channel.\n"); + return result; + } + + IPA_USB_DBG_LOW("exit\n"); + + return 0; +} + int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot) { @@ -2175,20 +2267,31 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, mutex_lock(&ipa3_usb_ctx->general_mutex); IPA_USB_DBG_LOW("entry\n"); - if (ipa3_usb_check_disconnect_prot(teth_prot)) { - result = -EINVAL; - goto bad_params; - } ttype = IPA3_USB_GET_TTYPE(teth_prot); - if (!ipa3_usb_check_legal_op(IPA_USB_DISCONNECT, ttype)) { + if (!ipa3_usb_check_legal_op(IPA_USB_OP_DISCONNECT, ttype)) { IPA_USB_ERR("Illegal operation.\n"); result = -EPERM; goto bad_params; } spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); + if (ipa3_usb_ctx->ttype_ctx[ttype].state == + IPA_USB_SUSPENDED_NO_RWAKEUP) { + 
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl, + teth_prot); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + } + + if (ipa3_usb_check_disconnect_prot(teth_prot)) { + spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + result = -EINVAL; + goto bad_params; + } + if (ipa3_usb_ctx->ttype_ctx[ttype].state != IPA_USB_SUSPENDED) { spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); /* Stop DL/DPL channel */ @@ -2227,53 +2330,10 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl, } else spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); - /* Reset DL channel */ - result = ipa3_reset_gsi_channel(dl_clnt_hdl); - if (result) { - IPA_USB_ERR("failed to reset DL channel.\n"); - goto bad_params; - } - - /* Reset DL event ring */ - result = ipa3_reset_gsi_event_ring(dl_clnt_hdl); - if (result) { - IPA_USB_ERR("failed to reset DL event ring.\n"); - goto bad_params; - } - - if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { - /* Reset UL channel */ - result = ipa3_reset_gsi_channel(ul_clnt_hdl); - if (result) { - IPA_USB_ERR("failed to reset UL channel.\n"); - goto bad_params; - } - - /* Reset UL event ring */ - result = ipa3_reset_gsi_event_ring(ul_clnt_hdl); - if (result) { - IPA_USB_ERR("failed to reset UL event ring.\n"); - goto bad_params; - } - } - - /* Change state to STOPPED */ - if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype)) - IPA_USB_ERR("failed to change state to stopped\n"); - - if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { - result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, ttype); - if (result) { - IPA_USB_ERR("failed to release UL channel.\n"); - goto bad_params; - } - } - - result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, ttype); - if (result) { - IPA_USB_ERR("failed to release DL channel.\n"); + result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl, + teth_prot); + if (result) goto bad_params; - } /* Disconnect tethering protocol */ result = ipa3_usb_disconnect_teth_prot(teth_prot); @@ -2315,7 +2375,7 @@ int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot) ttype = IPA3_USB_GET_TTYPE(teth_prot); - if (!ipa3_usb_check_legal_op(IPA_USB_DEINIT_TETH_PROT, ttype)) { + if (!ipa3_usb_check_legal_op(IPA_USB_OP_DEINIT_TETH_PROT, ttype)) { IPA_USB_ERR("Illegal operation.\n"); result = -EPERM; goto bad_params; @@ -2411,25 +2471,104 @@ bad_params: } EXPORT_SYMBOL(ipa_usb_deinit_teth_prot); -int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, +/* Assumes lock already acquired */ +static int ipa3_usb_suspend_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot) { int result = 0; + enum ipa3_usb_transport_type ttype; + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + if (!ipa3_usb_check_legal_op(IPA_USB_OP_SUSPEND_NO_RWAKEUP, ttype)) { + IPA_USB_ERR("Illegal operation.\n"); + result = -EPERM; + goto fail_exit; + } + + IPA_USB_DBG("Start suspend with no remote wakeup sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
+ "DPL channel":"Data Tethering channels"); + + if (ipa3_usb_check_disconnect_prot(teth_prot)) { + result = -EINVAL; + goto fail_exit; + } + + /* Stop DL/DPL channel */ + result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1); + if (result) { + IPA_USB_ERR("failed to disconnect DL/DPL channel.\n"); + goto fail_exit; + } + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + /* Stop UL channel */ + result = ipa3_xdci_disconnect(ul_clnt_hdl, true, + ipa3_usb_ctx->qmi_req_id); + if (result) { + IPA_USB_ERR("failed disconnect UL channel\n"); + goto start_dl; + } + ipa3_usb_ctx->qmi_req_id++; + } + + /* Disconnect tethering protocol */ + result = ipa3_usb_disconnect_teth_prot(teth_prot); + if (result) + goto start_ul; + + result = ipa3_usb_release_prod(ttype); + if (result) { + IPA_USB_ERR("failed to release PROD.\n"); + goto connect_teth; + } + + /* Change ipa_usb state to SUSPENDED_NO_RWAKEUP */ + if (!ipa3_usb_set_state(IPA_USB_SUSPENDED_NO_RWAKEUP, false, ttype)) + IPA_USB_ERR("failed to change state to suspend no rwakeup\n"); + + IPA_USB_DBG_LOW("exit\n"); + return 0; + +connect_teth: + (void)ipa3_usb_connect_teth_prot(teth_prot); +start_ul: + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) + (void)ipa3_xdci_connect(ul_clnt_hdl); +start_dl: + (void)ipa3_xdci_connect(dl_clnt_hdl); +fail_exit: + return result; +} + +int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot, bool with_remote_wakeup) +{ + int result = 0; unsigned long flags; enum ipa3_usb_cons_state curr_cons_state; enum ipa3_usb_transport_type ttype; mutex_lock(&ipa3_usb_ctx->general_mutex); IPA_USB_DBG_LOW("entry\n"); + if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { IPA_USB_ERR("bad parameters.\n"); result = -EINVAL; goto bad_params; } + if (!with_remote_wakeup) { + result = ipa3_usb_suspend_no_remote_wakeup(ul_clnt_hdl, + dl_clnt_hdl, teth_prot); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + } + ttype = IPA3_USB_GET_TTYPE(teth_prot); - if (!ipa3_usb_check_legal_op(IPA_USB_SUSPEND, ttype)) { + if (!ipa3_usb_check_legal_op(IPA_USB_OP_SUSPEND, ttype)) { IPA_USB_ERR("Illegal operation.\n"); result = -EPERM; goto bad_params; @@ -2538,6 +2677,72 @@ bad_params: } EXPORT_SYMBOL(ipa_usb_xdci_suspend); +/* Assumes lock already acquired */ +static int ipa3_usb_resume_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl, + enum ipa_usb_teth_prot teth_prot) +{ + int result = -EFAULT; + enum ipa3_usb_transport_type ttype; + + ttype = IPA3_USB_GET_TTYPE(teth_prot); + + IPA_USB_DBG("Start resume with no remote wakeup sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
+ "DPL channel":"Data Tethering channels"); + + /* Request USB_PROD */ + result = ipa3_usb_request_prod(ttype); + if (result) + goto fail_exit; + + /* Connect tethering protocol */ + result = ipa3_usb_connect_teth_prot(teth_prot); + if (result) { + IPA_USB_ERR("failed to connect teth protocol\n"); + goto release_prod; + } + + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + /* Start UL channel */ + result = ipa3_xdci_connect(ul_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to start UL channel.\n"); + goto disconn_teth; + } + } + + /* Start DL/DPL channel */ + result = ipa3_xdci_connect(dl_clnt_hdl); + if (result) { + IPA_USB_ERR("failed to start DL/DPL channel.\n"); + goto stop_ul; + } + + /* Change state to CONNECTED */ + if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) { + IPA_USB_ERR("failed to change state to connected\n"); + result = -EFAULT; + goto stop_dl; + } + + return 0; + +stop_dl: + (void)ipa3_xdci_disconnect(dl_clnt_hdl, false, -1); +stop_ul: + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) { + (void)ipa3_xdci_disconnect(ul_clnt_hdl, true, + ipa3_usb_ctx->qmi_req_id); + ipa3_usb_ctx->qmi_req_id++; + } +disconn_teth: + (void)ipa3_usb_disconnect_teth_prot(teth_prot); +release_prod: + (void)ipa3_usb_release_prod(ttype); +fail_exit: + return result; +} + int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, enum ipa_usb_teth_prot teth_prot) { @@ -2557,19 +2762,25 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, ttype = IPA3_USB_GET_TTYPE(teth_prot); - if (!ipa3_usb_check_legal_op(IPA_USB_RESUME, ttype)) { + if (!ipa3_usb_check_legal_op(IPA_USB_OP_RESUME, ttype)) { IPA_USB_ERR("Illegal operation.\n"); result = -EPERM; goto bad_params; } - IPA_USB_DBG_LOW("Start resume sequence: %s\n", - IPA3_USB_IS_TTYPE_DPL(ttype) ? - "DPL channel" : "Data Tethering channels"); - spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags); prev_state = ipa3_usb_ctx->ttype_ctx[ttype].state; spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags); + if (prev_state == IPA_USB_SUSPENDED_NO_RWAKEUP) { + result = ipa3_usb_resume_no_remote_wakeup(ul_clnt_hdl, + dl_clnt_hdl, teth_prot); + mutex_unlock(&ipa3_usb_ctx->general_mutex); + return result; + } + + IPA_USB_DBG("Start resume sequence: %s\n", + IPA3_USB_IS_TTYPE_DPL(ttype) ? 
+ "DPL channel" : "Data Tethering channels"); /* Change state to RESUME_IN_PROGRESS */ if (!ipa3_usb_set_state(IPA_USB_RESUME_IN_PROGRESS, false, ttype)) { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index ab62dbcddd22..8676b35914e2 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -3790,6 +3790,12 @@ static int ipa3_gsi_pre_fw_load_init(void) return 0; } +static void ipa3_uc_is_loaded(void) +{ + IPADBG("\n"); + complete_all(&ipa3_ctx->uc_loaded_completion_obj); +} + static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type) { enum gsi_ver gsi_ver; @@ -3842,6 +3848,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, int result; struct sps_bam_props bam_props = { 0 }; struct gsi_per_props gsi_props; + struct ipa3_uc_hdlrs uc_hdlrs = { 0 }; if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) { memset(&gsi_props, 0, sizeof(gsi_props)); @@ -3918,6 +3925,9 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, else IPADBG(":ipa Uc interface init ok\n"); + uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded; + ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs); + result = ipa3_wdi_init(); if (result) IPAERR(":wdi init failed (%d)\n", -result); @@ -4609,6 +4619,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list); init_completion(&ipa3_ctx->init_completion_obj); + init_completion(&ipa3_ctx->uc_loaded_completion_obj); /* * For GSI, we can't register the GSI driver yet, as it expects diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index 8326c3fdd9d1..26bd180624f1 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -61,7 +61,7 @@ int ipa3_enable_data_path(u32 clnt_hdl) !ipa3_should_pipe_be_suspended(ep->client))) { memset(&ep_cfg_ctrl, 0 , sizeof(ep_cfg_ctrl)); ep_cfg_ctrl.ipa_ep_suspend = false; - ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); } /* Assign the resource group for pipe */ @@ -99,9 +99,21 @@ int ipa3_disable_data_path(u32 clnt_hdl) /* Suspend the pipe */ if (IPA_CLIENT_IS_CONS(ep->client)) { + /* + * for RG10 workaround uC needs to be loaded before pipe can + * be suspended in this case. 
+ */ + if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) { + IPADBG("uC is not loaded yet, waiting...\n"); + res = wait_for_completion_timeout( + &ipa3_ctx->uc_loaded_completion_obj, 60 * HZ); + if (res == 0) + IPADBG("timeout waiting for uC to load\n"); + } + memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl)); ep_cfg_ctrl.ipa_ep_suspend = true; - ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); } udelay(IPA_PKT_FLUSH_TO_US); @@ -1311,7 +1323,46 @@ int ipa3_set_usb_max_packet_size( return 0; } -int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid) +int ipa3_xdci_connect(u32 clnt_hdl) +{ + int result; + struct ipa3_ep_context *ep; + + IPADBG("entry\n"); + + if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || + ipa3_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("Bad parameter.\n"); + return -EINVAL; + } + + ep = &ipa3_ctx->ep[clnt_hdl]; + IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); + + result = ipa3_start_gsi_channel(clnt_hdl); + if (result) { + IPAERR("failed to start gsi channel clnt_hdl=%u\n", clnt_hdl); + goto exit; + } + + result = ipa3_enable_data_path(clnt_hdl); + if (result) { + IPAERR("enable data path failed res=%d clnt_hdl=%d.\n", result, + clnt_hdl); + goto stop_ch; + } + + IPADBG("exit\n"); + goto exit; + +stop_ch: + (void)ipa3_stop_gsi_channel(clnt_hdl); +exit: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + return result; +} + +int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid) { struct ipa3_ep_context *ep; int result = -EFAULT; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index 09c7c1b0fd05..cc1cb456ab8a 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -767,6 +767,30 @@ static void ipa3_transport_irq_cmd_ack(void *user1, int user2) } /** + * ipa3_transport_irq_cmd_ack_free - callback function which will be + * called by SPS/GSI driver after an immediate command is complete. + * This function will also free the completion object once it is done. 
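The ipa3_transport_irq_cmd_ack_free() callback documented here works together with the atomic_set(&comp->cnt, 2) added further down in ipa3_send_cmd_timeout(): the waiter and the ack callback each hold one reference to the ipa3_tag_completion, and whichever side drops the last reference frees it, so a waiter that times out cannot race a late completion into a use-after-free. A minimal standalone sketch of that ownership pattern, with hypothetical names (not the IPA code):

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct demo_comp {
	struct completion comp;
	atomic_t cnt;			/* one ref for the waiter, one for the callback */
};

static struct demo_comp *demo_comp_alloc(void)
{
	struct demo_comp *c = kzalloc(sizeof(*c), GFP_ATOMIC);

	if (!c)
		return NULL;
	init_completion(&c->comp);
	atomic_set(&c->cnt, 2);		/* freed by whoever drops the last ref */
	return c;
}

/* callback side: signal the waiter, then drop its reference */
static void demo_comp_ack(struct demo_comp *c)
{
	complete(&c->comp);
	if (atomic_dec_and_test(&c->cnt))
		kfree(c);
}

/* waiter side: wait with timeout, then drop its reference */
static int demo_comp_wait(struct demo_comp *c, unsigned int timeout_ms)
{
	unsigned long left = wait_for_completion_timeout(&c->comp,
					msecs_to_jiffies(timeout_ms));

	if (atomic_dec_and_test(&c->cnt))
		kfree(c);
	return left ? 0 : -ETIMEDOUT;
}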
+ * @tag_comp: pointer to the completion object + * @ignored: parameter not used + * + * Complete the immediate commands completion object, this will release the + * thread which waits on this completion object (ipa3_send_cmd()) + */ +static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored) +{ + struct ipa3_tag_completion *comp = tag_comp; + + if (!comp) { + IPAERR("comp is NULL\n"); + return; + } + + complete(&comp->comp); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); +} + +/** * ipa3_send_cmd - send immediate commands * @num_desc: number of descriptors within the desc struct * @descr: descriptor structure @@ -778,7 +802,58 @@ static void ipa3_transport_irq_cmd_ack(void *user1, int user2) */ int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr) { - return ipa3_send_cmd_timeout(num_desc, descr, 0); + struct ipa3_desc *desc; + int i, result = 0; + struct ipa3_sys_context *sys; + int ep_idx; + + for (i = 0; i < num_desc; i++) + IPADBG("sending imm cmd %d\n", descr[i].opcode); + + ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + + sys = ipa3_ctx->ep[ep_idx].sys; + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (num_desc == 1) { + init_completion(&descr->xfer_done); + + if (descr->callback || descr->user1) + WARN_ON(1); + + descr->callback = ipa3_transport_irq_cmd_ack; + descr->user1 = descr; + if (ipa3_send_one(sys, descr, true)) { + IPAERR("fail to send immediate command\n"); + result = -EFAULT; + goto bail; + } + wait_for_completion(&descr->xfer_done); + } else { + desc = &descr[num_desc - 1]; + init_completion(&desc->xfer_done); + + if (desc->callback || desc->user1) + WARN_ON(1); + + desc->callback = ipa3_transport_irq_cmd_ack; + desc->user1 = desc; + if (ipa3_send(sys, num_desc, descr, true)) { + IPAERR("fail to send multiple immediate command set\n"); + result = -EFAULT; + goto bail; + } + wait_for_completion(&desc->xfer_done); + } + +bail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; } /** @@ -800,6 +875,7 @@ int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout) struct ipa3_sys_context *sys; int ep_idx; int completed; + struct ipa3_tag_completion *comp; for (i = 0; i < num_desc; i++) IPADBG("sending imm cmd %d\n", descr[i].opcode); @@ -810,55 +886,56 @@ int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout) IPA_CLIENT_APPS_CMD_PROD); return -EFAULT; } + + comp = kzalloc(sizeof(*comp), GFP_ATOMIC); + if (!comp) { + IPAERR("no mem\n"); + return -ENOMEM; + } + init_completion(&comp->comp); + + /* completion needs to be released from both here and in ack callback */ + atomic_set(&comp->cnt, 2); + sys = ipa3_ctx->ep[ep_idx].sys; IPA_ACTIVE_CLIENTS_INC_SIMPLE(); if (num_desc == 1) { - init_completion(&descr->xfer_done); - if (descr->callback || descr->user1) WARN_ON(1); - descr->callback = ipa3_transport_irq_cmd_ack; - descr->user1 = descr; + descr->callback = ipa3_transport_irq_cmd_ack_free; + descr->user1 = comp; if (ipa3_send_one(sys, descr, true)) { IPAERR("fail to send immediate command\n"); + kfree(comp); result = -EFAULT; goto bail; } - if (timeout) { - completed = wait_for_completion_timeout( - &descr->xfer_done, msecs_to_jiffies(timeout)); - if (!completed) - IPADBG("timeout waiting for imm-cmd ACK\n"); - } else { - wait_for_completion(&descr->xfer_done); - } } else { desc = &descr[num_desc - 1]; - init_completion(&desc->xfer_done); if (desc->callback || desc->user1) WARN_ON(1); - 
desc->callback = ipa3_transport_irq_cmd_ack; - desc->user1 = desc; + desc->callback = ipa3_transport_irq_cmd_ack_free; + desc->user1 = comp; if (ipa3_send(sys, num_desc, descr, true)) { IPAERR("fail to send multiple immediate command set\n"); + kfree(comp); result = -EFAULT; goto bail; } - if (timeout) { - completed = wait_for_completion_timeout( - &desc->xfer_done, msecs_to_jiffies(timeout)); - if (!completed) - IPADBG("timeout waiting for imm-cmd ACK\n"); - } else { - wait_for_completion(&desc->xfer_done); - } - } + completed = wait_for_completion_timeout( + &comp->comp, msecs_to_jiffies(timeout)); + if (!completed) + IPADBG("timeout waiting for imm-cmd ACK\n"); + + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + bail: IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return result; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 33be22f98b9d..40f1e93653f9 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -1232,6 +1232,7 @@ struct ipa3_context { bool ipa_initialization_complete; struct list_head ipa_ready_cb_list; struct completion init_completion_obj; + struct completion uc_loaded_completion_obj; struct ipa3_smp2p_info smp2p_info; u32 ipa_tz_unlock_reg_num; struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg; @@ -1482,7 +1483,9 @@ int ipa3_reset_gsi_event_ring(u32 clnt_hdl); int ipa3_set_usb_max_packet_size( enum ipa_usb_max_usb_packet_size usb_max_packet_size); -int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid); +int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid); + +int ipa3_xdci_connect(u32 clnt_hdl); int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index c0a6e8b00d71..4ea68ae1e95c 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -3522,7 +3522,7 @@ int ipa3_stop_gsi_channel(u32 clnt_hdl) goto end_sequence; IPADBG("Inject a DMA_TASK with 1B packet to IPA and retry\n"); - /* Send a 1B packet DMA_RASK to IPA and try again*/ + /* Send a 1B packet DMA_TASK to IPA and try again */ res = ipa3_inject_dma_task_for_gsi(); if (res) { IPAERR("Failed to inject DMA TASk for GSI\n"); diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c index 43188c9d690e..5cb017fdf2d3 100644 --- a/drivers/platform/msm/usb_bam.c +++ b/drivers/platform/msm/usb_bam.c @@ -1323,6 +1323,9 @@ static void usb_bam_finish_suspend(enum usb_ctrl cur_bam) __func__, ret); goto no_lpm; } + } else { + log_event_err("%s: pipe type is not B2B\n", __func__); + cons_empty = true; } spin_lock(&usb_bam_ipa_handshake_info_lock); @@ -1959,8 +1962,8 @@ static void usb_bam_finish_resume(struct work_struct *w) spin_unlock(&usb_bam_ipa_handshake_info_lock); mutex_unlock(&info[cur_bam].suspend_resume_mutex); - log_event_dbg("%s: done..PM Runtime PUT %d, count: %d\n", - __func__, idx, get_pm_runtime_counter(bam_dev)); + log_event_dbg("%s: done..PM Runtime PUT :%d\n", + __func__, get_pm_runtime_counter(bam_dev)); /* Put to match _get at the beginning of this routine */ pm_runtime_put(&ctx->usb_bam_pdev->dev); } @@ -2762,16 +2765,14 @@ static void usb_bam_sps_events(enum sps_callback_case sps_cb_case, void *user) log_event_dbg("%s: received SPS_CALLBACK_BAM_TIMER_IRQ\n", __func__); - spin_lock(&ctx->usb_bam_lock); - bam = get_bam_type_from_core_name((char *)user); if (bam < 0 || bam >= 
MAX_BAMS) { log_event_err("%s: Invalid bam, type=%d ,name=%s\n", __func__, bam, (char *)user); - spin_unlock(&ctx->usb_bam_lock); return; } ctx = &msm_usb_bam[bam]; + spin_lock(&ctx->usb_bam_lock); ctx->is_bam_inactivity = true; log_event_dbg("%s: Inactivity happened on bam=%s,%d\n", diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c index cd410e392550..d33e9ad3218f 100644 --- a/drivers/platform/x86/dell-rbtn.c +++ b/drivers/platform/x86/dell-rbtn.c @@ -28,6 +28,7 @@ struct rbtn_data { enum rbtn_type type; struct rfkill *rfkill; struct input_dev *input_dev; + bool suspended; }; @@ -220,9 +221,55 @@ static const struct acpi_device_id rbtn_ids[] = { { "", 0 }, }; +#ifdef CONFIG_PM_SLEEP +static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context) +{ + struct rbtn_data *rbtn_data = context; + + rbtn_data->suspended = false; +} + +static int rbtn_suspend(struct device *dev) +{ + struct acpi_device *device = to_acpi_device(dev); + struct rbtn_data *rbtn_data = acpi_driver_data(device); + + rbtn_data->suspended = true; + + return 0; +} + +static int rbtn_resume(struct device *dev) +{ + struct acpi_device *device = to_acpi_device(dev); + struct rbtn_data *rbtn_data = acpi_driver_data(device); + acpi_status status; + + /* + * Upon resume, some BIOSes send an ACPI notification thet triggers + * an unwanted input event. In order to ignore it, we use a flag + * that we set at suspend and clear once we have received the extra + * ACPI notification. Since ACPI notifications are delivered + * asynchronously to drivers, we clear the flag from the workqueue + * used to deliver the notifications. This should be enough + * to have the flag cleared only after we received the extra + * notification, if any. + */ + status = acpi_os_execute(OSL_NOTIFY_HANDLER, + rbtn_clear_suspended_flag, rbtn_data); + if (ACPI_FAILURE(status)) + rbtn_clear_suspended_flag(rbtn_data); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume); + static struct acpi_driver rbtn_driver = { .name = "dell-rbtn", .ids = rbtn_ids, + .drv.pm = &rbtn_pm_ops, .ops = { .add = rbtn_add, .remove = rbtn_remove, @@ -384,6 +431,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event) { struct rbtn_data *rbtn_data = device->driver_data; + /* + * Some BIOSes send a notification at resume. + * Ignore it to prevent unwanted input events. 
+ */ + if (rbtn_data->suspended) { + dev_dbg(&device->dev, "ACPI notification ignored\n"); + return; + } + if (event != 0x80) { dev_info(&device->dev, "Received unknown event (0x%x)\n", event); diff --git a/drivers/power/qcom-charger/pmic-voter.c b/drivers/power/qcom-charger/pmic-voter.c index d0bad7dec094..8072b63f53fe 100644 --- a/drivers/power/qcom-charger/pmic-voter.c +++ b/drivers/power/qcom-charger/pmic-voter.c @@ -421,6 +421,7 @@ static int show_votable_clients(struct seq_file *m, void *data) lock_votable(votable); + seq_printf(m, "%s:\n", votable->name); seq_puts(m, "Clients:\n"); for (i = 0; i < votable->num_clients; i++) { if (votable->client_strs[i]) { diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c index 3db295b3e6e8..9a87ff5fb081 100644 --- a/drivers/power/qcom-charger/smb138x-charger.c +++ b/drivers/power/qcom-charger/smb138x-charger.c @@ -48,14 +48,14 @@ static struct smb_params v1_params = { .name = "fast charge current", .reg = FAST_CHARGE_CURRENT_CFG_REG, .min_u = 0, - .max_u = 4500000, + .max_u = 6000000, .step_u = 25000, }, .fv = { .name = "float voltage", .reg = FLOAT_VOLTAGE_CFG_REG, - .min_u = 2500000, - .max_u = 5000000, + .min_u = 2450000, + .max_u = 4950000, .step_u = 10000, }, .usb_icl = { diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 17c8a5b00843..7569e35b59e0 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -183,5 +183,19 @@ config POWER_RESET_ZX help Reboot support for ZTE SoCs. +config REBOOT_MODE + tristate + +config SYSCON_REBOOT_MODE + tristate "Generic SYSCON regmap reboot mode driver" + depends on OF + select REBOOT_MODE + select MFD_SYSCON + help + Say y here will enable reboot mode driver. This will + get reboot mode arguments and store it in SYSCON mapped + register, then the bootloader can read it to take different + action according to the mode. + endif diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index 3904e7977d07..66568c4497a4 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -20,3 +20,5 @@ obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o obj-$(CONFIG_POWER_RESET_SYSCON_POWEROFF) += syscon-poweroff.o obj-$(CONFIG_POWER_RESET_RMOBILE) += rmobile-reset.o obj-$(CONFIG_POWER_RESET_ZX) += zx-reboot.o +obj-$(CONFIG_REBOOT_MODE) += reboot-mode.o +obj-$(CONFIG_SYSCON_REBOOT_MODE) += syscon-reboot-mode.o diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c index 2f109013f723..d32f293695bb 100644 --- a/drivers/power/reset/msm-poweroff.c +++ b/drivers/power/reset/msm-poweroff.c @@ -380,7 +380,6 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd) msm_trigger_wdog_bite(); #endif - scm_disable_sdi(); halt_spmi_pmic_arbiter(); deassert_ps_hold(); diff --git a/drivers/power/reset/reboot-mode.c b/drivers/power/reset/reboot-mode.c new file mode 100644 index 000000000000..2dfbbce0f817 --- /dev/null +++ b/drivers/power/reset/reboot-mode.c @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/reboot.h> +#include "reboot-mode.h" + +#define PREFIX "mode-" + +struct mode_info { + const char *mode; + u32 magic; + struct list_head list; +}; + +static unsigned int get_reboot_mode_magic(struct reboot_mode_driver *reboot, + const char *cmd) +{ + const char *normal = "normal"; + int magic = 0; + struct mode_info *info; + + if (!cmd) + cmd = normal; + + list_for_each_entry(info, &reboot->head, list) { + if (!strcmp(info->mode, cmd)) { + magic = info->magic; + break; + } + } + + return magic; +} + +static int reboot_mode_notify(struct notifier_block *this, + unsigned long mode, void *cmd) +{ + struct reboot_mode_driver *reboot; + unsigned int magic; + + reboot = container_of(this, struct reboot_mode_driver, reboot_notifier); + magic = get_reboot_mode_magic(reboot, cmd); + if (magic) + reboot->write(reboot, magic); + + return NOTIFY_DONE; +} + +/** + * reboot_mode_register - register a reboot mode driver + * @reboot: reboot mode driver + * + * Returns: 0 on success or a negative error code on failure. + */ +int reboot_mode_register(struct reboot_mode_driver *reboot) +{ + struct mode_info *info; + struct property *prop; + struct device_node *np = reboot->dev->of_node; + size_t len = strlen(PREFIX); + int ret; + + INIT_LIST_HEAD(&reboot->head); + + for_each_property_of_node(np, prop) { + if (strncmp(prop->name, PREFIX, len)) + continue; + + info = devm_kzalloc(reboot->dev, sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto error; + } + + if (of_property_read_u32(np, prop->name, &info->magic)) { + dev_err(reboot->dev, "reboot mode %s without magic number\n", + info->mode); + devm_kfree(reboot->dev, info); + continue; + } + + info->mode = kstrdup_const(prop->name + len, GFP_KERNEL); + if (!info->mode) { + ret = -ENOMEM; + goto error; + } else if (info->mode[0] == '\0') { + kfree_const(info->mode); + ret = -EINVAL; + dev_err(reboot->dev, "invalid mode name(%s): too short!\n", + prop->name); + goto error; + } + + list_add_tail(&info->list, &reboot->head); + } + + reboot->reboot_notifier.notifier_call = reboot_mode_notify; + register_reboot_notifier(&reboot->reboot_notifier); + + return 0; + +error: + list_for_each_entry(info, &reboot->head, list) + kfree_const(info->mode); + + return ret; +} +EXPORT_SYMBOL_GPL(reboot_mode_register); + +/** + * reboot_mode_unregister - unregister a reboot mode driver + * @reboot: reboot mode driver + */ +int reboot_mode_unregister(struct reboot_mode_driver *reboot) +{ + struct mode_info *info; + + unregister_reboot_notifier(&reboot->reboot_notifier); + + list_for_each_entry(info, &reboot->head, list) + kfree_const(info->mode); + + return 0; +} +EXPORT_SYMBOL_GPL(reboot_mode_unregister); + +MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com"); +MODULE_DESCRIPTION("System reboot mode core library"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/reset/reboot-mode.h b/drivers/power/reset/reboot-mode.h new file mode 100644 index 000000000000..2491bb71f591 --- /dev/null +++ b/drivers/power/reset/reboot-mode.h @@ -0,0 +1,14 @@ +#ifndef __REBOOT_MODE_H__ +#define __REBOOT_MODE_H__ + +struct reboot_mode_driver { + struct device *dev; + struct list_head head; + int (*write)(struct reboot_mode_driver *reboot, unsigned int magic); + struct notifier_block reboot_notifier; +}; + +int reboot_mode_register(struct reboot_mode_driver *reboot); +int reboot_mode_unregister(struct reboot_mode_driver *reboot); + 
+#endif diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c new file mode 100644 index 000000000000..9e1cba5dd58e --- /dev/null +++ b/drivers/power/reset/syscon-reboot-mode.c @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include "reboot-mode.h" + +struct syscon_reboot_mode { + struct regmap *map; + struct reboot_mode_driver reboot; + u32 offset; + u32 mask; +}; + +static int syscon_reboot_mode_write(struct reboot_mode_driver *reboot, + unsigned int magic) +{ + struct syscon_reboot_mode *syscon_rbm; + int ret; + + syscon_rbm = container_of(reboot, struct syscon_reboot_mode, reboot); + + ret = regmap_update_bits(syscon_rbm->map, syscon_rbm->offset, + syscon_rbm->mask, magic); + if (ret < 0) + dev_err(reboot->dev, "update reboot mode bits failed\n"); + + return ret; +} + +static int syscon_reboot_mode_probe(struct platform_device *pdev) +{ + int ret; + struct syscon_reboot_mode *syscon_rbm; + + syscon_rbm = devm_kzalloc(&pdev->dev, sizeof(*syscon_rbm), GFP_KERNEL); + if (!syscon_rbm) + return -ENOMEM; + + syscon_rbm->reboot.dev = &pdev->dev; + syscon_rbm->reboot.write = syscon_reboot_mode_write; + syscon_rbm->mask = 0xffffffff; + + dev_set_drvdata(&pdev->dev, syscon_rbm); + + syscon_rbm->map = syscon_node_to_regmap(pdev->dev.parent->of_node); + if (IS_ERR(syscon_rbm->map)) + return PTR_ERR(syscon_rbm->map); + + if (of_property_read_u32(pdev->dev.of_node, "offset", + &syscon_rbm->offset)) + return -EINVAL; + + of_property_read_u32(pdev->dev.of_node, "mask", &syscon_rbm->mask); + + ret = reboot_mode_register(&syscon_rbm->reboot); + if (ret) + dev_err(&pdev->dev, "can't register reboot mode\n"); + + return ret; +} + +static int syscon_reboot_mode_remove(struct platform_device *pdev) +{ + struct syscon_reboot_mode *syscon_rbm = dev_get_drvdata(&pdev->dev); + + return reboot_mode_unregister(&syscon_rbm->reboot); +} + +static const struct of_device_id syscon_reboot_mode_of_match[] = { + { .compatible = "syscon-reboot-mode" }, + {} +}; + +static struct platform_driver syscon_reboot_mode_driver = { + .probe = syscon_reboot_mode_probe, + .remove = syscon_reboot_mode_remove, + .driver = { + .name = "syscon-reboot-mode", + .of_match_table = syscon_reboot_mode_of_match, + }, +}; +module_platform_driver(syscon_reboot_mode_driver); + +MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com"); +MODULE_DESCRIPTION("SYSCON reboot mode driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index d4c285688ce9..3ddc85e6efd6 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, } else { struct scsi_cmnd *SCp; - SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG); + SCp = SDp->current_cmnd; if(unlikely(SCp == NULL)) { sdev_printk(KERN_ERR, SDp, "no saved request for untagged cmd\n"); @@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) slot->tag, slot); } else { slot->tag 
= SCSI_NO_TAG; - /* must populate current_cmnd for scsi_host_find_tag to work */ + /* save current command for reselection */ SCp->device->current_cmnd = SCp; } /* sanity check: some of the commands generated by the mid-layer diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index d044f3f273be..467773033a20 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -29,6 +29,7 @@ enum { #define AAC_INT_MODE_MSI (1<<1) #define AAC_INT_MODE_AIF (1<<2) #define AAC_INT_MODE_SYNC (1<<3) +#define AAC_INT_MODE_MSIX (1<<16) #define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb #define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 0e954e37f0b5..0d351cd3191b 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -37,6 +37,7 @@ #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> +#include <linux/delay.h> #include <linux/completion.h> #include <linux/mm.h> #include <scsi/scsi_host.h> @@ -47,6 +48,20 @@ struct aac_common aac_config = { .irq_mod = 1 }; +static inline int aac_is_msix_mode(struct aac_dev *dev) +{ + u32 status; + + status = src_readl(dev, MUnit.OMR); + return (status & AAC_INT_MODE_MSIX); +} + +static inline void aac_change_to_intx(struct aac_dev *dev) +{ + aac_src_access_devreg(dev, AAC_DISABLE_MSIX); + aac_src_access_devreg(dev, AAC_ENABLE_INTX); +} + static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) { unsigned char *base; @@ -425,6 +440,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) dev->comm_interface = AAC_COMM_PRODUCER; dev->raw_io_interface = dev->raw_io_64 = 0; + + /* + * Enable INTX mode, if not done already Enabled + */ + if (aac_is_msix_mode(dev)) { + aac_change_to_intx(dev); + dev_info(&dev->pdev->dev, "Changed firmware to INTX mode"); + } + if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, status+3, NULL)) && diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 4cbf54928640..8c758c36fc70 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -611,10 +611,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, } return -EFAULT; } - /* We used to udelay() here but that absorbed - * a CPU when a timeout occured. Not very - * useful. */ - cpu_relax(); + /* + * Allow other processes / CPUS to use core + */ + schedule(); } } else if (down_interruptible(&fibptr->event_wait)) { /* Do nothing ... 
satisfy @@ -1970,6 +1970,10 @@ int aac_command_thread(void *data) if (difference <= 0) difference = 1; set_current_state(TASK_INTERRUPTIBLE); + + if (kthread_should_stop()) + break; + schedule_timeout(difference); if (kthread_should_stop()) diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index da2e068ee47d..93cbefa75b26 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -227,6 +227,7 @@ static struct { {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, {"Promise", "", NULL, BLIST_SPARSELUN}, + {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES}, {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 984ddcb4786d..1b9c049bd5c5 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1127,7 +1127,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn) */ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) { - scmd->device->host->host_failed--; scmd->eh_eflags = 0; list_move_tail(&scmd->eh_entry, done_q); } @@ -2226,6 +2225,9 @@ int scsi_error_handler(void *data) else scsi_unjam_host(shost); + /* All scmds have been handled */ + shost->host_failed = 0; + /* * Note - if the above fails completely, the action is to take * individual devices offline and flush the queue of any diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index dd8ad2a44510..cf5b99e1f12b 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -910,9 +910,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } /* - * If we finished all bytes in the request we are done now. + * special case: failed zero length commands always need to + * drop down into the retry code. Otherwise, if we finished + * all bytes in the request we are done now. 
*/ - if (!scsi_end_request(req, error, good_bytes, 0)) + if (!(blk_rq_bytes(req) == 0 && error) && + !scsi_end_request(req, error, good_bytes, 0)) return; /* diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 7808ef94ff07..87b35b78d879 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -314,6 +314,7 @@ static void scsi_target_destroy(struct scsi_target *starget) struct Scsi_Host *shost = dev_to_shost(dev->parent); unsigned long flags; + BUG_ON(starget->state == STARGET_DEL); starget->state = STARGET_DEL; transport_destroy_device(dev); spin_lock_irqsave(shost->host_lock, flags); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index d2a0ae4971ed..d47624000edf 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1193,18 +1193,18 @@ static void __scsi_remove_target(struct scsi_target *starget) void scsi_remove_target(struct device *dev) { struct Scsi_Host *shost = dev_to_shost(dev->parent); - struct scsi_target *starget, *last_target = NULL; + struct scsi_target *starget; unsigned long flags; restart: spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(starget, &shost->__targets, siblings) { if (starget->state == STARGET_DEL || - starget == last_target) + starget->state == STARGET_REMOVE) continue; if (starget->dev.parent == dev || &starget->dev == dev) { kref_get(&starget->reap_ref); - last_target = starget; + starget->state = STARGET_REMOVE; spin_unlock_irqrestore(shost->host_lock, flags); __scsi_remove_target(starget); scsi_target_reap(starget); diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index f1d3c7e99a4c..feeed645fc47 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -131,6 +131,9 @@ module_param(qmi_timeout, ulong, 0600); * Registers: WCSS_HM_A_PMM_PMM * Base Address: 0x18880000 */ +#define WCSS_HM_A_PMM_ROOT_CLK_ENABLE 0x80010 +#define PMM_TCXO_CLK_ENABLE BIT(13) + #define PMM_COMMON_IDLEREQ_CSR_OFFSET 0x80120 #define PMM_COMMON_IDLEREQ_CSR_SW_WNOC_IDLEREQ_SET BIT(16) #define PMM_COMMON_IDLEREQ_CSR_WNOC_IDLEACK BIT(26) @@ -1332,8 +1335,28 @@ static int icnss_hw_reset_rf_reset_cmd(struct icnss_priv *priv) static int icnss_hw_reset_switch_to_cxo(struct icnss_priv *priv) { + u32 rdata; + icnss_pr_dbg("RESET: Switch to CXO, state: 0x%lx\n", priv->state); + rdata = icnss_hw_read_reg(priv->mem_base_va, + WCSS_HM_A_PMM_ROOT_CLK_ENABLE); + + icnss_pr_dbg("RESET: PMM_TCXO_CLK_ENABLE : 0x%05lx\n", + rdata & PMM_TCXO_CLK_ENABLE); + + if ((rdata & PMM_TCXO_CLK_ENABLE) == 0) { + icnss_pr_dbg("RESET: Set PMM_TCXO_CLK_ENABLE to 1\n"); + + icnss_hw_write_reg_field(priv->mem_base_va, + WCSS_HM_A_PMM_ROOT_CLK_ENABLE, + PMM_TCXO_CLK_ENABLE, 1); + icnss_hw_poll_reg_field(priv->mem_base_va, + WCSS_HM_A_PMM_ROOT_CLK_ENABLE, + PMM_TCXO_CLK_ENABLE, 1, 10, + ICNSS_HW_REG_RETRY); + } + icnss_hw_write_reg_field(priv->mem_base_va, WCSS_CLK_CTL_NOC_CFG_RCGR_OFFSET, WCSS_CLK_CTL_NOC_CFG_RCGR_SRC_SEL, 0); @@ -1464,6 +1487,26 @@ static int icnss_hw_reset(struct icnss_priv *priv) icnss_hw_reset_switch_to_cxo(priv); + for (i = 0; i < ICNSS_HW_REG_RETRY; i++) { + rdata = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB); + usleep_range(5, 10); + rdata1 = icnss_hw_read_reg(priv->mem_base_va, SR_PMM_SR_MSB); + + icnss_pr_dbg("RESET: SR_PMM_SR_MSB: 0x%08x/0x%08x, XO: 0x%05lx/0x%05lx, AHB: 0x%05lx/0x%05lx\n", + rdata, rdata1, + rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK, + rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK, + rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK, + rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK); + + 
if ((rdata & SR_PMM_SR_MSB_AHB_CLOCK_MASK) != + (rdata1 & SR_PMM_SR_MSB_AHB_CLOCK_MASK) && + (rdata & SR_PMM_SR_MSB_XO_CLOCK_MASK) != + (rdata1 & SR_PMM_SR_MSB_XO_CLOCK_MASK)) + break; + usleep_range(5, 10); + } + ret = icnss_hw_reset_xo_disable_cmd(priv); if (ret) goto top_level_reset; @@ -2612,10 +2655,10 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv, icnss_call_driver_remove(priv); out: - icnss_remove_msa_permissions(priv); - ret = icnss_hw_power_off(priv); + icnss_remove_msa_permissions(priv); + kfree(data); return ret; diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c index ee9b054dcc24..128ea434dcc8 100644 --- a/drivers/soc/qcom/qdsp6v2/apr.c +++ b/drivers/soc/qcom/qdsp6v2/apr.c @@ -54,6 +54,28 @@ struct apr_reset_work { struct work_struct work; }; +static bool apr_cf_debug; + +#ifdef CONFIG_DEBUG_FS +static struct dentry *debugfs_apr_debug; +static ssize_t apr_debug_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char cmd; + + if (copy_from_user(&cmd, ubuf, 1)) + return -EFAULT; + + apr_cf_debug = (cmd == '1') ? true : false; + + return cnt; +} + +static const struct file_operations apr_debug_ops = { + .write = apr_debug_write, +}; +#endif + #define APR_PKT_INFO(x...) \ do { \ if (apr_pkt_ctx) \ @@ -343,8 +365,13 @@ int apr_send_pkt(void *handle, uint32_t *buf) hdr->dest_domain = svc->dest_domain; hdr->dest_svc = svc->id; - APR_PKT_INFO("Tx: dest_svc[%d], opcode[0x%X], size[%d]", - hdr->dest_svc, hdr->opcode, hdr->pkt_size); + if (unlikely(apr_cf_debug)) { + APR_PKT_INFO( + "Tx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X]", + (hdr->src_domain << 8) | hdr->src_svc, + (hdr->dest_domain << 8) | hdr->dest_svc, hdr->opcode, + hdr->token); + } rc = apr_tal_write(clnt->handle, buf, (struct apr_pkt_priv *)&svc->pkt_owner, @@ -538,8 +565,6 @@ void apr_cb_func(void *buf, int len, void *priv) return; } hdr = buf; - APR_PKT_INFO("Rx: dest_svc[%d], opcode[0x%X], size[%d]", - hdr->dest_svc, hdr->opcode, hdr->pkt_size); ver = hdr->hdr_field; ver = (ver & 0x000F); @@ -631,9 +656,28 @@ void apr_cb_func(void *buf, int len, void *priv) data.dest_port = hdr->dest_port; data.token = hdr->token; data.msg_type = msg_type; + data.payload = NULL; if (data.payload_size > 0) data.payload = (char *)hdr + hdr_size; + if (unlikely(apr_cf_debug)) { + if (hdr->opcode == APR_BASIC_RSP_RESULT && data.payload) { + uint32_t *ptr = data.payload; + + APR_PKT_INFO( + "Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X] rc[0x%X]", + (hdr->src_domain << 8) | hdr->src_svc, + (hdr->dest_domain << 8) | hdr->dest_svc, + hdr->opcode, hdr->token, ptr[1]); + } else { + APR_PKT_INFO( + "Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X]", + (hdr->src_domain << 8) | hdr->src_svc, + (hdr->dest_domain << 8) | hdr->dest_svc, hdr->opcode, + hdr->token); + } + } + temp_port = ((data.dest_port >> 8) * 8) + (data.dest_port & 0xFF); pr_debug("port = %d t_port = %d\n", data.src_port, temp_port); if (c_svc->port_cnt && c_svc->port_fn[temp_port]) @@ -910,3 +954,14 @@ static int __init apr_late_init(void) return ret; } late_initcall(apr_late_init); + +#ifdef CONFIG_DEBUG_FS +static int __init apr_debug_init(void) +{ + debugfs_apr_debug = debugfs_create_file("msm_apr_debug", + S_IFREG | S_IRUGO, NULL, NULL, + &apr_debug_ops); + return 0; +} +device_initcall(apr_debug_init); +#endif diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c index 714c848ec9c0..b4713ac1b68b 100644 --- a/drivers/soc/qcom/scm.c +++ 
b/drivers/soc/qcom/scm.c @@ -56,9 +56,16 @@ DEFINE_MUTEX(scm_lmh_lock); #define SMC_ATOMIC_MASK 0x80000000 #define IS_CALL_AVAIL_CMD 1 -#define SCM_BUF_LEN(__cmd_size, __resp_size) \ - (sizeof(struct scm_command) + sizeof(struct scm_response) + \ - __cmd_size + __resp_size) +#define SCM_BUF_LEN(__cmd_size, __resp_size) ({ \ + size_t x = __cmd_size + __resp_size; \ + size_t y = sizeof(struct scm_command) + sizeof(struct scm_response); \ + size_t result; \ + if (x < __cmd_size || (x + y) < x) \ + result = 0; \ + else \ + result = x + y; \ + result; \ + }) /** * struct scm_command - one SCM command buffer * @len: total available memory for command and response @@ -356,8 +363,7 @@ int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf, int ret; size_t len = SCM_BUF_LEN(cmd_len, resp_len); - if (cmd_len > scm_buf_len || resp_len > scm_buf_len || - len > scm_buf_len) + if (len == 0) return -EINVAL; if (!IS_ALIGNED((unsigned long)scm_buf, PAGE_SIZE)) @@ -780,7 +786,7 @@ int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len, int ret; size_t len = SCM_BUF_LEN(cmd_len, resp_len); - if (cmd_len > len || resp_len > len) + if (len == 0 || PAGE_ALIGN(len) < len) return -EINVAL; cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL); diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c index d9ebc1edda9c..afe6b2309e27 100644 --- a/drivers/soc/qcom/secure_buffer.c +++ b/drivers/soc/qcom/secure_buffer.c @@ -52,7 +52,7 @@ struct mem_prot_info { struct dest_vm_and_perm_info { u32 vm; u32 perm; - u32 *ctx; + u64 ctx; u32 ctx_size; }; @@ -209,7 +209,7 @@ populate_dest_info(int *dest_vmids, int nelements, int *dest_perms, for (i = 0; i < nelements; i++) { dest_info[i].vm = dest_vmids[i]; dest_info[i].perm = dest_perms[i]; - dest_info[i].ctx = NULL; + dest_info[i].ctx = 0x0; dest_info[i].ctx_size = 0; } diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c index 97d922fa5724..6bc815862541 100644 --- a/drivers/soc/qcom/wcd-dsp-glink.c +++ b/drivers/soc/qcom/wcd-dsp-glink.c @@ -161,8 +161,8 @@ static void wdsp_glink_notify_rx(void *handle, const void *priv, wpriv->rsp_cnt = ++rsp_cnt; mutex_unlock(&wpriv->rsp_mutex); - complete(&wpriv->rsp_complete); glink_rx_done(handle, ptr, true); + complete(&wpriv->rsp_complete); } /* diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index b02e48185355..c0b936d802ef 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -1005,6 +1005,12 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) pa->spmic = ctrl; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); + if (!res) { + dev_err(&pdev->dev, "core resource not specified\n"); + err = -EINVAL; + goto err_put_ctrl; + } + pa->core_size = resource_size(res); if (pa->core_size <= 0x800) { dev_err(&pdev->dev, "core_size is smaller than 0x800. Failing Probe\n"); diff --git a/drivers/staging/android/fiq_debugger/Kconfig b/drivers/staging/android/fiq_debugger/Kconfig index 56f7f999377e..60fc224d4efc 100644 --- a/drivers/staging/android/fiq_debugger/Kconfig +++ b/drivers/staging/android/fiq_debugger/Kconfig @@ -42,6 +42,15 @@ config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE If enabled, this puts the fiq debugger into console mode by default. Otherwise, the fiq debugger will start out in debug mode. 
+config FIQ_DEBUGGER_UART_OVERLAY + bool "Install uart DT overlay" + depends on FIQ_DEBUGGER + select OF_OVERLAY + default n + help + If enabled, fiq debugger is calling fiq_debugger_uart_overlay() + that will apply overlay uart_overlay@0 to disable proper uart. + config FIQ_WATCHDOG bool select FIQ_DEBUGGER diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c index 7f056831dbff..b132cff14f01 100644 --- a/drivers/staging/android/fiq_debugger/fiq_debugger.c +++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c @@ -39,6 +39,10 @@ #include <asm/fiq_glue.h> #endif +#ifdef CONFIG_FIQ_DEBUGGER_UART_OVERLAY +#include <linux/of.h> +#endif + #include <linux/uaccess.h> #include "fiq_debugger.h" @@ -119,11 +123,13 @@ static bool initial_console_enable; #endif static bool fiq_kgdb_enable; +static bool fiq_debugger_disable; module_param_named(no_sleep, initial_no_sleep, bool, 0644); module_param_named(debug_enable, initial_debug_enable, bool, 0644); module_param_named(console_enable, initial_console_enable, bool, 0644); module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644); +module_param_named(disable, fiq_debugger_disable, bool, 0644); #ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON static inline @@ -1201,11 +1207,41 @@ static struct platform_driver fiq_debugger_driver = { }, }; +#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY) +int fiq_debugger_uart_overlay(void) +{ + struct device_node *onp = of_find_node_by_path("/uart_overlay@0"); + int ret; + + if (!onp) { + pr_err("serial_debugger: uart overlay not found\n"); + return -ENODEV; + } + + ret = of_overlay_create(onp); + if (ret < 0) { + pr_err("serial_debugger: fail to create overlay: %d\n", ret); + of_node_put(onp); + return ret; + } + + pr_info("serial_debugger: uart overlay applied\n"); + return 0; +} +#endif + static int __init fiq_debugger_init(void) { + if (fiq_debugger_disable) { + pr_err("serial_debugger: disabled\n"); + return -ENODEV; + } #if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) fiq_debugger_tty_init(); #endif +#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY) + fiq_debugger_uart_overlay(); +#endif return platform_driver_register(&fiq_debugger_driver); } diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index b8bf80f02f4c..43d3f92cd418 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -2,7 +2,7 @@ * drivers/staging/android/ion/ion_heap.c * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -325,8 +325,9 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) switch (heap_data->type) { case ION_HEAP_TYPE_SYSTEM_CONTIG: - heap = ion_system_contig_heap_create(heap_data); - break; + pr_err("%s: Heap type is disabled: %d\n", __func__, + heap_data->type); + return ERR_PTR(-EINVAL); case ION_HEAP_TYPE_SYSTEM: heap = ion_system_heap_create(heap_data); break; @@ -366,7 +367,8 @@ void ion_heap_destroy(struct ion_heap *heap) switch (heap->type) { case ION_HEAP_TYPE_SYSTEM_CONTIG: - ion_system_contig_heap_destroy(heap); + pr_err("%s: Heap type is disabled: %d\n", __func__, + heap->type); break; case ION_HEAP_TYPE_SYSTEM: ion_system_heap_destroy(heap); diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c index 940781183fac..3be10963f98b 100644 --- a/drivers/staging/comedi/drivers/das1800.c +++ b/drivers/staging/comedi/drivers/das1800.c @@ -567,14 +567,17 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s) struct comedi_isadma_desc *desc; int i; - outb(0x0, dev->iobase + DAS1800_STATUS); /* disable conversions */ - outb(0x0, dev->iobase + DAS1800_CONTROL_B); /* disable interrupts and dma */ - outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* disable and clear fifo and stop triggering */ - - for (i = 0; i < 2; i++) { - desc = &dma->desc[i]; - if (desc->chan) - comedi_isadma_disable(desc->chan); + /* disable and stop conversions */ + outb(0x0, dev->iobase + DAS1800_STATUS); + outb(0x0, dev->iobase + DAS1800_CONTROL_B); + outb(0x0, dev->iobase + DAS1800_CONTROL_A); + + if (dma) { + for (i = 0; i < 2; i++) { + desc = &dma->desc[i]; + if (desc->chan) + comedi_isadma_disable(desc->chan); + } } return 0; @@ -934,13 +937,14 @@ static void das1800_ai_setup_dma(struct comedi_device *dev, { struct das1800_private *devpriv = dev->private; struct comedi_isadma *dma = devpriv->dma; - struct comedi_isadma_desc *desc = &dma->desc[0]; + struct comedi_isadma_desc *desc; unsigned int bytes; if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0) return; dma->cur_dma = 0; + desc = &dma->desc[0]; /* determine a dma transfer size to fill buffer in 0.3 sec */ bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000); diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c index 02e930c55570..e4839ee4ca61 100644 --- a/drivers/staging/iio/accel/sca3000_core.c +++ b/drivers/staging/iio/accel/sca3000_core.c @@ -595,7 +595,7 @@ static ssize_t sca3000_read_frequency(struct device *dev, goto error_ret_mut; ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL); mutex_unlock(&st->lock); - if (ret) + if (ret < 0) goto error_ret; val = ret; if (base_freq > 0) diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 560c5c72daeb..65c7033e0df0 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -879,14 +879,6 @@ __cpufreq_cooling_register(struct device_node *np, goto free_power_table; } - snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", - cpufreq_dev->id); - - cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, - &cpufreq_cooling_ops); - if (IS_ERR(cool_dev)) - goto remove_idr; - /* Fill freq-table in descending order of frequencies */ for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { freq = find_next_max(table, freq); @@ -899,6 +891,14 @@ 
__cpufreq_cooling_register(struct device_node *np, pr_debug("%s: freq:%u KHz\n", __func__, freq); } + snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", + cpufreq_dev->id); + + cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, + &cpufreq_cooling_ops); + if (IS_ERR(cool_dev)) + goto remove_idr; + cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; cpufreq_dev->cool_dev = cool_dev; diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c index 0dde34e3a7c5..545c60c826a1 100644 --- a/drivers/thunderbolt/eeprom.c +++ b/drivers/thunderbolt/eeprom.c @@ -444,6 +444,7 @@ int tb_drom_read(struct tb_switch *sw) return tb_drom_parse_entries(sw); err: kfree(sw->drom); + sw->drom = NULL; return -EIO; } diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index c3fe026d3168..9aff37186246 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2045,7 +2045,9 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm) } } spin_unlock(&gsm_mux_lock); - WARN_ON(i == MAX_MUX); + /* open failed before registering => nothing to do */ + if (i == MAX_MUX) + return; /* In theory disconnecting DLCI 0 is sufficient but for some modems this is apparently not the case. */ diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index bbc4ce66c2c1..644ddb841d9f 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, add_wait_queue(&tty->read_wait, &wait); for (;;) { - if (test_bit(TTY_OTHER_DONE, &tty->flags)) { + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { ret = -EIO; break; } @@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, /* set bits for operations that won't block */ if (n_hdlc->rx_buf_list.head) mask |= POLLIN | POLLRDNORM; /* readable */ - if (test_bit(TTY_OTHER_DONE, &tty->flags)) + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) mask |= POLLHUP; if (tty_hung_up_p(filp)) mask |= POLLHUP; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index cf000b331eed..84e71bd19082 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1955,18 +1955,6 @@ static inline int input_available_p(struct tty_struct *tty, int poll) return ldata->commit_head - ldata->read_tail >= amt; } -static inline int check_other_done(struct tty_struct *tty) -{ - int done = test_bit(TTY_OTHER_DONE, &tty->flags); - if (done) { - /* paired with cmpxchg() in check_other_closed(); ensures - * read buffer head index is not stale - */ - smp_mb__after_atomic(); - } - return done; -} - /** * copy_from_read_buf - copy read data directly * @tty: terminal device @@ -2171,7 +2159,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, struct n_tty_data *ldata = tty->disc_data; unsigned char __user *b = buf; DEFINE_WAIT_FUNC(wait, woken_wake_function); - int c, done; + int c; int minimum, time; ssize_t retval = 0; long timeout; @@ -2239,32 +2227,35 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, ((minimum - (b - buf)) >= 1)) ldata->minimum_to_wake = (minimum - (b - buf)); - done = check_other_done(tty); - if (!input_available_p(tty, 0)) { - if (done) { - retval = -EIO; - break; - } - if (tty_hung_up_p(file)) - break; - if (!timeout) - break; - if (file->f_flags & O_NONBLOCK) { - retval = -EAGAIN; - break; - } - if (signal_pending(current)) { - retval = -ERESTARTSYS; - break; - } up_read(&tty->termios_rwsem); + tty_buffer_flush_work(tty->port); + down_read(&tty->termios_rwsem); + if (!input_available_p(tty, 
0)) { + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { + retval = -EIO; + break; + } + if (tty_hung_up_p(file)) + break; + if (!timeout) + break; + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + break; + } + if (signal_pending(current)) { + retval = -ERESTARTSYS; + break; + } + up_read(&tty->termios_rwsem); - timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, - timeout); + timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, + timeout); - down_read(&tty->termios_rwsem); - continue; + down_read(&tty->termios_rwsem); + continue; + } } if (ldata->icanon && !L_EXTPROC(tty)) { @@ -2446,12 +2437,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file, poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); - if (check_other_done(tty)) - mask |= POLLHUP; if (input_available_p(tty, 1)) mask |= POLLIN | POLLRDNORM; + else { + tty_buffer_flush_work(tty->port); + if (input_available_p(tty, 1)) + mask |= POLLIN | POLLRDNORM; + } if (tty->packet && tty->link->ctrl_status) mask |= POLLPRI | POLLIN | POLLRDNORM; + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) + mask |= POLLHUP; if (tty_hung_up_p(file)) mask |= POLLHUP; if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) { diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 78e983677339..7865228f664f 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -59,7 +59,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp) if (!tty->link) return; set_bit(TTY_OTHER_CLOSED, &tty->link->flags); - tty_flip_buffer_push(tty->link->port); + wake_up_interruptible(&tty->link->read_wait); wake_up_interruptible(&tty->link->write_wait); if (tty->driver->subtype == PTY_TYPE_MASTER) { set_bit(TTY_OTHER_CLOSED, &tty->flags); @@ -247,9 +247,7 @@ static int pty_open(struct tty_struct *tty, struct file *filp) goto out; clear_bit(TTY_IO_ERROR, &tty->flags); - /* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */ clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); - clear_bit(TTY_OTHER_DONE, &tty->link->flags); set_bit(TTY_THROTTLED, &tty->flags); return 0; diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c index 88531a36b69c..ed489880e62b 100644 --- a/drivers/tty/serial/8250/8250_mid.c +++ b/drivers/tty/serial/8250/8250_mid.c @@ -14,6 +14,7 @@ #include <linux/pci.h> #include <linux/dma/hsu.h> +#include <linux/8250_pci.h> #include "8250.h" @@ -24,6 +25,7 @@ #define PCI_DEVICE_ID_INTEL_DNV_UART 0x19d8 /* Intel MID Specific registers */ +#define INTEL_MID_UART_DNV_FISR 0x08 #define INTEL_MID_UART_PS 0x30 #define INTEL_MID_UART_MUL 0x34 #define INTEL_MID_UART_DIV 0x38 @@ -31,6 +33,7 @@ struct mid8250; struct mid8250_board { + unsigned int flags; unsigned long freq; unsigned int base_baud; int (*setup)(struct mid8250 *, struct uart_port *p); @@ -88,16 +91,16 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p) static int dnv_handle_irq(struct uart_port *p) { struct mid8250 *mid = p->private_data; - int ret; - - ret = hsu_dma_irq(&mid->dma_chip, 0); - ret |= hsu_dma_irq(&mid->dma_chip, 1); - - /* For now, letting the HW generate separate interrupt for the UART */ - if (ret) - return ret; - - return serial8250_handle_irq(p, serial_port_in(p, UART_IIR)); + unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR); + int ret = IRQ_NONE; + + if (fisr & BIT(2)) + ret |= hsu_dma_irq(&mid->dma_chip, 1); + if (fisr & BIT(1)) + ret |= hsu_dma_irq(&mid->dma_chip, 0); + if (fisr & BIT(0)) + ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR)); + return ret; } #define 
DNV_DMA_CHAN_OFFSET 0x80 @@ -106,12 +109,13 @@ static int dnv_setup(struct mid8250 *mid, struct uart_port *p) { struct hsu_dma_chip *chip = &mid->dma_chip; struct pci_dev *pdev = to_pci_dev(p->dev); + unsigned int bar = FL_GET_BASE(mid->board->flags); int ret; chip->dev = &pdev->dev; chip->irq = pdev->irq; chip->regs = p->membase; - chip->length = pci_resource_len(pdev, 0); + chip->length = pci_resource_len(pdev, bar); chip->offset = DNV_DMA_CHAN_OFFSET; /* Falling back to PIO mode if DMA probing fails */ @@ -217,6 +221,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct uart_8250_port uart; struct mid8250 *mid; + unsigned int bar; int ret; ret = pcim_enable_device(pdev); @@ -230,6 +235,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; mid->board = (struct mid8250_board *)id->driver_data; + bar = FL_GET_BASE(mid->board->flags); memset(&uart, 0, sizeof(struct uart_8250_port)); @@ -242,8 +248,8 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE; uart.port.set_termios = mid8250_set_termios; - uart.port.mapbase = pci_resource_start(pdev, 0); - uart.port.membase = pcim_iomap(pdev, 0, 0); + uart.port.mapbase = pci_resource_start(pdev, bar); + uart.port.membase = pcim_iomap(pdev, bar, 0); if (!uart.port.membase) return -ENOMEM; @@ -282,18 +288,21 @@ static void mid8250_remove(struct pci_dev *pdev) } static const struct mid8250_board pnw_board = { + .flags = FL_BASE0, .freq = 50000000, .base_baud = 115200, .setup = pnw_setup, }; static const struct mid8250_board tng_board = { + .flags = FL_BASE0, .freq = 38400000, .base_baud = 1843200, .setup = tng_setup, }; static const struct mid8250_board dnv_board = { + .flags = FL_BASE1, .freq = 133333333, .base_baud = 115200, .setup = dnv_setup, diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 7cd6f9a90542..c1d4a8fa9be8 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1401,6 +1401,9 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios, unsigned long m, n; u32 reg; + /* Gracefully handle the B0 case: fall back to B9600 */ + fuart = fuart ? 
fuart : 9600 * 16; + /* Get Fuart closer to Fref */ fuart *= rounddown_pow_of_two(fref / fuart); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 94294558943c..7bbadd176c74 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -277,6 +277,13 @@ static bool atmel_use_dma_rx(struct uart_port *port) return atmel_port->use_dma_rx; } +static bool atmel_use_fifo(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + return atmel_port->fifo_size; +} + static unsigned int atmel_get_lines_status(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); @@ -2169,7 +2176,12 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, mode |= ATMEL_US_USMODE_RS485; } else if (termios->c_cflag & CRTSCTS) { /* RS232 with hardware handshake (RTS/CTS) */ - mode |= ATMEL_US_USMODE_HWHS; + if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) { + dev_info(port->dev, "not enabling hardware flow control because DMA is used"); + termios->c_cflag &= ~CRTSCTS; + } else { + mode |= ATMEL_US_USMODE_HWHS; + } } else { /* RS232 without hadware handshake */ mode |= ATMEL_US_USMODE_NORMAL; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index fd9c47f2f29f..c8ab5370670d 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1263,6 +1263,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port, /* check to see if we need to change clock source */ if (ourport->baudclk != clk) { + clk_prepare_enable(clk); + s3c24xx_serial_setsource(port, clk_sel); if (!IS_ERR(ourport->baudclk)) { @@ -1270,8 +1272,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port, ourport->baudclk = ERR_PTR(-EINVAL); } - clk_prepare_enable(clk); - ourport->baudclk = clk; ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0; } diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c index 73190f5d2832..71d26c8e1b8f 100644 --- a/drivers/tty/serial/ucc_uart.c +++ b/drivers/tty/serial/ucc_uart.c @@ -1478,6 +1478,9 @@ static const struct of_device_id ucc_uart_match[] = { .type = "serial", .compatible = "ucc_uart", }, + { + .compatible = "fsl,t1040-ucc-uart", + }, {}, }; MODULE_DEVICE_TABLE(of, ucc_uart_match); diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 3cd31e0d4bd9..fb31eecb708d 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -37,29 +37,6 @@ #define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF) -/* - * If all tty flip buffers have been processed by flush_to_ldisc() or - * dropped by tty_buffer_flush(), check if the linked pty has been closed. 
- * If so, wake the reader/poll to process - */ -static inline void check_other_closed(struct tty_struct *tty) -{ - unsigned long flags, old; - - /* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */ - for (flags = ACCESS_ONCE(tty->flags); - test_bit(TTY_OTHER_CLOSED, &flags); - ) { - old = flags; - __set_bit(TTY_OTHER_DONE, &flags); - flags = cmpxchg(&tty->flags, old, flags); - if (old == flags) { - wake_up_interruptible(&tty->read_wait); - break; - } - } -} - /** * tty_buffer_lock_exclusive - gain exclusive access to buffer * tty_buffer_unlock_exclusive - release exclusive access @@ -254,8 +231,6 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) if (ld && ld->ops->flush_buffer) ld->ops->flush_buffer(tty); - check_other_closed(tty); - atomic_dec(&buf->priority); mutex_unlock(&buf->lock); } @@ -505,10 +480,8 @@ static void flush_to_ldisc(struct work_struct *work) */ count = smp_load_acquire(&head->commit) - head->read; if (!count) { - if (next == NULL) { - check_other_closed(tty); + if (next == NULL) break; - } buf->head = next; tty_buffer_free(port, head); continue; @@ -597,3 +570,8 @@ bool tty_buffer_cancel_work(struct tty_port *port) { return cancel_work_sync(&port->buf.work); } + +void tty_buffer_flush_work(struct tty_port *port) +{ + flush_work(&port->buf.work); +} diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 6f0336fff501..41987a55a538 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c) static void do_compute_shiftstate(void) { - unsigned int i, j, k, sym, val; + unsigned int k, sym, val; shift_state = 0; memset(shift_down, 0, sizeof(shift_down)); - for (i = 0; i < ARRAY_SIZE(key_down); i++) { - - if (!key_down[i]) + for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) { + sym = U(key_maps[0][k]); + if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK) continue; - k = i * BITS_PER_LONG; - - for (j = 0; j < BITS_PER_LONG; j++, k++) { - - if (!test_bit(k, key_down)) - continue; + val = KVAL(sym); + if (val == KVAL(K_CAPSSHIFT)) + val = KVAL(K_SHIFT); - sym = U(key_maps[0][k]); - if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK) - continue; - - val = KVAL(sym); - if (val == KVAL(K_CAPSSHIFT)) - val = KVAL(K_SHIFT); - - shift_down[val]++; - shift_state |= (1 << val); - } + shift_down[val]++; + shift_state |= BIT(val); } } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 4462d167900c..136ebaaa9cc0 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init) vc->vc_complement_mask = 0; vc->vc_can_do_color = 0; vc->vc_panic_force_write = false; + vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS; vc->vc_sw->con_init(vc, init); if (!vc->vc_complement_mask) vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800; @@ -3583,9 +3584,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last) goto err; desc = csw->con_startup(); - - if (!desc) + if (!desc) { + retval = -ENODEV; goto err; + } retval = -EINVAL; diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c index 61d538aa2346..4f4f06a5889f 100644 --- a/drivers/usb/common/usb-otg-fsm.c +++ b/drivers/usb/common/usb-otg-fsm.c @@ -21,6 +21,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/mutex.h> @@ -365,3 +366,4 @@ int otg_statemachine(struct otg_fsm *fsm) return state_changed; } EXPORT_SYMBOL_GPL(otg_statemachine); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 2057d91d8336..dadd1e8dfe09 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -284,7 +284,7 @@ static int usb_probe_interface(struct device *dev) struct usb_device *udev = interface_to_usbdev(intf); const struct usb_device_id *id; int error = -ENODEV; - int lpm_disable_error; + int lpm_disable_error = -ENODEV; dev_dbg(dev, "%s\n", __func__); @@ -336,12 +336,14 @@ static int usb_probe_interface(struct device *dev) * setting during probe, that should also be fine. usb_set_interface() * will attempt to disable LPM, and fail if it can't disable it. */ - lpm_disable_error = usb_unlocked_disable_lpm(udev); - if (lpm_disable_error && driver->disable_hub_initiated_lpm) { - dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.", - __func__, driver->name); - error = lpm_disable_error; - goto err; + if (driver->disable_hub_initiated_lpm) { + lpm_disable_error = usb_unlocked_disable_lpm(udev); + if (lpm_disable_error) { + dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.", + __func__, driver->name); + error = lpm_disable_error; + goto err; + } } /* Carry out a deferred switch to altsetting 0 */ @@ -391,7 +393,8 @@ static int usb_unbind_interface(struct device *dev) struct usb_interface *intf = to_usb_interface(dev); struct usb_host_endpoint *ep, **eps = NULL; struct usb_device *udev; - int i, j, error, r, lpm_disable_error; + int i, j, error, r; + int lpm_disable_error = -ENODEV; intf->condition = USB_INTERFACE_UNBINDING; @@ -399,12 +402,13 @@ static int usb_unbind_interface(struct device *dev) udev = interface_to_usbdev(intf); error = usb_autoresume_device(udev); - /* Hub-initiated LPM policy may change, so attempt to disable LPM until + /* If hub-initiated LPM policy may change, attempt to disable LPM until * the driver is unbound. If LPM isn't disabled, that's fine because it * wouldn't be enabled unless all the bound interfaces supported * hub-initiated LPM. */ - lpm_disable_error = usb_unlocked_disable_lpm(udev); + if (driver->disable_hub_initiated_lpm) + lpm_disable_error = usb_unlocked_disable_lpm(udev); /* * Terminate all URBs for this interface unless the driver @@ -505,7 +509,7 @@ int usb_driver_claim_interface(struct usb_driver *driver, struct device *dev; struct usb_device *udev; int retval = 0; - int lpm_disable_error; + int lpm_disable_error = -ENODEV; if (!iface) return -ENODEV; @@ -526,12 +530,14 @@ int usb_driver_claim_interface(struct usb_driver *driver, iface->condition = USB_INTERFACE_BOUND; - /* Disable LPM until this driver is bound. */ - lpm_disable_error = usb_unlocked_disable_lpm(udev); - if (lpm_disable_error && driver->disable_hub_initiated_lpm) { - dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.", - __func__, driver->name); - return -ENOMEM; + /* See the comment about disabling LPM in usb_probe_interface(). 
*/ + if (driver->disable_hub_initiated_lpm) { + lpm_disable_error = usb_unlocked_disable_lpm(udev); + if (lpm_disable_error) { + dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.", + __func__, driver->name); + return -ENOMEM; + } } /* Claimed interfaces are initially inactive (suspended) and diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 6dc810bce295..944a6dca0fcb 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, + /* USB3503 */ + { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Microsoft Wireless Laser Mouse 6000 Receiver */ { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -173,6 +176,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* MAYA44USB sound device */ { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME }, + /* ASUS Base Station(T100) */ + { USB_DEVICE(0x0b05, 0x17e0), .driver_info = + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Action Semiconductor flash disk */ { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, @@ -188,26 +195,22 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1908, 0x1315), .driver_info = USB_QUIRK_HONOR_BNUMINTERFACES }, - /* INTEL VALUE SSD */ - { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, - - /* USB3503 */ - { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME }, - - /* ASUS Base Station(T100) */ - { USB_DEVICE(0x0b05, 0x17e0), .driver_info = - USB_QUIRK_IGNORE_REMOTE_WAKEUP }, - /* Protocol and OTG Electrical Test Device */ { USB_DEVICE(0x1a0a, 0x0200), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + /* Acer C120 LED Projector */ + { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM }, + /* Blackmagic Design Intensity Shuttle */ { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, /* Blackmagic Design UltraStudio SDI */ { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, + /* INTEL VALUE SSD */ + { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, + { } /* terminating entry must be last */ }; diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index a66d3cb62b65..a738a68d2292 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -44,6 +44,17 @@ #include <linux/usb/phy.h> #include "hw.h" +#ifdef CONFIG_MIPS +/* + * There are some MIPS machines that can run in either big-endian + * or little-endian mode and that use the dwc2 register without + * a byteswap in both ways. + * Unlike other architectures, MIPS apparently does not require a + * barrier before the __raw_writel() to synchronize with DMA but does + * require the barrier after the __raw_writel() to serialize a set of + * writes. This set of operations was added specifically for MIPS and + * should only be used there. 
+ */ static inline u32 dwc2_readl(const void __iomem *addr) { u32 value = __raw_readl(addr); @@ -70,6 +81,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr) pr_info("INFO:: wrote %08x to %p\n", value, addr); #endif } +#else +/* Normal architectures just use readl/write */ +static inline u32 dwc2_readl(const void __iomem *addr) +{ + return readl(addr); +} + +static inline void dwc2_writel(u32 value, void __iomem *addr) +{ + writel(value, addr); + +#ifdef DWC2_LOG_WRITES + pr_info("info:: wrote %08x to %p\n", value, addr); +#endif +} +#endif /* Maximum number of Endpoints/HostChannels */ #define MAX_EPS_CHANNELS 16 diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index dd5cb5577dca..2f1fb7e7aa54 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev) platform_set_drvdata(pdev, exynos); - ret = dwc3_exynos_register_phys(exynos); - if (ret) { - dev_err(dev, "couldn't register PHYs\n"); - return ret; - } - exynos->dev = dev; exynos->clk = devm_clk_get(dev, "usbdrd30"); @@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev) goto err3; } + ret = dwc3_exynos_register_phys(exynos); + if (ret) { + dev_err(dev, "couldn't register PHYs\n"); + goto err4; + } + if (node) { ret = of_platform_populate(node, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to add dwc3 core\n"); - goto err4; + goto err5; } } else { dev_err(dev, "no device node, failed to add dwc3 core\n"); ret = -ENODEV; - goto err4; + goto err5; } return 0; +err5: + platform_device_unregister(exynos->usb2_phy); + platform_device_unregister(exynos->usb3_phy); err4: regulator_disable(exynos->vdd10); err3: diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index e5a88adfce59..add035269ae7 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -2073,6 +2073,11 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) clk_prepare_enable(mdwc->iface_clk); clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate); clk_prepare_enable(mdwc->core_clk); + + /* set Memory core: ON, Memory periphery: ON */ + clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM); + clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH); + clk_prepare_enable(mdwc->utmi_clk); if (mdwc->bus_aggr_clk) clk_prepare_enable(mdwc->bus_aggr_clk); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 687d51e25d4b..2450cc52fa24 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2130,24 +2130,11 @@ static int dwc3_gadget_stop(struct usb_gadget *g) struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; - pm_runtime_get_sync(dwc->dev); - dbg_event(0xFF, "Stop gsync", - atomic_read(&dwc->dev->power.usage_count)); - dwc3_gadget_disable_irq(dwc); spin_lock_irqsave(&dwc->lock, flags); - - __dwc3_gadget_ep_disable(dwc->eps[0]); - __dwc3_gadget_ep_disable(dwc->eps[1]); - dwc->gadget_driver = NULL; - spin_unlock_irqrestore(&dwc->lock, flags); - pm_runtime_mark_last_busy(dwc->dev); - pm_runtime_put_autosuspend(dwc->dev); - dbg_event(0xFF, "Auto_susgsync", 0); - return 0; } @@ -2814,7 +2801,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT); dwc3_usb3_phy_suspend(dwc, false); - usb_gadget_vbus_draw(&dwc->gadget, 0); + usb_gadget_vbus_draw(&dwc->gadget, 100); dwc3_reset_gadget(dwc); dbg_event(0xFF, "BUS RST", 0); diff --git a/drivers/usb/gadget/function/f_fs.c 
b/drivers/usb/gadget/function/f_fs.c index 20fe358387be..eb2409dda50d 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -717,7 +717,7 @@ static void ffs_user_copy_worker(struct work_struct *work) if (io_data->read && ret > 0) { use_mm(io_data->mm); ret = copy_to_iter(io_data->buf, ret, &io_data->data); - if (iov_iter_count(&io_data->data)) + if (ret != io_data->req->actual && iov_iter_count(&io_data->data)) ret = -EFAULT; unuse_mm(io_data->mm); } diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index c298c95d4ba0..738f20d935d6 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -497,7 +497,8 @@ static int ipa_suspend_work_handler(struct gsi_data_port *d_port) log_event_dbg("%s: Calling xdci_suspend", __func__); ret = ipa_usb_xdci_suspend(gsi->d_port.out_channel_handle, - gsi->d_port.in_channel_handle, gsi->prot_id); + gsi->d_port.in_channel_handle, gsi->prot_id, + true); if (!ret) { d_port->sm_state = STATE_SUSPENDED; diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index b135da661fc9..f3715d85aedc 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2981,25 +2981,6 @@ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn, } EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string); -int fsg_common_run_thread(struct fsg_common *common) -{ - common->state = FSG_STATE_IDLE; - /* Tell the thread to start working */ - common->thread_task = - kthread_create(fsg_main_thread, common, "file-storage"); - if (IS_ERR(common->thread_task)) { - common->state = FSG_STATE_TERMINATED; - return PTR_ERR(common->thread_task); - } - - DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task)); - - wake_up_process(common->thread_task); - - return 0; -} -EXPORT_SYMBOL_GPL(fsg_common_run_thread); - static void fsg_common_release(struct kref *ref) { struct fsg_common *common = container_of(ref, struct fsg_common, ref); @@ -3009,6 +2990,7 @@ static void fsg_common_release(struct kref *ref) if (common->state != FSG_STATE_TERMINATED) { raise_exception(common, FSG_STATE_EXIT); wait_for_completion(&common->thread_notifier); + common->thread_task = NULL; } for (i = 0; i < ARRAY_SIZE(common->luns); ++i) { @@ -3054,9 +3036,21 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f) if (ret) return ret; fsg_common_set_inquiry_string(fsg->common, NULL, NULL); - ret = fsg_common_run_thread(fsg->common); - if (ret) + } + + if (!common->thread_task) { + common->state = FSG_STATE_IDLE; + common->thread_task = + kthread_create(fsg_main_thread, common, "file-storage"); + if (IS_ERR(common->thread_task)) { + int ret = PTR_ERR(common->thread_task); + common->thread_task = NULL; + common->state = FSG_STATE_TERMINATED; return ret; + } + DBG(common, "I/O thread pid: %d\n", + task_pid_nr(common->thread_task)); + wake_up_process(common->thread_task); } fsg->gadget = gadget; diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h index 445df6775609..b6a9918eaefb 100644 --- a/drivers/usb/gadget/function/f_mass_storage.h +++ b/drivers/usb/gadget/function/f_mass_storage.h @@ -153,8 +153,6 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg); void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn, const char *pn); -int fsg_common_run_thread(struct fsg_common 
*common); - void fsg_config_from_params(struct fsg_config *cfg, const struct fsg_module_parameters *params, unsigned int fsg_num_buffers); diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 98d5908c1e2f..8919cc26b98e 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -1141,6 +1141,7 @@ static void f_midi_free(struct usb_function *f) kfree(midi->in_port[i]); opts->func_inst.f = NULL; kfree(midi); + opts->func_inst.f = NULL; --opts->refcnt; mutex_unlock(&opts->lock); } diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index dd73dfe5dcab..74e9f5b5a45d 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -863,8 +863,6 @@ static int eth_stop(struct net_device *net) /*-------------------------------------------------------------------------*/ -static u8 host_ethaddr[ETH_ALEN]; - static int get_ether_addr(const char *str, u8 *dev_addr) { if (str) { @@ -895,17 +893,6 @@ static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len) return 18; } -static int get_host_ether_addr(u8 *str, u8 *dev_addr) -{ - memcpy(dev_addr, str, ETH_ALEN); - if (is_valid_ether_addr(dev_addr)) - return 0; - - random_ether_addr(dev_addr); - memcpy(str, dev_addr, ETH_ALEN); - return 1; -} - static const struct net_device_ops eth_netdev_ops = { .ndo_open = eth_open, .ndo_stop = eth_stop, @@ -963,11 +950,9 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, if (get_ether_addr(dev_addr, net->dev_addr)) dev_warn(&g->dev, "using random %s ethernet address\n", "self"); - - if (get_host_ether_addr(host_ethaddr, dev->host_mac)) - dev_warn(&g->dev, "using random %s ethernet address\n", "host"); - else - dev_warn(&g->dev, "using previous %s ethernet address\n", "host"); + if (get_ether_addr(host_addr, dev->host_mac)) + dev_warn(&g->dev, + "using random %s ethernet address\n", "host"); if (ethaddr) memcpy(ethaddr, dev->host_mac, ETH_ALEN); diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c index 4b158e2d1e57..64b2cbb0bc6b 100644 --- a/drivers/usb/gadget/legacy/acm_ms.c +++ b/drivers/usb/gadget/legacy/acm_ms.c @@ -133,10 +133,6 @@ static int acm_ms_do_config(struct usb_configuration *c) if (status < 0) goto put_msg; - status = fsg_common_run_thread(opts->common); - if (status) - goto remove_acm; - status = usb_add_function(c, f_msg); if (status) goto remove_acm; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index f454c7af489c..55386619a0f1 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -937,8 +937,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) struct usb_ep *ep = dev->gadget->ep0; struct usb_request *req = dev->req; - if ((retval = setup_req (ep, req, 0)) == 0) - retval = usb_ep_queue (ep, req, GFP_ATOMIC); + if ((retval = setup_req (ep, req, 0)) == 0) { + spin_unlock_irq (&dev->lock); + retval = usb_ep_queue (ep, req, GFP_KERNEL); + spin_lock_irq (&dev->lock); + } dev->state = STATE_DEV_CONNECTED; /* assume that was SET_CONFIGURATION */ @@ -1456,8 +1459,11 @@ delegate: w_length); if (value < 0) break; + + spin_unlock (&dev->lock); value = usb_ep_queue (gadget->ep0, dev->req, - GFP_ATOMIC); + GFP_KERNEL); + spin_lock (&dev->lock); if (value < 0) { clean_req (gadget->ep0, dev->req); break; @@ -1480,11 +1486,14 @@ delegate: if (value >= 0 && dev->state != STATE_DEV_SETUP) { req->length = value; req->zero = 
value < w_length; - value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC); + + spin_unlock (&dev->lock); + value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL); if (value < 0) { DBG (dev, "ep_queue --> %d\n", value); req->status = 0; } + return value; } /* device stalls when value < 0 */ diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c index bda3c519110f..99aa22c81770 100644 --- a/drivers/usb/gadget/legacy/mass_storage.c +++ b/drivers/usb/gadget/legacy/mass_storage.c @@ -132,10 +132,6 @@ static int msg_do_config(struct usb_configuration *c) if (IS_ERR(f_msg)) return PTR_ERR(f_msg); - ret = fsg_common_run_thread(opts->common); - if (ret) - goto put_func; - ret = usb_add_function(c, f_msg); if (ret) goto put_func; diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c index 4fe794ddcd49..09c7c28f32f7 100644 --- a/drivers/usb/gadget/legacy/multi.c +++ b/drivers/usb/gadget/legacy/multi.c @@ -137,7 +137,6 @@ static struct usb_function *f_msg_rndis; static int rndis_do_config(struct usb_configuration *c) { - struct fsg_opts *fsg_opts; int ret; if (gadget_is_otg(c->cdev->gadget)) { @@ -169,11 +168,6 @@ static int rndis_do_config(struct usb_configuration *c) goto err_fsg; } - fsg_opts = fsg_opts_from_func_inst(fi_msg); - ret = fsg_common_run_thread(fsg_opts->common); - if (ret) - goto err_run; - ret = usb_add_function(c, f_msg_rndis); if (ret) goto err_run; @@ -225,7 +219,6 @@ static struct usb_function *f_msg_multi; static int cdc_do_config(struct usb_configuration *c) { - struct fsg_opts *fsg_opts; int ret; if (gadget_is_otg(c->cdev->gadget)) { @@ -258,11 +251,6 @@ static int cdc_do_config(struct usb_configuration *c) goto err_fsg; } - fsg_opts = fsg_opts_from_func_inst(fi_msg); - ret = fsg_common_run_thread(fsg_opts->common); - if (ret) - goto err_run; - ret = usb_add_function(c, f_msg_multi); if (ret) goto err_run; diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c index 8b3f6fb1825d..05d3f79e768d 100644 --- a/drivers/usb/gadget/legacy/nokia.c +++ b/drivers/usb/gadget/legacy/nokia.c @@ -152,7 +152,6 @@ static int nokia_bind_config(struct usb_configuration *c) struct usb_function *f_ecm; struct usb_function *f_obex2 = NULL; struct usb_function *f_msg; - struct fsg_opts *fsg_opts; int status = 0; int obex1_stat = -1; int obex2_stat = -1; @@ -222,12 +221,6 @@ static int nokia_bind_config(struct usb_configuration *c) goto err_ecm; } - fsg_opts = fsg_opts_from_func_inst(fi_msg); - - status = fsg_common_run_thread(fsg_opts->common); - if (status) - goto err_msg; - status = usb_add_function(c, f_msg); if (status) goto err_msg; diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index c148a4fdfe99..476ac5e511a4 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -71,7 +71,7 @@ int usb_gadget_map_request(struct usb_gadget *gadget, mapped = dma_map_sg(dev, req->sg, req->num_sgs, is_in ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); if (mapped == 0) { - dev_err(&gadget->dev, "failed to map SGs\n"); + dev_err(dev, "failed to map SGs\n"); return -EFAULT; } diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index 4031b372008e..c1c1024a054c 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c @@ -89,7 +89,7 @@ static int tegra_reset_usb_controller(struct platform_device *pdev) if (!usb1_reset_attempted) { struct reset_control *usb1_reset; - usb1_reset = of_reset_control_get(phy_np, "usb"); + usb1_reset = of_reset_control_get(phy_np, "utmi-pads"); if (IS_ERR(usb1_reset)) { dev_warn(&pdev->dev, "can't get utmi-pads reset from the PHY\n"); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index ea4fb4b0cd44..de644e56aa3b 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -37,6 +37,7 @@ /* Device for a quirk */ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 +#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 #define PCI_VENDOR_ID_ETRON 0x1b6f @@ -115,6 +116,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_TRUST_TX_LENGTH; } + if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && + pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009) + xhci->quirks |= XHCI_BROKEN_STREAMS; + if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 3e49861a09a2..084dbbc81a6d 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -186,6 +186,9 @@ static int xhci_plat_probe(struct platform_device *pdev) ret = clk_prepare_enable(clk); if (ret) goto put_hcd; + } else if (PTR_ERR(clk) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto put_hcd; } if (pdev->dev.parent) @@ -241,10 +244,14 @@ static int xhci_plat_probe(struct platform_device *pdev) if (ret) goto disable_usb_phy; + device_wakeup_enable(&hcd->self.root_hub->dev); + ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED | IRQF_ONESHOT); if (ret) goto dealloc_usb2_hcd; + device_wakeup_enable(&xhci->shared_hcd->self.root_hub->dev); + ret = device_create_file(&pdev->dev, &dev_attr_config_imod); if (ret) dev_err(&pdev->dev, "%s: unable to create imod sysfs entry\n", diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 2b63969c2bbf..34cd23724bed 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -289,6 +289,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; + + /* + * Writing the CMD_RING_ABORT bit should cause a cmd completion event, + * however on some host hw the CMD_RING_RUNNING bit is correctly cleared + * but the completion event is never sent. Use the cmd timeout timer to + * handle those cases. 
Use twice the time to cover the bit polling retry + */ + mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT)); xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); @@ -313,6 +321,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) xhci_err(xhci, "Stopped the command ring failed, " "maybe the host is dead\n"); + del_timer(&xhci->cmd_timer); xhci->xhc_state |= XHCI_STATE_DYING; xhci_quiesce(xhci); xhci_halt(xhci); @@ -1252,22 +1261,21 @@ void xhci_handle_command_timeout(unsigned long data) int ret; unsigned long flags; u64 hw_ring_state; - struct xhci_command *cur_cmd = NULL; + bool second_timeout = false; xhci = (struct xhci_hcd *) data; /* mark this command to be cancelled */ spin_lock_irqsave(&xhci->lock, flags); if (xhci->current_cmd) { - cur_cmd = xhci->current_cmd; - cur_cmd->status = COMP_CMD_ABORT; + if (xhci->current_cmd->status == COMP_CMD_ABORT) + second_timeout = true; + xhci->current_cmd->status = COMP_CMD_ABORT; } - /* Make sure command ring is running before aborting it */ hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && (hw_ring_state & CMD_RING_RUNNING)) { - spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "Command timeout\n"); ret = xhci_abort_cmd_ring(xhci); @@ -1279,6 +1287,15 @@ void xhci_handle_command_timeout(unsigned long data) } return; } + + /* command ring failed to restart, or host removed. Bail out */ + if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "command timed out twice, ring start fail?\n"); + xhci_cleanup_command_queue(xhci); + return; + } + /* command timeout on stopped ring, ring can't be aborted */ xhci_dbg(xhci, "Command timeout on stopped ring\n"); xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); @@ -2727,7 +2744,8 @@ hw_died: writel(irq_pending, &xhci->ir_set->irq_pending); } - if (xhci->xhc_state & XHCI_STATE_DYING) { + if (xhci->xhc_state & XHCI_STATE_DYING || + xhci->xhc_state & XHCI_STATE_HALTED) { xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " "Shouldn't IRQs be disabled?\n"); /* Clear the event handler busy flag (RW1C); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 5fc20c7c51f8..a37b219a8dc5 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -692,20 +692,23 @@ void xhci_stop(struct usb_hcd *hcd) u32 temp; struct xhci_hcd *xhci = hcd_to_xhci(hcd); - if (xhci->xhc_state & XHCI_STATE_HALTED) - return; - mutex_lock(&xhci->mutex); - spin_lock_irq(&xhci->lock); - xhci->xhc_state |= XHCI_STATE_HALTED; - xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; - /* Make sure the xHC is halted for a USB3 roothub - * (xhci_stop() could be called as part of failed init). 
- */ - xhci_halt(xhci); - xhci_reset(xhci); - spin_unlock_irq(&xhci->lock); + if (!(xhci->xhc_state & XHCI_STATE_HALTED)) { + spin_lock_irq(&xhci->lock); + + xhci->xhc_state |= XHCI_STATE_HALTED; + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + xhci_halt(xhci); + xhci_reset(xhci); + + spin_unlock_irq(&xhci->lock); + } + + if (!usb_hcd_is_primary_hcd(hcd)) { + mutex_unlock(&xhci->mutex); + return; + } xhci_cleanup_msix(xhci); diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 637f3f7cfce8..1a812eafe670 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -505,6 +505,7 @@ static struct scatterlist * alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe) { struct scatterlist *sg; + unsigned int n_size = 0; unsigned i; unsigned size = max; unsigned maxpacket = @@ -537,7 +538,8 @@ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe) break; case 1: for (j = 0; j < size; j++) - *buf++ = (u8) ((j % maxpacket) % 63); + *buf++ = (u8) (((j + n_size) % maxpacket) % 63); + n_size += size; break; } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index ee9ff7028b92..00eed5d66fda 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2401,7 +2401,8 @@ static void musb_restore_context(struct musb *musb) musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe); musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe); musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); - musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); + if (musb->context.devctl & MUSB_DEVCTL_SESSION) + musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); for (i = 0; i < musb->config->num_eps; ++i) { struct musb_hw_ep *hw_ep; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 795a45b1b25b..59a63a0b7985 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -594,14 +594,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum) musb_writew(ep->regs, MUSB_TXCSR, 0); /* scrub all previous state, clearing toggle */ - } else { - csr = musb_readw(ep->regs, MUSB_RXCSR); - if (csr & MUSB_RXCSR_RXPKTRDY) - WARNING("rx%d, packet/%d ready?\n", ep->epnum, - musb_readw(ep->regs, MUSB_RXCOUNT)); - - musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); } + csr = musb_readw(ep->regs, MUSB_RXCSR); + if (csr & MUSB_RXCSR_RXPKTRDY) + WARNING("rx%d, packet/%d ready?\n", ep->epnum, + musb_readw(ep->regs, MUSB_RXCOUNT)); + + musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); /* target addr and (for multipoint) hub addr/port */ if (musb->is_multipoint) { @@ -995,9 +994,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, if (is_in) { dma = is_dma_capable() ? 
ep->rx_channel : NULL; - /* clear nak timeout bit */ + /* + * Need to stop the transaction by clearing REQPKT first + * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED + * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2 + */ rx_csr = musb_readw(epio, MUSB_RXCSR); rx_csr |= MUSB_RXCSR_H_WZC_BITS; + rx_csr &= ~MUSB_RXCSR_H_REQPKT; + musb_writew(epio, MUSB_RXCSR, rx_csr); rx_csr &= ~MUSB_RXCSR_DATAERROR; musb_writew(epio, MUSB_RXCSR, rx_csr); @@ -1551,7 +1556,7 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma, struct urb *urb, size_t len) { - struct dma_channel *channel = hw_ep->tx_channel; + struct dma_channel *channel = hw_ep->rx_channel; void __iomem *epio = hw_ep->regs; dma_addr_t *buf; u32 length, res; diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index 2bc70d1cf6fa..b4d7c7d8bddf 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -21,6 +21,7 @@ #include <linux/power_supply.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/extcon.h> #include <linux/usb/usbpd.h> @@ -264,6 +265,16 @@ struct vdm_tx { int size; }; +struct rx_msg { + u8 type; + u8 len; + u32 payload[7]; + struct list_head entry; +}; + +#define IS_DATA(m, t) ((m) && ((m)->len) && ((m)->type == (t))) +#define IS_CTRL(m, t) ((m) && !((m)->len) && ((m)->type == (t))) + struct usbpd { struct device dev; struct workqueue_struct *wq; @@ -274,10 +285,9 @@ struct usbpd { struct extcon_dev *extcon; enum usbpd_state current_state; - bool hard_reset; - u8 rx_msg_type; - u8 rx_msg_len; - u32 rx_payload[7]; + bool hard_reset_recvd; + struct list_head rx_q; + spinlock_t rx_lock; u32 received_pdos[7]; int src_cap_id; @@ -457,14 +467,10 @@ static int pd_select_pdo(struct usbpd *pd, int pdo_pos) return 0; } -static int pd_eval_src_caps(struct usbpd *pd, const u32 *src_caps) +static int pd_eval_src_caps(struct usbpd *pd) { union power_supply_propval val; - u32 first_pdo = src_caps[0]; - - /* save the PDOs so userspace can further evaluate */ - memcpy(&pd->received_pdos, src_caps, sizeof(pd->received_pdos)); - pd->src_cap_id++; + u32 first_pdo = pd->received_pdos[0]; if (PD_SRC_PDO_TYPE(first_pdo) != PD_SRC_PDO_TYPE_FIXED) { usbpd_err(&pd->dev, "First src_cap invalid! 
%08x\n", first_pdo); @@ -487,16 +493,12 @@ static int pd_eval_src_caps(struct usbpd *pd, const u32 *src_caps) static void pd_send_hard_reset(struct usbpd *pd) { - int ret; - usbpd_dbg(&pd->dev, "send hard reset"); /* Force CC logic to source/sink to keep Rp/Rd unchanged */ set_power_role(pd, pd->current_pr); pd->hard_reset_count++; - ret = pd_phy_signal(HARD_RESET_SIG, 5); /* tHardResetComplete */ - if (!ret) - pd->hard_reset = true; + pd_phy_signal(HARD_RESET_SIG, 5); /* tHardResetComplete */ pd->in_pr_swap = false; } @@ -522,13 +524,15 @@ static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type) /* Force CC logic to source/sink to keep Rp/Rd unchanged */ set_power_role(pd, pd->current_pr); - pd->hard_reset = true; + pd->hard_reset_recvd = true; kick_sm(pd, 0); } static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type, u8 *buf, size_t len) { + struct rx_msg *rx_msg; + unsigned long flags; u16 header; if (type != SOP_MSG) { @@ -571,16 +575,20 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type, return; } - /* block until previous message has been consumed by usbpd_sm */ - if (pd->rx_msg_type) - flush_work(&pd->sm_work); + rx_msg = kzalloc(sizeof(*rx_msg), GFP_KERNEL); + if (!rx_msg) + return; - pd->rx_msg_type = PD_MSG_HDR_TYPE(header); - pd->rx_msg_len = PD_MSG_HDR_COUNT(header); - memcpy(&pd->rx_payload, buf, len); + rx_msg->type = PD_MSG_HDR_TYPE(header); + rx_msg->len = PD_MSG_HDR_COUNT(header); + memcpy(&rx_msg->payload, buf, len); + + spin_lock_irqsave(&pd->rx_lock, flags); + list_add_tail(&rx_msg->entry, &pd->rx_q); + spin_unlock_irqrestore(&pd->rx_lock, flags); usbpd_dbg(&pd->dev, "received message: type(%d) len(%d)\n", - pd->rx_msg_type, pd->rx_msg_len); + rx_msg->type, rx_msg->len); kick_sm(pd, 0); } @@ -611,6 +619,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) FRAME_FILTER_EN_HARD_RESET }; union power_supply_propval val = {0}; + unsigned long flags; int ret; usbpd_dbg(&pd->dev, "%s -> %s\n", @@ -642,8 +651,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val); - pd->rx_msg_len = 0; - pd->rx_msg_type = 0; pd->rx_msgid = -1; if (!pd->in_pr_swap) { @@ -753,40 +760,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE); break; - case PE_SRC_TRANSITION_TO_DEFAULT: - pd->hard_reset = false; - - if (pd->vconn_enabled) - regulator_disable(pd->vconn); - regulator_disable(pd->vbus); - - if (pd->current_dr != DR_DFP) { - extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0); - pd->current_dr = DR_DFP; - pd_phy_update_roles(pd->current_dr, pd->current_pr); - } - - msleep(SRC_RECOVER_TIME); - - ret = regulator_enable(pd->vbus); - if (ret) - usbpd_err(&pd->dev, "Unable to enable vbus\n"); - - if (pd->vconn_enabled) { - ret = regulator_enable(pd->vconn); - if (ret) { - usbpd_err(&pd->dev, "Unable to enable vconn\n"); - pd->vconn_enabled = false; - } - } - - val.intval = 0; - power_supply_set_property(pd->usb_psy, - POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val); - - usbpd_set_state(pd, PE_SRC_STARTUP); - break; - case PE_SRC_HARD_RESET: case PE_SNK_HARD_RESET: /* hard reset may sleep; handle it in the workqueue */ @@ -842,8 +815,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) /* Reset protocol layer */ pd->tx_msgid = 0; pd->rx_msgid = -1; - pd->rx_msg_len = 0; - pd->rx_msg_type = 0; if (!pd->in_pr_swap) { if 
(pd->pd_phy_opened) { @@ -872,10 +843,10 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) /* fall-through */ case PE_SNK_WAIT_FOR_CAPABILITIES: - if (pd->rx_msg_len && pd->rx_msg_type) - kick_sm(pd, 0); - else + spin_lock_irqsave(&pd->rx_lock, flags); + if (list_empty(&pd->rx_q)) kick_sm(pd, SINK_WAIT_CAP_TIME); + spin_unlock_irqrestore(&pd->rx_lock, flags); break; case PE_SNK_EVALUATE_CAPABILITY: @@ -883,7 +854,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) pd->hard_reset_count = 0; /* evaluate PDOs and select one */ - ret = pd_eval_src_caps(pd, pd->rx_payload); + ret = pd_eval_src_caps(pd); if (ret < 0) { usbpd_err(&pd->dev, "Invalid src_caps received. Skipping request\n"); break; @@ -971,6 +942,13 @@ int usbpd_register_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr) return -EINVAL; } + /* require connect/disconnect callbacks be implemented */ + if (!hdlr->connect || !hdlr->disconnect) { + usbpd_err(&pd->dev, "SVID 0x%04x connect/disconnect must be non-NULL\n", + hdlr->svid); + return -EINVAL; + } + usbpd_dbg(&pd->dev, "registered handler for SVID 0x%04x\n", hdlr->svid); list_add_tail(&hdlr->entry, &pd->svid_handlers); @@ -981,8 +959,8 @@ int usbpd_register_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr) for (i = 0; i < pd->num_svids; i++) { if (pd->discovered_svids[i] == hdlr->svid) { - if (hdlr->connect) - hdlr->connect(hdlr); + hdlr->connect(hdlr); + hdlr->discovered = true; break; } } @@ -1037,13 +1015,13 @@ int usbpd_send_svdm(struct usbpd *pd, u16 svid, u8 cmd, } EXPORT_SYMBOL(usbpd_send_svdm); -static void handle_vdm_rx(struct usbpd *pd) +static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg) { - u32 vdm_hdr = pd->rx_payload[0]; - u32 *vdos = &pd->rx_payload[1]; + u32 vdm_hdr = rx_msg->payload[0]; + u32 *vdos = &rx_msg->payload[1]; u16 svid = VDM_HDR_SVID(vdm_hdr); u16 *psvid; - u8 i, num_vdos = pd->rx_msg_len - 1; /* num objects minus header */ + u8 i, num_vdos = rx_msg->len - 1; /* num objects minus header */ u8 cmd = SVDM_HDR_CMD(vdm_hdr); u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr); struct usbpd_svid_handler *handler; @@ -1194,8 +1172,10 @@ static void handle_vdm_rx(struct usbpd *pd) svid = pd->discovered_svids[i]; if (svid) { handler = find_svid_handler(pd, svid); - if (handler && handler->connect) + if (handler) { handler->connect(handler); + handler->discovered = true; + } } } @@ -1300,10 +1280,14 @@ static void reset_vdm_state(struct usbpd *pd) { struct usbpd_svid_handler *handler; - pd->vdm_state = VDM_NONE; - list_for_each_entry(handler, &pd->svid_handlers, entry) - if (handler->disconnect) + list_for_each_entry(handler, &pd->svid_handlers, entry) { + if (handler->discovered) { handler->disconnect(handler); + handler->discovered = false; + } + } + + pd->vdm_state = VDM_NONE; kfree(pd->vdm_tx_retry); pd->vdm_tx_retry = NULL; kfree(pd->discovered_svids); @@ -1368,14 +1352,27 @@ static void vconn_swap(struct usbpd *pd) } } +static inline void rx_msg_cleanup(struct usbpd *pd) +{ + struct rx_msg *msg, *tmp; + unsigned long flags; + + spin_lock_irqsave(&pd->rx_lock, flags); + list_for_each_entry_safe(msg, tmp, &pd->rx_q, entry) { + list_del(&msg->entry); + kfree(msg); + } + spin_unlock_irqrestore(&pd->rx_lock, flags); +} + /* Handles current state and determines transitions */ static void usbpd_sm(struct work_struct *w) { struct usbpd *pd = container_of(w, struct usbpd, sm_work); union power_supply_propval val = {0}; int ret; - enum usbpd_control_msg_type ctrl_recvd = 0; - enum 
usbpd_data_msg_type data_recvd = 0; + struct rx_msg *rx_msg = NULL; + unsigned long flags; usbpd_dbg(&pd->dev, "handle state %s\n", usbpd_state_strings[pd->current_state]); @@ -1383,10 +1380,12 @@ static void usbpd_sm(struct work_struct *w) hrtimer_cancel(&pd->timer); pd->sm_queued = false; - if (pd->rx_msg_len) - data_recvd = pd->rx_msg_type; - else - ctrl_recvd = pd->rx_msg_type; + spin_lock_irqsave(&pd->rx_lock, flags); + if (!list_empty(&pd->rx_q)) { + rx_msg = list_first_entry(&pd->rx_q, struct rx_msg, entry); + list_del(&rx_msg->entry); + } + spin_unlock_irqrestore(&pd->rx_lock, flags); /* Disconnect? */ if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE && !pd->in_pr_swap) { @@ -1403,13 +1402,14 @@ static void usbpd_sm(struct work_struct *w) pd->in_pr_swap = false; pd->pd_connected = false; pd->in_explicit_contract = false; - pd->hard_reset = false; + pd->hard_reset_recvd = false; pd->caps_count = 0; pd->hard_reset_count = 0; pd->src_cap_id = 0; pd->requested_voltage = 0; pd->requested_current = 0; memset(&pd->received_pdos, 0, sizeof(pd->received_pdos)); + rx_msg_cleanup(pd); val.intval = 0; power_supply_set_property(pd->usb_psy, @@ -1456,24 +1456,29 @@ static void usbpd_sm(struct work_struct *w) } /* Hard reset? */ - if (pd->hard_reset) { + if (pd->hard_reset_recvd) { + pd->hard_reset_recvd = false; + val.intval = 1; power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val); pd->in_pr_swap = false; + rx_msg_cleanup(pd); reset_vdm_state(pd); - if (pd->current_pr == PR_SINK) + if (pd->current_pr == PR_SINK) { usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT); - else - usbpd_set_state(pd, PE_SRC_TRANSITION_TO_DEFAULT); + } else { + pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT; + kick_sm(pd, PS_HARD_RESET_TIME); + } goto sm_done; } /* Soft reset? 
*/ - if (ctrl_recvd == MSG_SOFT_RESET) { + if (IS_CTRL(rx_msg, MSG_SOFT_RESET)) { usbpd_dbg(&pd->dev, "Handle soft reset\n"); if (pd->current_pr == PR_SRC) @@ -1553,10 +1558,10 @@ static void usbpd_sm(struct work_struct *w) break; case PE_SRC_SEND_CAPABILITIES_WAIT: - if (data_recvd == MSG_REQUEST) { - pd->rdo = pd->rx_payload[0]; + if (IS_DATA(rx_msg, MSG_REQUEST)) { + pd->rdo = rx_msg->payload[0]; usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY); - } else if (data_recvd || ctrl_recvd) { + } else if (rx_msg) { usbpd_err(&pd->dev, "Unexpected message received\n"); usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); } else { @@ -1565,7 +1570,7 @@ static void usbpd_sm(struct work_struct *w) break; case PE_SRC_READY: - if (ctrl_recvd == MSG_GET_SOURCE_CAP) { + if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) { ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps, ARRAY_SIZE(default_src_caps), SOP_MSG); @@ -1574,7 +1579,7 @@ static void usbpd_sm(struct work_struct *w) usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); break; } - } else if (ctrl_recvd == MSG_GET_SINK_CAP) { + } else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) { ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES, default_snk_caps, ARRAY_SIZE(default_snk_caps), SOP_MSG); @@ -1582,10 +1587,10 @@ static void usbpd_sm(struct work_struct *w) usbpd_err(&pd->dev, "Error sending Sink Caps\n"); usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); } - } else if (data_recvd == MSG_REQUEST) { - pd->rdo = pd->rx_payload[0]; + } else if (IS_DATA(rx_msg, MSG_REQUEST)) { + pd->rdo = rx_msg->payload[0]; usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY); - } else if (ctrl_recvd == MSG_DR_SWAP) { + } else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) { if (pd->vdm_state == MODE_ENTERED) { usbpd_set_state(pd, PE_SRC_HARD_RESET); break; @@ -1600,7 +1605,7 @@ static void usbpd_sm(struct work_struct *w) dr_swap(pd); kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE); - } else if (ctrl_recvd == MSG_PR_SWAP) { + } else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) { /* lock in current mode */ set_power_role(pd, pd->current_pr); @@ -1615,7 +1620,7 @@ static void usbpd_sm(struct work_struct *w) pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF; kick_sm(pd, SRC_TRANSITION_TIME); break; - } else if (ctrl_recvd == MSG_VCONN_SWAP) { + } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) { ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG); if (ret) { usbpd_err(&pd->dev, "Error sending Accept\n"); @@ -1625,13 +1630,45 @@ static void usbpd_sm(struct work_struct *w) vconn_swap(pd); } else { - if (data_recvd == MSG_VDM) - handle_vdm_rx(pd); + if (IS_DATA(rx_msg, MSG_VDM)) + handle_vdm_rx(pd, rx_msg); else handle_vdm_tx(pd); } break; + case PE_SRC_TRANSITION_TO_DEFAULT: + if (pd->vconn_enabled) + regulator_disable(pd->vconn); + regulator_disable(pd->vbus); + + if (pd->current_dr != DR_DFP) { + extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0); + pd->current_dr = DR_DFP; + pd_phy_update_roles(pd->current_dr, pd->current_pr); + } + + msleep(SRC_RECOVER_TIME); + + ret = regulator_enable(pd->vbus); + if (ret) + usbpd_err(&pd->dev, "Unable to enable vbus\n"); + + if (pd->vconn_enabled) { + ret = regulator_enable(pd->vconn); + if (ret) { + usbpd_err(&pd->dev, "Unable to enable vconn\n"); + pd->vconn_enabled = false; + } + } + + val.intval = 0; + power_supply_set_property(pd->usb_psy, + POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val); + + usbpd_set_state(pd, PE_SRC_STARTUP); + break; + case PE_SRC_HARD_RESET: val.intval = 1; power_supply_set_property(pd->usb_psy, @@ -1639,11 +1676,11 @@ static void usbpd_sm(struct work_struct *w) 
pd_send_hard_reset(pd); pd->in_explicit_contract = false; + rx_msg_cleanup(pd); reset_vdm_state(pd); - usleep_range(PS_HARD_RESET_TIME * USEC_PER_MSEC, - (PS_HARD_RESET_TIME + 5) * USEC_PER_MSEC); - usbpd_set_state(pd, PE_SRC_TRANSITION_TO_DEFAULT); + pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT; + kick_sm(pd, PS_HARD_RESET_TIME); break; case PE_SNK_STARTUP: @@ -1651,7 +1688,7 @@ static void usbpd_sm(struct work_struct *w) break; case PE_SNK_WAIT_FOR_CAPABILITIES: - if (data_recvd == MSG_SOURCE_CAPABILITIES) { + if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) { val.intval = 0; power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PD_IN_HARD_RESET, @@ -1661,6 +1698,11 @@ static void usbpd_sm(struct work_struct *w) power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PD_ACTIVE, &val); + /* save the PDOs so userspace can further evaluate */ + memcpy(&pd->received_pdos, rx_msg->payload, + sizeof(pd->received_pdos)); + pd->src_cap_id++; + usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY); } else if (pd->hard_reset_count < 3) { usbpd_set_state(pd, PE_SNK_HARD_RESET); @@ -1688,7 +1730,7 @@ static void usbpd_sm(struct work_struct *w) break; case PE_SNK_SELECT_CAPABILITY: - if (ctrl_recvd == MSG_ACCEPT) { + if (IS_CTRL(rx_msg, MSG_ACCEPT)) { /* prepare for voltage increase/decrease */ val.intval = pd->requested_voltage; power_supply_set_property(pd->usb_psy, @@ -1708,13 +1750,14 @@ static void usbpd_sm(struct work_struct *w) pd->selected_pdo = pd->requested_pdo; usbpd_set_state(pd, PE_SNK_TRANSITION_SINK); - } else if (ctrl_recvd == MSG_REJECT || ctrl_recvd == MSG_WAIT) { + } else if (IS_CTRL(rx_msg, MSG_REJECT) || + IS_CTRL(rx_msg, MSG_WAIT)) { if (pd->in_explicit_contract) usbpd_set_state(pd, PE_SNK_READY); else usbpd_set_state(pd, PE_SNK_WAIT_FOR_CAPABILITIES); - } else if (pd->rx_msg_type) { + } else if (rx_msg) { usbpd_err(&pd->dev, "Invalid response to sink request\n"); usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); } else { @@ -1724,7 +1767,7 @@ static void usbpd_sm(struct work_struct *w) break; case PE_SNK_TRANSITION_SINK: - if (ctrl_recvd == MSG_PS_RDY) { + if (IS_CTRL(rx_msg, MSG_PS_RDY)) { val.intval = pd->requested_voltage; power_supply_set_property(pd->usb_psy, pd->requested_voltage >= pd->current_voltage ? 
@@ -1745,9 +1788,14 @@ static void usbpd_sm(struct work_struct *w) break; case PE_SNK_READY: - if (data_recvd == MSG_SOURCE_CAPABILITIES) { + if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) { + /* save the PDOs so userspace can further evaluate */ + memcpy(&pd->received_pdos, rx_msg->payload, + sizeof(pd->received_pdos)); + pd->src_cap_id++; + usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY); - } else if (ctrl_recvd == MSG_GET_SINK_CAP) { + } else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) { ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES, default_snk_caps, ARRAY_SIZE(default_snk_caps), SOP_MSG); @@ -1755,7 +1803,7 @@ static void usbpd_sm(struct work_struct *w) usbpd_err(&pd->dev, "Error sending Sink Caps\n"); usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); } - } else if (ctrl_recvd == MSG_GET_SOURCE_CAP) { + } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) { ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps, ARRAY_SIZE(default_src_caps), SOP_MSG); @@ -1764,7 +1812,7 @@ static void usbpd_sm(struct work_struct *w) usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); break; } - } else if (ctrl_recvd == MSG_DR_SWAP) { + } else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) { if (pd->vdm_state == MODE_ENTERED) { usbpd_set_state(pd, PE_SNK_HARD_RESET); break; @@ -1779,7 +1827,7 @@ static void usbpd_sm(struct work_struct *w) dr_swap(pd); kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE); - } else if (ctrl_recvd == MSG_PR_SWAP) { + } else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) { /* lock in current mode */ set_power_role(pd, pd->current_pr); @@ -1794,7 +1842,7 @@ static void usbpd_sm(struct work_struct *w) pd->in_pr_swap = true; usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF); break; - } else if (ctrl_recvd == MSG_VCONN_SWAP) { + } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) { /* * if VCONN is connected to VBUS, make sure we are * not in high voltage contract, otherwise reject. @@ -1821,16 +1869,14 @@ static void usbpd_sm(struct work_struct *w) vconn_swap(pd); } else { - if (data_recvd == MSG_VDM) - handle_vdm_rx(pd); + if (IS_DATA(rx_msg, MSG_VDM)) + handle_vdm_rx(pd, rx_msg); else handle_vdm_tx(pd); } break; case PE_SNK_TRANSITION_TO_DEFAULT: - pd->hard_reset = false; - val.intval = 0; power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val); @@ -1870,7 +1916,7 @@ static void usbpd_sm(struct work_struct *w) case PE_SRC_SEND_SOFT_RESET: case PE_SNK_SEND_SOFT_RESET: - if (ctrl_recvd == MSG_ACCEPT) { + if (IS_CTRL(rx_msg, MSG_ACCEPT)) { usbpd_set_state(pd, pd->current_pr == PR_SRC ? PE_SRC_SEND_CAPABILITIES : PE_SNK_WAIT_FOR_CAPABILITIES); @@ -1907,7 +1953,7 @@ static void usbpd_sm(struct work_struct *w) break; case PE_DRS_SEND_DR_SWAP: - if (ctrl_recvd == MSG_ACCEPT) + if (IS_CTRL(rx_msg, MSG_ACCEPT)) dr_swap(pd); usbpd_set_state(pd, pd->current_pr == PR_SRC ? 
@@ -1915,7 +1961,7 @@ static void usbpd_sm(struct work_struct *w) break; case PE_PRS_SRC_SNK_SEND_SWAP: - if (ctrl_recvd != MSG_ACCEPT) { + if (!IS_CTRL(rx_msg, MSG_ACCEPT)) { pd->current_state = PE_SRC_READY; break; } @@ -1950,14 +1996,14 @@ static void usbpd_sm(struct work_struct *w) break; case PE_PRS_SRC_SNK_WAIT_SOURCE_ON: - if (ctrl_recvd == MSG_PS_RDY) + if (IS_CTRL(rx_msg, MSG_PS_RDY)) usbpd_set_state(pd, PE_SNK_STARTUP); else usbpd_set_state(pd, PE_ERROR_RECOVERY); break; case PE_PRS_SNK_SRC_SEND_SWAP: - if (ctrl_recvd != MSG_ACCEPT) { + if (!IS_CTRL(rx_msg, MSG_ACCEPT)) { pd->current_state = PE_SNK_READY; break; } @@ -1967,7 +2013,7 @@ static void usbpd_sm(struct work_struct *w) break; case PE_PRS_SNK_SRC_TRANSITION_TO_OFF: - if (ctrl_recvd != MSG_PS_RDY) { + if (!IS_CTRL(rx_msg, MSG_PS_RDY)) { usbpd_set_state(pd, PE_ERROR_RECOVERY); break; } @@ -1997,7 +2043,7 @@ static void usbpd_sm(struct work_struct *w) break; case PE_VCS_WAIT_FOR_VCONN: - if (ctrl_recvd == MSG_PS_RDY) { + if (IS_CTRL(rx_msg, MSG_PS_RDY)) { /* * hopefully redundant check but in case not enabled * avoids unbalanced regulator disable count @@ -2022,10 +2068,9 @@ static void usbpd_sm(struct work_struct *w) break; } - /* Rx message should have been consumed now */ - pd->rx_msg_type = pd->rx_msg_len = 0; - sm_done: + kfree(rx_msg); + if (!pd->sm_queued) pm_relax(&pd->dev); } @@ -2117,11 +2162,11 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr) * During hard reset when VBUS goes to 0 the CC logic * will report this as a disconnection. In those cases * it can be ignored, however the downside is that - * pd->hard_reset can be momentarily true even when a - * non-PD capable source is attached, and can't be - * distinguished from a physical disconnect. In that - * case, allow for the common case of disconnecting - * from an SDP. + * we can also happen to be in the SNK_Transition_to_default + * state due to a hard reset attempt even with a non-PD + * capable source, in which a physical disconnect may get + * masked. In that case, allow for the common case of + * disconnecting from an SDP. 
* * The less common case is a PD-capable SDP which will * result in a hard reset getting treated like a @@ -2662,6 +2707,8 @@ struct usbpd *usbpd_create(struct device *parent) pd->current_dr = DR_NONE; list_add_tail(&pd->instance, &_usbpd); + spin_lock_init(&pd->rx_lock); + INIT_LIST_HEAD(&pd->rx_q); INIT_LIST_HEAD(&pd->svid_handlers); /* force read initial power_supply values */ diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index c0866971db2b..1947ea0e0988 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2856,14 +2856,16 @@ static int edge_startup(struct usb_serial *serial) /* not set up yet, so do it now */ edge_serial->interrupt_read_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!edge_serial->interrupt_read_urb) - return -ENOMEM; + if (!edge_serial->interrupt_read_urb) { + response = -ENOMEM; + break; + } edge_serial->interrupt_in_buffer = kmalloc(buffer_size, GFP_KERNEL); if (!edge_serial->interrupt_in_buffer) { - usb_free_urb(edge_serial->interrupt_read_urb); - return -ENOMEM; + response = -ENOMEM; + break; } edge_serial->interrupt_in_endpoint = endpoint->bEndpointAddress; @@ -2891,14 +2893,16 @@ static int edge_startup(struct usb_serial *serial) /* not set up yet, so do it now */ edge_serial->read_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!edge_serial->read_urb) - return -ENOMEM; + if (!edge_serial->read_urb) { + response = -ENOMEM; + break; + } edge_serial->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL); if (!edge_serial->bulk_in_buffer) { - usb_free_urb(edge_serial->read_urb); - return -ENOMEM; + response = -ENOMEM; + break; } edge_serial->bulk_in_endpoint = endpoint->bEndpointAddress; @@ -2924,9 +2928,22 @@ static int edge_startup(struct usb_serial *serial) } } - if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) { - dev_err(ddev, "Error - the proper endpoints were not found!\n"); - return -ENODEV; + if (response || !interrupt_in_found || !bulk_in_found || + !bulk_out_found) { + if (!response) { + dev_err(ddev, "expected endpoints not found\n"); + response = -ENODEV; + } + + usb_free_urb(edge_serial->interrupt_read_urb); + kfree(edge_serial->interrupt_in_buffer); + + usb_free_urb(edge_serial->read_urb); + kfree(edge_serial->bulk_in_buffer); + + kfree(edge_serial); + + return response; } /* start interrupt read for this edgeport this interrupt will @@ -2949,16 +2966,9 @@ static void edge_disconnect(struct usb_serial *serial) { struct edgeport_serial *edge_serial = usb_get_serial_data(serial); - /* stop reads and writes on all ports */ - /* free up our endpoint stuff */ if (edge_serial->is_epic) { usb_kill_urb(edge_serial->interrupt_read_urb); - usb_free_urb(edge_serial->interrupt_read_urb); - kfree(edge_serial->interrupt_in_buffer); - usb_kill_urb(edge_serial->read_urb); - usb_free_urb(edge_serial->read_urb); - kfree(edge_serial->bulk_in_buffer); } } @@ -2971,6 +2981,16 @@ static void edge_release(struct usb_serial *serial) { struct edgeport_serial *edge_serial = usb_get_serial_data(serial); + if (edge_serial->is_epic) { + usb_kill_urb(edge_serial->interrupt_read_urb); + usb_free_urb(edge_serial->interrupt_read_urb); + kfree(edge_serial->interrupt_in_buffer); + + usb_kill_urb(edge_serial->read_urb); + usb_free_urb(edge_serial->read_urb); + kfree(edge_serial->bulk_in_buffer); + } + kfree(edge_serial); } diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index e07b15ed5814..7faa901ee47f 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -2376,6 
+2376,10 @@ static void keyspan_release(struct usb_serial *serial) s_priv = usb_get_serial_data(serial); + /* Make sure to unlink the URBs submitted in attach. */ + usb_kill_urb(s_priv->instat_urb); + usb_kill_urb(s_priv->indat_urb); + usb_free_urb(s_priv->instat_urb); usb_free_urb(s_priv->indat_urb); usb_free_urb(s_priv->glocont_urb); diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 78b4f64c6b00..06c7dbc1c802 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -2007,6 +2007,7 @@ static void mos7720_release(struct usb_serial *serial) urblist_entry) usb_unlink_urb(urbtrack->urb); spin_unlock_irqrestore(&mos_parport->listlock, flags); + parport_del_port(mos_parport->pp); kref_put(&mos_parport->ref_count, destroy_mos_parport); } diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c index 31a8b47f1ac6..c6596cbcc4b6 100644 --- a/drivers/usb/serial/mxuport.c +++ b/drivers/usb/serial/mxuport.c @@ -1259,6 +1259,15 @@ static int mxuport_attach(struct usb_serial *serial) return 0; } +static void mxuport_release(struct usb_serial *serial) +{ + struct usb_serial_port *port0 = serial->port[0]; + struct usb_serial_port *port1 = serial->port[1]; + + usb_serial_generic_close(port1); + usb_serial_generic_close(port0); +} + static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port) { struct mxuport_port *mxport = usb_get_serial_port_data(port); @@ -1361,6 +1370,7 @@ static struct usb_serial_driver mxuport_device = { .probe = mxuport_probe, .port_probe = mxuport_port_probe, .attach = mxuport_attach, + .release = mxuport_release, .calc_num_ports = mxuport_calc_num_ports, .open = mxuport_open, .close = mxuport_close, diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c6f497f16526..d96d423d00e6 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -375,18 +375,22 @@ static void option_instat_callback(struct urb *urb); #define HAIER_PRODUCT_CE81B 0x10f8 #define HAIER_PRODUCT_CE100 0x2009 -/* Cinterion (formerly Siemens) products */ -#define SIEMENS_VENDOR_ID 0x0681 -#define CINTERION_VENDOR_ID 0x1e2d +/* Gemalto's Cinterion products (formerly Siemens) */ +#define SIEMENS_VENDOR_ID 0x0681 +#define CINTERION_VENDOR_ID 0x1e2d +#define CINTERION_PRODUCT_HC25_MDMNET 0x0040 #define CINTERION_PRODUCT_HC25_MDM 0x0047 -#define CINTERION_PRODUCT_HC25_MDMNET 0x0040 +#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */ #define CINTERION_PRODUCT_HC28_MDM 0x004C -#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */ #define CINTERION_PRODUCT_EU3_E 0x0051 #define CINTERION_PRODUCT_EU3_P 0x0052 #define CINTERION_PRODUCT_PH8 0x0053 #define CINTERION_PRODUCT_AHXX 0x0055 #define CINTERION_PRODUCT_PLXX 0x0060 +#define CINTERION_PRODUCT_PH8_2RMNET 0x0082 +#define CINTERION_PRODUCT_PH8_AUDIO 0x0083 +#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 +#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -633,6 +637,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = { .reserved = BIT(1) | BIT(2) | BIT(3), }; +static const struct option_blacklist_info cinterion_rmnet2_blacklist = { + .reserved = BIT(4) | BIT(5), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -1602,7 +1610,79 @@ static const struct usb_device_id option_ids[] = { .driver_info = 
(kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) }, @@ -1613,6 +1693,61 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 
0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) }, @@ -1712,7 +1847,13 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff), + .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index 504f5bff79c0..b18974cbd995 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -141,6 +141,7 @@ static void qt2_release(struct usb_serial *serial) serial_priv = usb_get_serial_data(serial); + usb_kill_urb(serial_priv->read_urb); usb_free_urb(serial_priv->read_urb); kfree(serial_priv->read_buffer); kfree(serial_priv); diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 9baf081174ce..e26e32169a36 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -811,6 +811,7 @@ static int uas_slave_configure(struct scsi_device *sdev) if (devinfo->flags & US_FL_BROKEN_FUA) sdev->broken_fua = 1; + scsi_change_queue_depth(sdev, devinfo->qdepth - 2); return 0; } diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c index fa2e47e06503..75e42ca8cd88 100644 --- a/drivers/video/fbdev/msm/mdss_dp.c +++ b/drivers/video/fbdev/msm/mdss_dp.c @@ -57,6 +57,10 @@ static u32 supported_modes[] = { HDMI_VFRMT_4096x2160p60_256_135, HDMI_EVFRMT_4096x2160p24_16_9 }; +static int mdss_dp_off_irq(struct mdss_dp_drv_pdata *dp_drv); +static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata); +static inline void mdss_dp_link_retraining(struct mdss_dp_drv_pdata *dp); + static void mdss_dp_put_dt_clk_data(struct device *dev, struct dss_module_power *module_power) { @@ -902,8 +906,6 @@ static int dp_audio_info_setup(struct platform_device *pdev, mdss_dp_set_safe_to_exit_level(&dp_ctrl->ctrl_io, dp_ctrl->lane_cnt); mdss_dp_audio_enable(&dp_ctrl->ctrl_io, true); - dp_ctrl->wait_for_audio_comp = true; - return rc; } /* dp_audio_info_setup */ @@ -926,17 +928,6 @@ static int dp_get_audio_edid_blk(struct platform_device *pdev, return rc; } /* dp_get_audio_edid_blk */ -static void dp_audio_codec_teardown_done(struct platform_device *pdev) -{ - struct mdss_dp_drv_pdata *dp = platform_get_drvdata(pdev); - - if (!dp) - pr_err("invalid input\n"); - - pr_debug("audio codec teardown done\n"); - 
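/*
 * Editor's sketch (not part of the diff): the quatech2 hunk above adds
 * usb_kill_urb() ahead of usb_free_urb() so an in-flight read URB is
 * stopped before its buffer and URB memory are released.  A minimal
 * release path in the same style, with hypothetical struct and field
 * names, would look roughly like this:
 */
#include <linux/slab.h>
#include <linux/usb.h>

struct example_priv {			/* hypothetical private data */
	struct urb *read_urb;
	void *read_buffer;
};

static void example_release(struct example_priv *priv)
{
	usb_kill_urb(priv->read_urb);	/* cancel any pending transfer first */
	usb_free_urb(priv->read_urb);	/* then drop the URB reference */
	kfree(priv->read_buffer);
	kfree(priv);
}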
complete_all(&dp->audio_comp); -} - static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp) { int ret = 0; @@ -958,8 +949,6 @@ static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp) dp_get_audio_edid_blk; dp->ext_audio_data.codec_ops.cable_status = dp_get_cable_status; - dp->ext_audio_data.codec_ops.teardown_done = - dp_audio_codec_teardown_done; if (!dp->pdev->dev.of_node) { pr_err("%s cannot find dp dev.of_node\n", __func__); @@ -1040,12 +1029,10 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic) return 0; } /* dp_init_panel_info */ -static inline void mdss_dp_set_audio_switch_node( - struct mdss_dp_drv_pdata *dp, int val) +static inline void mdss_dp_ack_state(struct mdss_dp_drv_pdata *dp, int val) { if (dp && dp->ext_audio_data.intf_ops.notify) - dp->ext_audio_data.intf_ops.notify(dp->ext_pdev, - val); + dp->ext_audio_data.intf_ops.notify(dp->ext_pdev, val); } /** @@ -1156,19 +1143,27 @@ static void mdss_dp_configure_source_params(struct mdss_dp_drv_pdata *dp, * * Initiates training of the DP main link and checks the state of the main * link after the training is complete. + * + * Return: error code. -EINVAL if any invalid data or -EAGAIN if retraining + * is required. */ -static void mdss_dp_train_main_link(struct mdss_dp_drv_pdata *dp) +static int mdss_dp_train_main_link(struct mdss_dp_drv_pdata *dp) { + int ret = 0; int ready = 0; pr_debug("enter\n"); + ret = mdss_dp_link_train(dp); + if (ret) + goto end; - mdss_dp_link_train(dp); mdss_dp_wait4train(dp); ready = mdss_dp_mainlink_ready(dp, BIT(0)); pr_debug("main link %s\n", ready ? "READY" : "NOT READY"); +end: + return ret; } static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv) @@ -1178,33 +1173,43 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv) struct lane_mapping ln_map; /* wait until link training is completed */ - mutex_lock(&dp_drv->train_mutex); - pr_debug("enter\n"); - orientation = usbpd_get_plug_orientation(dp_drv->pd); - pr_debug("plug orientation = %d\n", orientation); + do { + if (ret == -EAGAIN) { + mdss_dp_mainlink_push_idle(&dp_drv->panel_data); + mdss_dp_off_irq(dp_drv); + } - ret = mdss_dp_get_lane_mapping(dp_drv, orientation, &ln_map); - if (ret) - goto exit; + mutex_lock(&dp_drv->train_mutex); - mdss_dp_phy_share_lane_config(&dp_drv->phy_io, - orientation, dp_drv->dpcd.max_lane_count); + orientation = usbpd_get_plug_orientation(dp_drv->pd); + pr_debug("plug orientation = %d\n", orientation); - ret = mdss_dp_enable_mainlink_clocks(dp_drv); - if (ret) - goto exit; + ret = mdss_dp_get_lane_mapping(dp_drv, orientation, &ln_map); + if (ret) + goto exit; - mdss_dp_mainlink_reset(&dp_drv->ctrl_io); + mdss_dp_phy_share_lane_config(&dp_drv->phy_io, + orientation, dp_drv->dpcd.max_lane_count); - reinit_completion(&dp_drv->idle_comp); + ret = mdss_dp_enable_mainlink_clocks(dp_drv); + if (ret) + goto exit; - mdss_dp_configure_source_params(dp_drv, &ln_map); + mdss_dp_mainlink_reset(&dp_drv->ctrl_io); - mdss_dp_train_main_link(dp_drv); + reinit_completion(&dp_drv->idle_comp); + + mdss_dp_configure_source_params(dp_drv, &ln_map); + + dp_drv->power_on = true; + + ret = mdss_dp_train_main_link(dp_drv); + + mutex_unlock(&dp_drv->train_mutex); + } while (ret == -EAGAIN); - dp_drv->power_on = true; pr_debug("end\n"); exit: @@ -1273,12 +1278,19 @@ int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv) mdss_dp_configure_source_params(dp_drv, &ln_map); link_training: - mdss_dp_train_main_link(dp_drv); + dp_drv->power_on = true; + + if (-EAGAIN == 
mdss_dp_train_main_link(dp_drv)) { + mutex_unlock(&dp_drv->train_mutex); + + mdss_dp_link_retraining(dp_drv); + return 0; + } dp_drv->cont_splash = 0; dp_drv->power_on = true; - mdss_dp_set_audio_switch_node(dp_drv, true); + mdss_dp_ack_state(dp_drv, true); pr_debug("End-\n"); exit: @@ -1311,6 +1323,12 @@ static inline bool mdss_dp_is_link_status_updated(struct mdss_dp_drv_pdata *dp) return dp->link_status.link_status_updated; } +static inline bool mdss_dp_is_downstream_port_status_changed( + struct mdss_dp_drv_pdata *dp) +{ + return dp->link_status.downstream_port_status_changed; +} + static inline bool mdss_dp_is_link_training_requested( struct mdss_dp_drv_pdata *dp) { @@ -1390,6 +1408,7 @@ static int mdss_dp_off_hpd(struct mdss_dp_drv_pdata *dp_drv) dp_drv->dp_initialized = false; dp_drv->power_on = false; + mdss_dp_ack_state(dp_drv, false); mutex_unlock(&dp_drv->train_mutex); pr_debug("DP off done\n"); @@ -1413,52 +1432,30 @@ int mdss_dp_off(struct mdss_panel_data *pdata) return mdss_dp_off_hpd(dp); } -static void mdss_dp_send_cable_notification( +static int mdss_dp_send_cable_notification( struct mdss_dp_drv_pdata *dp, int val) { + int ret = 0; if (!dp) { DEV_ERR("%s: invalid input\n", __func__); - return; + ret = -EINVAL; + goto end; } if (dp && dp->ext_audio_data.intf_ops.hpd) - dp->ext_audio_data.intf_ops.hpd(dp->ext_pdev, + ret = dp->ext_audio_data.intf_ops.hpd(dp->ext_pdev, dp->ext_audio_data.type, val); -} -static void mdss_dp_audio_codec_wait(struct mdss_dp_drv_pdata *dp) -{ - const int audio_completion_timeout_ms = HZ * 3; - int ret = 0; - - if (!dp->wait_for_audio_comp) - return; - - reinit_completion(&dp->audio_comp); - ret = wait_for_completion_timeout(&dp->audio_comp, - audio_completion_timeout_ms); - if (ret <= 0) - pr_warn("audio codec teardown timed out\n"); - - dp->wait_for_audio_comp = false; +end: + return ret; } -static void mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, bool enable) +static int mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, bool enable) { - if (enable) { - mdss_dp_send_cable_notification(dp, enable); - } else { - mdss_dp_set_audio_switch_node(dp, enable); - mdss_dp_audio_codec_wait(dp); - mdss_dp_send_cable_notification(dp, enable); - } - - pr_debug("notify state %s done\n", - enable ? 
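/*
 * Editor's sketch (not part of the diff): after this change the link
 * training helper reports -EAGAIN when the link rate has been
 * down-shifted and another attempt is wanted, so the IRQ path loops on
 * that return code instead of using the old goto-based retry.  The shape
 * of the loop, with hypothetical names, is roughly:
 */
#include <linux/errno.h>
#include <linux/mutex.h>

struct example_dp {			/* hypothetical driver data */
	struct mutex train_mutex;
};

/* assumed helpers: training returns 0, -EAGAIN (rate reduced, retry) or
 * another negative error; teardown undoes a failed attempt */
int example_train_link(struct example_dp *dp);
void example_link_teardown(struct example_dp *dp);

static int example_bring_up_link(struct example_dp *dp)
{
	int ret = 0;

	do {
		if (ret == -EAGAIN)	/* previous pass down-shifted the rate */
			example_link_teardown(dp);

		mutex_lock(&dp->train_mutex);
		ret = example_train_link(dp);
		mutex_unlock(&dp->train_mutex);
	} while (ret == -EAGAIN);

	return ret;
}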
"ENABLE" : "DISABLE"); + return mdss_dp_send_cable_notification(dp, enable); } - static int mdss_dp_edid_init(struct mdss_panel_data *pdata) { struct mdss_dp_drv_pdata *dp_drv = NULL; @@ -1614,22 +1611,18 @@ end: return rc; } -static void mdss_dp_hdcp_cb(void *ptr, enum hdcp_states status) +static void mdss_dp_hdcp_cb_work(struct work_struct *work) { - struct mdss_dp_drv_pdata *dp = ptr; + struct mdss_dp_drv_pdata *dp; + struct delayed_work *dw = to_delayed_work(work); struct hdcp_ops *ops; int rc = 0; - if (!dp) { - pr_debug("invalid input\n"); - return; - } + dp = container_of(dw, struct mdss_dp_drv_pdata, hdcp_cb_work); ops = dp->hdcp.ops; - mutex_lock(&dp->train_mutex); - - switch (status) { + switch (dp->hdcp_status) { case HDCP_STATE_AUTHENTICATED: pr_debug("hdcp authenticated\n"); dp->hdcp.auth_state = true; @@ -1652,8 +1645,20 @@ static void mdss_dp_hdcp_cb(void *ptr, enum hdcp_states status) default: break; } +} - mutex_unlock(&dp->train_mutex); +static void mdss_dp_hdcp_cb(void *ptr, enum hdcp_states status) +{ + struct mdss_dp_drv_pdata *dp = ptr; + + if (!dp) { + pr_err("invalid input\n"); + return; + } + + dp->hdcp_status = status; + + queue_delayed_work(dp->workq, &dp->hdcp_cb_work, HZ/4); } static int mdss_dp_hdcp_init(struct mdss_panel_data *pdata) @@ -1691,19 +1696,19 @@ static int mdss_dp_hdcp_init(struct mdss_panel_data *pdata) hdcp_init_data.sec_access = true; hdcp_init_data.client_id = HDCP_CLIENT_DP; - dp_drv->hdcp.data = hdcp_1x_init(&hdcp_init_data); - if (IS_ERR_OR_NULL(dp_drv->hdcp.data)) { + dp_drv->hdcp.hdcp1 = hdcp_1x_init(&hdcp_init_data); + if (IS_ERR_OR_NULL(dp_drv->hdcp.hdcp1)) { pr_err("Error hdcp init\n"); rc = -EINVAL; goto error; } - dp_drv->panel_data.panel_info.hdcp_1x_data = dp_drv->hdcp.data; + dp_drv->panel_data.panel_info.hdcp_1x_data = dp_drv->hdcp.hdcp1; pr_debug("HDCP 1.3 initialized\n"); dp_drv->hdcp.hdcp2 = dp_hdcp2p2_init(&hdcp_init_data); - if (!IS_ERR_OR_NULL(dp_drv->hdcp.data)) + if (!IS_ERR_OR_NULL(dp_drv->hdcp.hdcp2)) pr_debug("HDCP 2.2 initialized\n"); dp_drv->hdcp.feature_enabled = true; @@ -1882,6 +1887,13 @@ static void mdss_dp_update_hdcp_info(struct mdss_dp_drv_pdata *dp) dp->hdcp.ops = ops; } +static inline bool dp_is_hdcp_enabled(struct mdss_dp_drv_pdata *dp_drv) +{ + return dp_drv->hdcp.feature_enabled && + (dp_drv->hdcp.hdcp1_present || dp_drv->hdcp.hdcp2_present) && + dp_drv->hdcp.ops; +} + static int mdss_dp_event_handler(struct mdss_panel_data *pdata, int event, void *arg) { @@ -1913,8 +1925,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata, rc = mdss_dp_off(pdata); break; case MDSS_EVENT_BLANK: - if (dp->hdcp.ops && dp->hdcp.ops->off) + if (dp_is_hdcp_enabled(dp) && dp->hdcp.ops->off) { + flush_delayed_work(&dp->hdcp_cb_work); dp->hdcp.ops->off(dp->hdcp.data); + } mdss_dp_mainlink_push_idle(pdata); break; @@ -2191,6 +2205,11 @@ irqreturn_t dp_isr(int irq, void *ptr) dp_aux_native_handler(dp, isr1); } + if (dp->hdcp.ops && dp->hdcp.ops->isr) { + if (dp->hdcp.ops->isr(dp->hdcp.data)) + pr_err("dp_hdcp_isr failed\n"); + } + return IRQ_HANDLED; } @@ -2205,6 +2224,7 @@ static int mdss_dp_event_setup(struct mdss_dp_drv_pdata *dp) } INIT_WORK(&dp->work, mdss_dp_event_work); + INIT_DELAYED_WORK(&dp->hdcp_cb_work, mdss_dp_hdcp_cb_work); return 0; } @@ -2307,14 +2327,20 @@ static int mdss_dp_hpd_irq_notify_clients(struct mdss_dp_drv_pdata *dp) int ret = 0; if (dp->hpd_irq_toggled) { - mdss_dp_notify_clients(dp, false); - - reinit_completion(&dp->irq_comp); - ret = wait_for_completion_timeout(&dp->irq_comp, - 
irq_comp_timeout); - if (ret <= 0) { - pr_warn("irq_comp timed out\n"); - return -EINVAL; + dp->hpd_irq_clients_notified = true; + + ret = mdss_dp_notify_clients(dp, false); + + if (!IS_ERR_VALUE(ret) && ret) { + reinit_completion(&dp->irq_comp); + ret = wait_for_completion_timeout(&dp->irq_comp, + irq_comp_timeout); + if (ret <= 0) { + pr_warn("irq_comp timed out\n"); + ret = -EINVAL; + } else { + ret = 0; + } } } @@ -2344,19 +2370,24 @@ static inline void mdss_dp_link_retraining(struct mdss_dp_drv_pdata *dp) * This function will check for changes in the link status, e.g. clock * recovery done on all lanes, and trigger link training if there is a * failure/error on the link. + * + * The function will return 0 if the a link status update has been processed, + * otherwise it will return -EINVAL. */ -static void mdss_dp_process_link_status_update(struct mdss_dp_drv_pdata *dp) +static int mdss_dp_process_link_status_update(struct mdss_dp_drv_pdata *dp) { if (!mdss_dp_is_link_status_updated(dp) || (mdss_dp_aux_channel_eq_done(dp) && mdss_dp_aux_clock_recovery_done(dp))) - return; + return -EINVAL; pr_info("channel_eq_done = %d, clock_recovery_done = %d\n", mdss_dp_aux_channel_eq_done(dp), mdss_dp_aux_clock_recovery_done(dp)); mdss_dp_link_retraining(dp); + + return 0; } /** @@ -2366,11 +2397,14 @@ static void mdss_dp_process_link_status_update(struct mdss_dp_drv_pdata *dp) * This function will handle new link training requests that are initiated by * the sink. In particular, it will update the requested lane count and link * link rate, and then trigger the link retraining procedure. + * + * The function will return 0 if a link training request has been processed, + * otherwise it will return -EINVAL. */ -static void mdss_dp_process_link_training_request(struct mdss_dp_drv_pdata *dp) +static int mdss_dp_process_link_training_request(struct mdss_dp_drv_pdata *dp) { if (!mdss_dp_is_link_training_requested(dp)) - return; + return -EINVAL; mdss_dp_send_test_response(dp); @@ -2383,6 +2417,28 @@ static void mdss_dp_process_link_training_request(struct mdss_dp_drv_pdata *dp) dp->link_rate = dp->test_data.test_link_rate; mdss_dp_link_retraining(dp); + + return 0; +} + +/** + * mdss_dp_process_downstream_port_status_change() - process port status changes + * @dp: Display Port Driver data + * + * This function will handle downstream port updates that are initiated by + * the sink. If the downstream port status has changed, the EDID is read via + * AUX. + * + * The function will return 0 if a downstream port update has been + * processed, otherwise it will return -EINVAL. + */ +static int mdss_dp_process_downstream_port_status_change( + struct mdss_dp_drv_pdata *dp) +{ + if (!mdss_dp_is_downstream_port_status_changed(dp)) + return -EINVAL; + + return mdss_dp_edid_read(dp); } /** @@ -2393,21 +2449,31 @@ static void mdss_dp_process_link_training_request(struct mdss_dp_drv_pdata *dp) * (including cases when there are back to back HPD IRQ HIGH) indicating * the start of a new link training request or sink status update. 
*/ -static void mdss_dp_process_hpd_irq_high(struct mdss_dp_drv_pdata *dp) +static int mdss_dp_process_hpd_irq_high(struct mdss_dp_drv_pdata *dp) { - pr_debug("enter: HPD IRQ High\n"); + int ret = 0; dp->hpd_irq_on = true; mdss_dp_aux_parse_sink_status_field(dp); - mdss_dp_process_link_training_request(dp); + ret = mdss_dp_process_link_training_request(dp); + if (!ret) + goto exit; - mdss_dp_process_link_status_update(dp); + ret = mdss_dp_process_link_status_update(dp); + if (!ret) + goto exit; - mdss_dp_reset_test_data(dp); + ret = mdss_dp_process_downstream_port_status_change(dp); + if (!ret) + goto exit; pr_debug("done\n"); +exit: + mdss_dp_reset_test_data(dp); + + return ret; } /** @@ -2417,11 +2483,15 @@ static void mdss_dp_process_hpd_irq_high(struct mdss_dp_drv_pdata *dp) * This function will handle the HPD IRQ state transitions from HIGH to LOW, * indicating the end of a test request. */ -static void mdss_dp_process_hpd_irq_low(struct mdss_dp_drv_pdata *dp) +static int mdss_dp_process_hpd_irq_low(struct mdss_dp_drv_pdata *dp) { + if (!dp->hpd_irq_clients_notified) + return -EINVAL; + pr_debug("enter: HPD IRQ low\n"); dp->hpd_irq_on = false; + dp->hpd_irq_clients_notified = false; mdss_dp_update_cable_status(dp, false); mdss_dp_mainlink_push_idle(&dp->panel_data); @@ -2430,6 +2500,7 @@ static void mdss_dp_process_hpd_irq_low(struct mdss_dp_drv_pdata *dp) mdss_dp_reset_test_data(dp); pr_debug("done\n"); + return 0; } static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd, @@ -2471,14 +2542,17 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd, dp_drv->alt_mode.dp_status.hpd_irq; if (dp_drv->alt_mode.dp_status.hpd_irq) { - mdss_dp_process_hpd_irq_high(dp_drv); - break; - } + pr_debug("Attention: hpd_irq high\n"); - if (dp_drv->hpd_irq_toggled - && !dp_drv->alt_mode.dp_status.hpd_irq) { - mdss_dp_process_hpd_irq_low(dp_drv); - break; + if (dp_drv->power_on && dp_drv->hdcp.ops && + dp_drv->hdcp.ops->cp_irq) + dp_drv->hdcp.ops->cp_irq(dp_drv->hdcp.data); + + if (!mdss_dp_process_hpd_irq_high(dp_drv)) + break; + } else if (dp_drv->hpd_irq_toggled) { + if (!mdss_dp_process_hpd_irq_low(dp_drv)) + break; } if (!dp_drv->alt_mode.dp_status.hpd_high) { @@ -2500,9 +2574,6 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd, else dp_send_events(dp_drv, EV_USBPD_DP_CONFIGURE); - if (dp_drv->alt_mode.dp_status.hpd_irq && dp_drv->power_on && - dp_drv->hdcp.ops && dp_drv->hdcp.ops->isr) - dp_drv->hdcp.ops->isr(dp_drv->hdcp.data); break; case DP_VDM_STATUS: dp_drv->alt_mode.dp_status.response = *vdos; @@ -2677,10 +2748,8 @@ static int mdss_dp_probe(struct platform_device *pdev) mdss_dp_device_register(dp_drv); dp_drv->inited = true; - dp_drv->wait_for_audio_comp = false; dp_drv->hpd_irq_on = false; mdss_dp_reset_test_data(dp_drv); - init_completion(&dp_drv->audio_comp); init_completion(&dp_drv->irq_comp); pr_debug("done\n"); @@ -2718,13 +2787,6 @@ void *mdss_dp_get_hdcp_data(struct device *dev) return dp_drv->hdcp.data; } -static inline bool dp_is_hdcp_enabled(struct mdss_dp_drv_pdata *dp_drv) -{ - return dp_drv->hdcp.feature_enabled && - (dp_drv->hdcp.hdcp1_present || dp_drv->hdcp.hdcp2_present) && - dp_drv->hdcp.ops; -} - static inline bool dp_is_stream_shareable(struct mdss_dp_drv_pdata *dp_drv) { bool ret = 0; diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h index 4ba2d20d4261..e801eceeef1b 100644 --- a/drivers/video/fbdev/msm/mdss_dp.h +++ b/drivers/video/fbdev/msm/mdss_dp.h @@ -228,6 
+228,18 @@ struct dp_alt_mode { #define DP_LINK_RATE_MULTIPLIER 27000000 #define DP_MAX_PIXEL_CLK_KHZ 675000 +struct downstream_port_config { + /* Byte 02205h */ + bool dfp_present; + u32 dfp_type; + bool format_conversion; + bool detailed_cap_info_available; + /* Byte 02207h */ + u32 dfp_count; + bool msa_timing_par_ignored; + bool oui_support; +}; + struct dpcd_cap { char major; char minor; @@ -240,6 +252,7 @@ struct dpcd_cap { u32 flags; u32 rx_port0_buf_size; u32 training_read_interval;/* us */ + struct downstream_port_config downstream_port; }; struct dpcd_link_status { @@ -437,7 +450,6 @@ struct mdss_dp_drv_pdata { struct completion train_comp; struct completion idle_comp; struct completion video_comp; - struct completion audio_comp; struct completion irq_comp; struct mutex aux_mutex; struct mutex train_mutex; @@ -463,13 +475,14 @@ struct mdss_dp_drv_pdata { char delay_start; u32 bpp; struct dp_statistic dp_stat; - bool wait_for_audio_comp; bool hpd_irq_on; bool hpd_irq_toggled; + bool hpd_irq_clients_notified; /* event */ struct workqueue_struct *workq; struct work_struct work; + struct delayed_work hdcp_cb_work; u32 current_event; spinlock_t event_lock; spinlock_t lock; @@ -480,6 +493,7 @@ struct mdss_dp_drv_pdata { u32 vic; u32 new_vic; int fb_node; + int hdcp_status; struct dpcd_test_request test_data; struct dpcd_sink_count sink_count; diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c index 91066662e793..9014e3a02d21 100644 --- a/drivers/video/fbdev/msm/mdss_dp_aux.c +++ b/drivers/video/fbdev/msm/mdss_dp_aux.c @@ -801,6 +801,8 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep, cap = &ep->dpcd; bp = rp->data; + memset(cap, 0, sizeof(*cap)); + data = *bp++; /* byte 0 */ cap->major = (data >> 4) & 0x0f; cap->minor = data & 0x0f; @@ -819,8 +821,13 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep, if (data & BIT(7)) cap->enhanced_frame++; - if (data & 0x40) + if (data & 0x40) { cap->flags |= DPCD_TPS3; + pr_debug("pattern 3 supported\n"); + } else { + pr_debug("pattern 3 not supported\n"); + } + data &= 0x0f; cap->max_lane_count = data; if (--rlen <= 0) @@ -846,11 +853,36 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep, if (--rlen <= 0) return; - bp += 3; /* skip 5, 6 and 7 */ - rlen -= 3; + data = *bp++; /* Byte 5: DOWN_STREAM_PORT_PRESENT */ + cap->downstream_port.dfp_present = data & BIT(0); + cap->downstream_port.dfp_type = data & 0x6; + cap->downstream_port.format_conversion = data & BIT(3); + cap->downstream_port.detailed_cap_info_available = data & BIT(4); + pr_debug("dfp_present = %d, dfp_type = %d\n", + cap->downstream_port.dfp_present, + cap->downstream_port.dfp_type); + pr_debug("format_conversion = %d, detailed_cap_info_available = %d\n", + cap->downstream_port.format_conversion, + cap->downstream_port.detailed_cap_info_available); + if (--rlen <= 0) + return; + + bp += 1; /* Skip Byte 6 */ + rlen -= 1; if (rlen <= 0) return; + data = *bp++; /* Byte 7: DOWN_STREAM_PORT_COUNT */ + cap->downstream_port.dfp_count = data & 0x7; + cap->downstream_port.msa_timing_par_ignored = data & BIT(6); + cap->downstream_port.oui_support = data & BIT(7); + pr_debug("dfp_count = %d, msa_timing_par_ignored = %d\n", + cap->downstream_port.dfp_count, + cap->downstream_port.msa_timing_par_ignored); + pr_debug("oui_support = %d\n", cap->downstream_port.oui_support); + if (--rlen <= 0) + return; + data = *bp++; /* byte 8 */ if (data & BIT(1)) { cap->flags |= DPCD_PORT_0_EDID_PRESENTED; @@ -861,7 
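/*
 * Editor's note (illustrative consolidation, not part of the diff): the
 * wrapped aux hunks above parse DPCD byte 5 (DOWN_STREAM_PORT_PRESENT)
 * and byte 7 (DOWN_STREAM_PORT_COUNT) into the new downstream_port_config
 * fields added to mdss_dp.h.  Pulled together, the decode amounts to:
 */
#include <linux/bitops.h>
#include <linux/types.h>

static void example_parse_downstream_port(struct downstream_port_config *port,
					  u8 byte5, u8 byte7)
{
	/* byte 5: DOWN_STREAM_PORT_PRESENT */
	port->dfp_present = byte5 & BIT(0);
	port->dfp_type = byte5 & 0x6;
	port->format_conversion = byte5 & BIT(3);
	port->detailed_cap_info_available = byte5 & BIT(4);

	/* byte 7: DOWN_STREAM_PORT_COUNT */
	port->dfp_count = byte7 & 0x7;
	port->msa_timing_par_ignored = byte7 & BIT(6);
	port->oui_support = byte7 & BIT(7);
}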
+893,7 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep, data = *bp++; /* byte 9 */ cap->rx_port0_buf_size = (data + 1) * 32; - pr_debug("lane_buf_size=%d", cap->rx_port0_buf_size); + pr_debug("lane_buf_size=%d\n", cap->rx_port0_buf_size); if (--rlen <= 0) return; @@ -1431,6 +1463,7 @@ static int dp_start_link_train_1(struct mdss_dp_drv_pdata *ep) int tries, old_v_level; int ret = 0; int usleep_time; + int const maximum_retries = 5; pr_debug("Entered++"); @@ -1458,7 +1491,7 @@ static int dp_start_link_train_1(struct mdss_dp_drv_pdata *ep) if (old_v_level == ep->v_level) { tries++; - if (tries >= 5) { + if (tries >= maximum_retries) { ret = -1; break; /* quit */ } @@ -1480,6 +1513,7 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep) int ret = 0; int usleep_time; char pattern; + int const maximum_retries = 5; pr_debug("Entered++"); @@ -1505,7 +1539,7 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep) } tries++; - if (tries > 4) { + if (tries > maximum_retries) { ret = -1; break; } @@ -1518,47 +1552,27 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep) static int dp_link_rate_down_shift(struct mdss_dp_drv_pdata *ep) { - u32 prate, lrate; - int rate, lane, max_lane; - int changed = 0; - - rate = ep->link_rate; - lane = ep->lane_cnt; - max_lane = ep->dpcd.max_lane_count; - - prate = ep->pixel_rate; - prate /= 1000; /* avoid using 64 biits */ - prate *= ep->bpp; - prate /= 8; /* byte */ - - if (rate > DP_LINK_RATE_162 && rate <= DP_LINK_RATE_MAX) { - rate -= 4; /* reduce rate */ - changed++; - } + int ret = 0; - if (changed) { - if (lane >= 1 && lane < max_lane) - lane <<= 1; /* increase lane */ + if (!ep) + return -EINVAL; - lrate = 270000000; /* 270M */ - lrate /= 1000; /* avoid using 64 bits */ - lrate *= rate; - lrate /= 10; /* byte, 10 bits --> 8 bits */ - lrate *= lane; + switch (ep->link_rate) { + case DP_LINK_RATE_540: + ep->link_rate = DP_LINK_RATE_270; + break; + case DP_LINK_RATE_270: + ep->link_rate = DP_LINK_RATE_162; + break; + case DP_LINK_RATE_162: + default: + ret = -EINVAL; + break; + }; - pr_debug("new lrate=%u prate=%u rate=%d lane=%d p=%d b=%d\n", - lrate, prate, rate, lane, ep->pixel_rate, ep->bpp); + pr_debug("new rate=%d\n", ep->link_rate); - if (lrate > prate) { - ep->link_rate = rate; - ep->lane_cnt = lane; - pr_debug("new rate=%d %d\n", rate, lane); - return 0; - } - } - - /* add calculation later */ - return -EINVAL; + return ret; } int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state) @@ -1595,7 +1609,6 @@ int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp) mdss_dp_aux_set_sink_power_state(dp, SINK_POWER_ON); -train_start: dp->v_level = 0; /* start from default level */ dp->p_level = 0; mdss_dp_config_ctrl(dp); @@ -1605,11 +1618,12 @@ train_start: ret = dp_start_link_train_1(dp); if (ret < 0) { - if (dp_link_rate_down_shift(dp) == 0) { - goto train_start; + if (!dp_link_rate_down_shift(dp)) { + pr_debug("retry with lower rate\n"); + return -EAGAIN; } else { pr_err("Training 1 failed\n"); - ret = -1; + ret = -EINVAL; goto clear; } } @@ -1618,21 +1632,21 @@ train_start: ret = dp_start_link_train_2(dp); if (ret < 0) { - if (dp_link_rate_down_shift(dp) == 0) { - goto train_start; + if (!dp_link_rate_down_shift(dp)) { + pr_debug("retry with lower rate\n"); + return -EAGAIN; } else { pr_err("Training 2 failed\n"); - ret = -1; + ret = -EINVAL; goto clear; } } pr_debug("Training 2 completed successfully\n"); - clear: dp_clear_training_pattern(dp); - if (ret != -1) { + if (ret != 
-EINVAL) { mdss_dp_setup_tr_unit(&dp->ctrl_io, dp->link_rate, dp->lane_cnt, dp->vic); mdss_dp_state_ctrl(&dp->ctrl_io, ST_SEND_VIDEO); diff --git a/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c index 3891806b09bb..79cd94cfbe88 100644 --- a/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c +++ b/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c @@ -53,7 +53,6 @@ struct dp_hdcp2p2_ctrl { struct kthread_work send_msg; struct kthread_work recv_msg; struct kthread_work link; - struct kthread_work poll; char *msg_buf; uint32_t send_msg_len; /* length of all parameters in msg */ uint32_t timeout; @@ -67,26 +66,6 @@ struct dp_hdcp2p2_ctrl { bool polling; }; -static inline char *dp_hdcp_cmd_to_str(uint32_t cmd) -{ - switch (cmd) { - case HDMI_HDCP_WKUP_CMD_SEND_MESSAGE: - return "HDMI_HDCP_WKUP_CMD_SEND_MESSAGE"; - case HDMI_HDCP_WKUP_CMD_RECV_MESSAGE: - return "HDMI_HDCP_WKUP_CMD_RECV_MESSAGE"; - case HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS: - return "HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS"; - case HDMI_HDCP_WKUP_CMD_STATUS_FAILED: - return "DP_HDCP_WKUP_CMD_STATUS_FAIL"; - case HDMI_HDCP_WKUP_CMD_LINK_POLL: - return "HDMI_HDCP_WKUP_CMD_LINK_POLL"; - case HDMI_HDCP_WKUP_CMD_AUTHENTICATE: - return "HDMI_HDCP_WKUP_CMD_AUTHENTICATE"; - default: - return "???"; - } -} - static inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl) { if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_AUTHENTICATE) @@ -193,7 +172,7 @@ static int dp_hdcp2p2_wakeup(struct hdmi_hdcp_wakeup_data *data) queue_kthread_work(&ctrl->worker, &ctrl->status); break; case HDMI_HDCP_WKUP_CMD_LINK_POLL: - queue_kthread_work(&ctrl->worker, &ctrl->poll); + ctrl->polling = true; break; case HDMI_HDCP_WKUP_CMD_AUTHENTICATE: queue_kthread_work(&ctrl->worker, &ctrl->auth); @@ -203,10 +182,11 @@ static int dp_hdcp2p2_wakeup(struct hdmi_hdcp_wakeup_data *data) } exit: mutex_unlock(&ctrl->wakeup_mutex); + return 0; } -static inline int dp_hdcp2p2_wakeup_lib(struct dp_hdcp2p2_ctrl *ctrl, +static inline void dp_hdcp2p2_wakeup_lib(struct dp_hdcp2p2_ctrl *ctrl, struct hdcp_lib_wakeup_data *data) { int rc = 0; @@ -218,8 +198,6 @@ static inline int dp_hdcp2p2_wakeup_lib(struct dp_hdcp2p2_ctrl *ctrl, pr_err("error sending %s to lib\n", hdcp_lib_cmd_to_str(data->cmd)); } - - return rc; } static void dp_hdcp2p2_reset(struct dp_hdcp2p2_ctrl *ctrl) @@ -339,8 +317,6 @@ static void dp_hdcp2p2_auth_failed(struct dp_hdcp2p2_ctrl *ctrl) return; } - atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL); - /* notify DP about HDCP failure */ ctrl->init_data.notify_status(ctrl->init_data.cb_data, HDCP_STATE_AUTH_FAIL); @@ -349,8 +325,7 @@ static void dp_hdcp2p2_auth_failed(struct dp_hdcp2p2_ctrl *ctrl) static int dp_hdcp2p2_aux_read_message(struct dp_hdcp2p2_ctrl *ctrl, u8 *buf, int size, int offset, u32 timeout) { - int rc, max_size = 16, read_size, len = size; - u8 *buf_start = buf; + int rc, max_size = 16, read_size; if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { pr_err("hdcp is off\n"); @@ -378,8 +353,6 @@ static int dp_hdcp2p2_aux_read_message(struct dp_hdcp2p2_ctrl *ctrl, size -= read_size; } while (size > 0); - print_hex_dump(KERN_DEBUG, "hdcp2p2: ", DUMP_PREFIX_NONE, - 16, 1, buf_start, len, false); return rc; } @@ -452,12 +425,9 @@ end: static void dp_hdcp2p2_send_msg_work(struct kthread_work *work) { int rc = 0; - int i; - int sent_bytes = 0; struct dp_hdcp2p2_ctrl *ctrl = container_of(work, struct dp_hdcp2p2_ctrl, send_msg); struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID}; - char *buf = NULL; if (!ctrl) { 
pr_err("invalid input\n"); @@ -474,20 +444,13 @@ static void dp_hdcp2p2_send_msg_work(struct kthread_work *work) mutex_lock(&ctrl->msg_lock); - /* Loop through number of parameters in the messages. */ - for (i = 0; i < ctrl->num_messages; i++) { - buf = ctrl->msg_buf + sent_bytes; - - /* Forward the message to the sink */ - rc = dp_hdcp2p2_aux_write_message(ctrl, buf, - (size_t)ctrl->msg_part[i].length, - ctrl->msg_part[i].offset, ctrl->timeout); - if (rc) { - pr_err("Error sending msg to sink %d\n", rc); - mutex_unlock(&ctrl->msg_lock); - goto exit; - } - sent_bytes += ctrl->msg_part[i].length; + rc = dp_hdcp2p2_aux_write_message(ctrl, ctrl->msg_buf, + ctrl->send_msg_len, ctrl->msg_part->offset, + ctrl->timeout); + if (rc) { + pr_err("Error sending msg to sink %d\n", rc); + mutex_unlock(&ctrl->msg_lock); + goto exit; } cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS; @@ -505,10 +468,9 @@ exit: static int dp_hdcp2p2_get_msg_from_sink(struct dp_hdcp2p2_ctrl *ctrl) { - int i, rc = 0; + int rc = 0; char *recvd_msg_buf = NULL; struct hdcp_lib_wakeup_data cdata = { HDCP_LIB_WKUP_CMD_INVALID }; - int bytes_read = 0; cdata.context = ctrl->lib_ctx; @@ -518,17 +480,12 @@ static int dp_hdcp2p2_get_msg_from_sink(struct dp_hdcp2p2_ctrl *ctrl) goto exit; } - for (i = 0; i < ctrl->num_messages; i++) { - rc = dp_hdcp2p2_aux_read_message( - ctrl, recvd_msg_buf + bytes_read, - ctrl->msg_part[i].length, - ctrl->msg_part[i].offset, - ctrl->timeout); - if (rc) { - pr_err("error reading message %d\n", rc); - goto exit; - } - bytes_read += ctrl->msg_part[i].length; + rc = dp_hdcp2p2_aux_read_message(ctrl, recvd_msg_buf, + ctrl->send_msg_len, ctrl->msg_part->offset, + ctrl->timeout); + if (rc) { + pr_err("error reading message %d\n", rc); + goto exit; } cdata.recvd_msg_buf = recvd_msg_buf; @@ -550,7 +507,6 @@ exit: static void dp_hdcp2p2_recv_msg_work(struct kthread_work *work) { - int rc = 0; struct hdcp_lib_wakeup_data cdata = { HDCP_LIB_WKUP_CMD_INVALID }; struct dp_hdcp2p2_ctrl *ctrl = container_of(work, struct dp_hdcp2p2_ctrl, recv_msg); @@ -559,44 +515,24 @@ static void dp_hdcp2p2_recv_msg_work(struct kthread_work *work) if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { pr_err("hdcp is off\n"); - goto exit; + return; } - if (ctrl->sink_rx_status & ctrl->abort_mask) { - pr_err("reauth or Link fail triggered by sink\n"); - - ctrl->sink_rx_status = 0; - rc = -ENOLINK; - cdata.cmd = HDCP_LIB_WKUP_CMD_STOP; - - goto exit; - } + if (ctrl->rx_status) { + if (!ctrl->cp_irq_done) { + pr_debug("waiting for CP_IRQ\n"); + ctrl->polling = true; + return; + } - if (ctrl->rx_status && !ctrl->sink_rx_status) { - pr_debug("Recv msg for RxStatus, but no CP_IRQ yet\n"); - ctrl->polling = true; - goto exit; + if (ctrl->rx_status & ctrl->sink_rx_status) { + ctrl->cp_irq_done = false; + ctrl->sink_rx_status = 0; + ctrl->rx_status = 0; + } } dp_hdcp2p2_get_msg_from_sink(ctrl); - - return; -exit: - if (rc) - dp_hdcp2p2_wakeup_lib(ctrl, &cdata); -} - -static void dp_hdcp2p2_poll_work(struct kthread_work *work) -{ - struct dp_hdcp2p2_ctrl *ctrl = container_of(work, - struct dp_hdcp2p2_ctrl, poll); - - if (ctrl->cp_irq_done) { - ctrl->cp_irq_done = false; - dp_hdcp2p2_get_msg_from_sink(ctrl); - } else { - ctrl->polling = true; - } } static void dp_hdcp2p2_auth_status_work(struct kthread_work *work) @@ -645,44 +581,45 @@ static void dp_hdcp2p2_link_work(struct kthread_work *work) if (rc) { pr_err("failed to read rx status\n"); - cdata.cmd = HDCP_LIB_WKUP_CMD_STOP; + cdata.cmd = HDCP_LIB_WKUP_CMD_LINK_FAILED; + 
atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL); goto exit; } if (ctrl->sink_rx_status & ctrl->abort_mask) { - pr_err("reauth or Link fail triggered by sink\n"); + if (ctrl->sink_rx_status & BIT(3)) + pr_err("reauth_req set by sink\n"); + + if (ctrl->sink_rx_status & BIT(4)) + pr_err("link failure reported by sink\n"); ctrl->sink_rx_status = 0; ctrl->rx_status = 0; rc = -ENOLINK; - cdata.cmd = HDCP_LIB_WKUP_CMD_STOP; + + cdata.cmd = HDCP_LIB_WKUP_CMD_LINK_FAILED; + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL); goto exit; } - /* if polling, get message from sink else let polling start */ if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) { ctrl->sink_rx_status = 0; ctrl->rx_status = 0; - rc = dp_hdcp2p2_get_msg_from_sink(ctrl); + dp_hdcp2p2_get_msg_from_sink(ctrl); ctrl->polling = false; } else { ctrl->cp_irq_done = true; } exit: - dp_hdcp2p2_wakeup_lib(ctrl, &cdata); - - if (rc) { - dp_hdcp2p2_auth_failed(ctrl); - return; - } + if (rc) + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); } static void dp_hdcp2p2_auth_work(struct kthread_work *work) { - int rc = 0; struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID}; struct dp_hdcp2p2_ctrl *ctrl = container_of(work, struct dp_hdcp2p2_ctrl, auth); @@ -694,12 +631,10 @@ static void dp_hdcp2p2_auth_work(struct kthread_work *work) else cdata.cmd = HDCP_LIB_WKUP_CMD_STOP; - rc = dp_hdcp2p2_wakeup_lib(ctrl, &cdata); - if (rc) - dp_hdcp2p2_auth_failed(ctrl); + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); } -static int dp_hdcp2p2_isr(void *input) +static int dp_hdcp2p2_cp_irq(void *input) { struct dp_hdcp2p2_ctrl *ctrl = input; @@ -748,7 +683,7 @@ void *dp_hdcp2p2_init(struct hdcp_init_data *init_data) .authenticate = dp_hdcp2p2_authenticate, .feature_supported = dp_hdcp2p2_feature_supported, .off = dp_hdcp2p2_off, - .isr = dp_hdcp2p2_isr + .cp_irq = dp_hdcp2p2_cp_irq, }; static struct hdcp_client_ops client_ops = { @@ -806,7 +741,6 @@ void *dp_hdcp2p2_init(struct hdcp_init_data *init_data) init_kthread_work(&ctrl->recv_msg, dp_hdcp2p2_recv_msg_work); init_kthread_work(&ctrl->status, dp_hdcp2p2_auth_status_work); init_kthread_work(&ctrl->link, dp_hdcp2p2_link_work); - init_kthread_work(&ctrl->poll, dp_hdcp2p2_poll_work); ctrl->thread = kthread_run(kthread_worker_fn, &ctrl->worker, "dp_hdcp2p2"); diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c index 0316f4e86d39..bcd23d3c19f2 100644 --- a/drivers/video/fbdev/msm/mdss_fb.c +++ b/drivers/video/fbdev/msm/mdss_fb.c @@ -2703,7 +2703,9 @@ static int mdss_fb_release_all(struct fb_info *info, bool release_all) * enabling ahead of unblank. for some special cases like * adb shell stop/start. 
*/ + mutex_lock(&mfd->bl_lock); mdss_fb_set_backlight(mfd, 0); + mutex_unlock(&mfd->bl_lock); ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable); diff --git a/drivers/video/fbdev/msm/mdss_hdcp.h b/drivers/video/fbdev/msm/mdss_hdcp.h index d373d22384e8..6e347a867366 100644 --- a/drivers/video/fbdev/msm/mdss_hdcp.h +++ b/drivers/video/fbdev/msm/mdss_hdcp.h @@ -55,6 +55,7 @@ struct hdcp_init_data { struct hdcp_ops { int (*isr)(void *ptr); + int (*cp_irq)(void *ptr); int (*reauthenticate)(void *input); int (*authenticate)(void *hdcp_ctrl); bool (*feature_supported)(void *input); diff --git a/drivers/video/fbdev/msm/mdss_hdcp_1x.c b/drivers/video/fbdev/msm/mdss_hdcp_1x.c index 1e502cf750a6..a8182c2f0e76 100644 --- a/drivers/video/fbdev/msm/mdss_hdcp_1x.c +++ b/drivers/video/fbdev/msm/mdss_hdcp_1x.c @@ -153,6 +153,7 @@ struct hdcp_reg_set { u32 sec_data12; u32 reset; + u32 reset_bit; }; #define HDCP_REG_SET_CLIENT_HDMI \ @@ -175,7 +176,7 @@ struct hdcp_reg_set { HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10, \ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11, \ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12, \ - HDMI_HDCP_RESET} + HDMI_HDCP_RESET, BIT(0)} #define HDCP_REG_SET_CLIENT_DP \ {DP_HDCP_STATUS, 16, 14, 13, DP_HDCP_CTRL, \ @@ -193,7 +194,8 @@ struct hdcp_reg_set { HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9, \ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10, \ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11, \ - HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12, 0} + HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12, \ + DP_SW_RESET, BIT(1)} #define HDCP_HDMI_SINK_ADDR_MAP \ {{"bcaps", 0x40, 1}, {"bksv", 0x00, 5}, {"r0'", 0x08, 2}, \ @@ -1295,6 +1297,9 @@ static void hdcp_1x_int_work(struct work_struct *work) return; } + if (hdcp_ctrl->hdcp_state == HDCP_STATE_AUTHENTICATED) + hdcp1_set_enc(false); + mutex_lock(hdcp_ctrl->init_data.mutex); hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAIL; mutex_unlock(hdcp_ctrl->init_data.mutex); @@ -1383,6 +1388,8 @@ error: hdcp_ctrl->init_data.cb_data, hdcp_ctrl->hdcp_state); } + + hdcp1_set_enc(true); } else { DEV_DBG("%s: %s: HDCP state changed during authentication\n", __func__, HDCP_STATE_NAME); @@ -1431,7 +1438,7 @@ int hdcp_1x_reauthenticate(void *input) struct hdcp_reg_set *reg_set; struct hdcp_int_set *isr; u32 hdmi_hw_version; - u32 ret = 0; + u32 ret = 0, reg; if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) { DEV_ERR("%s: invalid input\n", __func__); @@ -1462,15 +1469,17 @@ int hdcp_1x_reauthenticate(void *input) /* Disable HDCP interrupts */ DSS_REG_W(io, isr->int_reg, DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN); - if (reg_set->reset) - DSS_REG_W(io, reg_set->reset, BIT(0)); + reg = DSS_REG_R(io, reg_set->reset); + DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit); /* Disable encryption and disable the HDCP block */ DSS_REG_W(io, reg_set->ctrl, 0); + DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit); + if (!hdcp_1x_load_keys(input)) queue_delayed_work(hdcp_ctrl->init_data.workq, - &hdcp_ctrl->hdcp_auth_work, HZ/2); + &hdcp_ctrl->hdcp_auth_work, HZ); else queue_work(hdcp_ctrl->init_data.workq, &hdcp_ctrl->hdcp_int_work); @@ -1485,6 +1494,7 @@ void hdcp_1x_off(void *input) struct hdcp_reg_set *reg_set; struct hdcp_int_set *isr; int rc = 0; + u32 reg; if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) { DEV_ERR("%s: invalid input\n", __func__); @@ -1501,6 +1511,9 @@ void hdcp_1x_off(void *input) return; } + if (hdcp_ctrl->hdcp_state == HDCP_STATE_AUTHENTICATED) + hdcp1_set_enc(false); + /* * Disable HDCP interrupts. 
* Also, need to set the state to inactive here so that any ongoing @@ -1527,12 +1540,15 @@ void hdcp_1x_off(void *input) DEV_DBG("%s: %s: Deleted hdcp int work\n", __func__, HDCP_STATE_NAME); - if (reg_set->reset) - DSS_REG_W(io, reg_set->reset, BIT(0)); + + reg = DSS_REG_R(io, reg_set->reset); + DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit); /* Disable encryption and disable the HDCP block */ DSS_REG_W(io, reg_set->ctrl, 0); + DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit); + DEV_DBG("%s: %s: HDCP: Off\n", __func__, HDCP_STATE_NAME); } /* hdcp_1x_off */ diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h index a561fed80ce6..93f5f9a51a63 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.h +++ b/drivers/video/fbdev/msm/mdss_mdp.h @@ -235,9 +235,13 @@ enum mdss_mdp_csc_type { MDSS_MDP_CSC_YUV2RGB_601L, MDSS_MDP_CSC_YUV2RGB_601FR, MDSS_MDP_CSC_YUV2RGB_709L, + MDSS_MDP_CSC_YUV2RGB_2020L, + MDSS_MDP_CSC_YUV2RGB_2020FR, MDSS_MDP_CSC_RGB2YUV_601L, MDSS_MDP_CSC_RGB2YUV_601FR, MDSS_MDP_CSC_RGB2YUV_709L, + MDSS_MDP_CSC_RGB2YUV_2020L, + MDSS_MDP_CSC_RGB2YUV_2020FR, MDSS_MDP_CSC_YUV2YUV, MDSS_MDP_CSC_RGB2RGB, MDSS_MDP_MAX_CSC @@ -1408,6 +1412,10 @@ static inline uint8_t pp_vig_csc_pipe_val(struct mdss_mdp_pipe *pipe) return MDSS_MDP_CSC_YUV2RGB_601L; case MDP_CSC_ITU_R_601_FR: return MDSS_MDP_CSC_YUV2RGB_601FR; + case MDP_CSC_ITU_R_2020: + return MDSS_MDP_CSC_YUV2RGB_2020L; + case MDP_CSC_ITU_R_2020_FR: + return MDSS_MDP_CSC_YUV2RGB_2020FR; case MDP_CSC_ITU_R_709: default: return MDSS_MDP_CSC_YUV2RGB_709L; diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c index 47edc320233a..f79212ea740d 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_pp.c +++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c @@ -60,6 +60,30 @@ struct mdp_csc_cfg mdp_csc_8bit_convert[MDSS_MDP_MAX_CSC] = { { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,}, { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,}, }, + [MDSS_MDP_CSC_YUV2RGB_2020L] = { + 0, + { + 0x0256, 0x0000, 0x035e, + 0x0256, 0xffa0, 0xfeb2, + 0x0256, 0x044c, 0x0000, + }, + { 0xfff0, 0xff80, 0xff80,}, + { 0x0, 0x0, 0x0,}, + { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,}, + { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,}, + }, + [MDSS_MDP_CSC_YUV2RGB_2020FR] = { + 0, + { + 0x0200, 0x0000, 0x02f3, + 0x0200, 0xffac, 0xfedb, + 0x0200, 0x03c3, 0x0000, + }, + { 0x0000, 0xff80, 0xff80,}, + { 0x0, 0x0, 0x0,}, + { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,}, + { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,}, + }, [MDSS_MDP_CSC_RGB2YUV_601L] = { 0, { @@ -96,6 +120,30 @@ struct mdp_csc_cfg mdp_csc_8bit_convert[MDSS_MDP_MAX_CSC] = { { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,}, { 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,}, }, + [MDSS_MDP_CSC_RGB2YUV_2020L] = { + 0, + { + 0x0073, 0x0129, 0x001a, + 0xffc1, 0xff5e, 0x00e0, + 0x00e0, 0xff32, 0xffee + }, + { 0x0, 0x0, 0x0,}, + { 0x0010, 0x0080, 0x0080,}, + { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,}, + { 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,}, + }, + [MDSS_MDP_CSC_RGB2YUV_2020FR] = { + 0, + { + 0x0086, 0x015b, 0x001e, + 0xffb9, 0xff47, 0x0100, + 0x0100, 0xff15, 0xffeb + }, + { 0x0, 0x0, 0x0,}, + { 0x0, 0x0080, 0x0080,}, + { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,}, + { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,}, + }, [MDSS_MDP_CSC_YUV2YUV] = { 0, { @@ -159,6 +207,30 @@ struct mdp_csc_cfg mdp_csc_10bit_convert[MDSS_MDP_MAX_CSC] = { { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,}, { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, }, + [MDSS_MDP_CSC_YUV2RGB_2020L] = { + 0, + { + 0x0256, 0x0000, 0x035e, + 0x0256, 0xffa0, 0xfeb2, + 0x0256, 0x044c, 0x0000, + }, + { 
0xffc0, 0xfe00, 0xfe00,}, + { 0x0, 0x0, 0x0,}, + { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + }, + [MDSS_MDP_CSC_YUV2RGB_2020FR] = { + 0, + { + 0x0200, 0x0000, 0x02f3, + 0x0200, 0xffac, 0xfedb, + 0x0200, 0x03c3, 0x0000, + }, + { 0x0000, 0xfe00, 0xfe00,}, + { 0x0, 0x0, 0x0,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + }, [MDSS_MDP_CSC_RGB2YUV_601L] = { 0, { @@ -195,6 +267,30 @@ struct mdp_csc_cfg mdp_csc_10bit_convert[MDSS_MDP_MAX_CSC] = { { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,}, }, + [MDSS_MDP_CSC_RGB2YUV_2020L] = { + 0, + { + 0x0073, 0x0129, 0x001a, + 0xffc1, 0xff5e, 0x00e0, + 0x00e0, 0xff32, 0xffee + }, + { 0x0, 0x0, 0x0,}, + { 0x0040, 0x0200, 0x0200,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,}, + }, + [MDSS_MDP_CSC_RGB2YUV_2020FR] = { + 0, + { + 0x0086, 0x015b, 0x001e, + 0xffb9, 0xff47, 0x0100, + 0x0100, 0xff15, 0xffeb + }, + { 0x0, 0x0, 0x0,}, + { 0x0, 0x0200, 0x0200,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + }, [MDSS_MDP_CSC_YUV2YUV] = { 0, { diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/video/fbdev/msm/msm_ext_display.c index f6c6548bdaa5..ca01ee6345d2 100644 --- a/drivers/video/fbdev/msm/msm_ext_display.c +++ b/drivers/video/fbdev/msm/msm_ext_display.c @@ -38,15 +38,18 @@ struct msm_ext_disp { struct switch_dev hdmi_sdev; struct switch_dev audio_sdev; bool ack_enabled; - atomic_t ack_pending; + bool audio_session_on; struct list_head display_list; struct mutex lock; + struct completion hpd_comp; }; static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp, enum msm_ext_disp_type type, struct msm_ext_disp_init_data **data); static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack); +static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp, + enum msm_ext_disp_cable_state state); static int msm_ext_disp_switch_dev_register(struct msm_ext_disp *ext_disp) { @@ -313,14 +316,14 @@ static void msm_ext_disp_remove_intf_data(struct msm_ext_disp *ext_disp, } } -static void msm_ext_disp_send_cable_notification(struct msm_ext_disp *ext_disp, +static int msm_ext_disp_send_cable_notification(struct msm_ext_disp *ext_disp, enum msm_ext_disp_cable_state new_state) { int state = EXT_DISPLAY_CABLE_STATE_MAX; if (!ext_disp) { pr_err("Invalid params\n"); - return; + return -EINVAL; } state = ext_disp->hdmi_sdev.state; @@ -330,6 +333,77 @@ static void msm_ext_disp_send_cable_notification(struct msm_ext_disp *ext_disp, ext_disp->hdmi_sdev.state == state ? "is same" : "switched to", ext_disp->hdmi_sdev.state); + + return ext_disp->hdmi_sdev.state == state ? 0 : 1; +} + +static int msm_ext_disp_send_audio_notification(struct msm_ext_disp *ext_disp, + enum msm_ext_disp_cable_state new_state) +{ + int state = EXT_DISPLAY_CABLE_STATE_MAX; + + if (!ext_disp) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + state = ext_disp->audio_sdev.state; + switch_set_state(&ext_disp->audio_sdev, !!new_state); + + pr_debug("Audio state %s %d\n", + ext_disp->audio_sdev.state == state ? + "is same" : "switched to", + ext_disp->audio_sdev.state); + + return ext_disp->audio_sdev.state == state ? 
0 : 1; +} + +static int msm_ext_disp_process_display(struct msm_ext_disp *ext_disp, + enum msm_ext_disp_cable_state state) +{ + int ret = msm_ext_disp_send_cable_notification(ext_disp, state); + + /* positive ret value means audio node was switched */ + if (IS_ERR_VALUE(ret) || !ret) { + pr_debug("not waiting for display\n"); + goto end; + } + + reinit_completion(&ext_disp->hpd_comp); + ret = wait_for_completion_timeout(&ext_disp->hpd_comp, HZ * 2); + if (!ret) { + pr_err("display timeout\n"); + ret = -EINVAL; + goto end; + } + + ret = 0; +end: + return ret; +} + +static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp, + enum msm_ext_disp_cable_state state) +{ + int ret = msm_ext_disp_send_audio_notification(ext_disp, state); + + /* positive ret value means audio node was switched */ + if (IS_ERR_VALUE(ret) || !ret || !ext_disp->ack_enabled) { + pr_debug("not waiting for audio\n"); + goto end; + } + + reinit_completion(&ext_disp->hpd_comp); + ret = wait_for_completion_timeout(&ext_disp->hpd_comp, HZ * 2); + if (!ret) { + pr_err("audio timeout\n"); + ret = -EINVAL; + goto end; + } + + ret = 0; +end: + return ret; } static int msm_ext_disp_hpd(struct platform_device *pdev, @@ -337,7 +411,6 @@ static int msm_ext_disp_hpd(struct platform_device *pdev, enum msm_ext_disp_cable_state state) { int ret = 0; - struct msm_ext_disp_init_data *data = NULL; struct msm_ext_disp *ext_disp = NULL; if (!pdev) { @@ -379,27 +452,28 @@ static int msm_ext_disp_hpd(struct platform_device *pdev, goto end; } - ret = msm_ext_disp_get_intf_data(ext_disp, type, &data); - if (ret) - goto end; - if (state == EXT_DISPLAY_CABLE_CONNECT) { - ext_disp->current_disp = data->type; - } else if ((state == EXT_DISPLAY_CABLE_DISCONNECT) && - !ext_disp->ack_enabled) { - if (ext_disp->ops) { - ext_disp->ops->audio_info_setup = NULL; - ext_disp->ops->get_audio_edid_blk = NULL; - ext_disp->ops->cable_status = NULL; - ext_disp->ops->get_intf_id = NULL; - ext_disp->ops->teardown_done = NULL; - } + ext_disp->current_disp = type; + + ret = msm_ext_disp_process_display(ext_disp, state); + if (ret) + goto end; + + msm_ext_disp_update_audio_ops(ext_disp, state); + if (ret) + goto end; + + ret = msm_ext_disp_process_audio(ext_disp, state); + if (ret) + goto end; + } else { + msm_ext_disp_process_audio(ext_disp, state); + msm_ext_disp_update_audio_ops(ext_disp, state); + msm_ext_disp_process_display(ext_disp, state); ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX; } - msm_ext_disp_send_cable_notification(ext_disp, state); - pr_debug("Hpd (%d) for display (%s)\n", state, msm_ext_disp_name(type)); @@ -427,23 +501,18 @@ static int msm_ext_disp_get_intf_data_helper(struct platform_device *pdev, goto end; } - mutex_lock(&ext_disp->lock); - if (ext_disp->current_disp == EXT_DISPLAY_TYPE_MAX) { ret = -EINVAL; pr_err("No display connected\n"); - goto error; + goto end; } ret = msm_ext_disp_get_intf_data(ext_disp, ext_disp->current_disp, data); - if (ret) - goto error; -error: - mutex_unlock(&ext_disp->lock); end: return ret; } + static int msm_ext_disp_cable_status(struct platform_device *pdev, u32 vote) { int ret = 0; @@ -480,11 +549,21 @@ static int msm_ext_disp_audio_info_setup(struct platform_device *pdev, { int ret = 0; struct msm_ext_disp_init_data *data = NULL; + struct msm_ext_disp *ext_disp = NULL; ret = msm_ext_disp_get_intf_data_helper(pdev, &data); if (ret || !data) goto end; + ext_disp = platform_get_drvdata(pdev); + if (!ext_disp) { + pr_err("No drvdata found\n"); + ret = -EINVAL; + goto end; + } + + 
ext_disp->audio_session_on = true; + ret = data->codec_ops.audio_info_setup(data->pdev, params); end: @@ -495,6 +574,7 @@ static void msm_ext_disp_teardown_done(struct platform_device *pdev) { int ret = 0; struct msm_ext_disp_init_data *data = NULL; + struct msm_ext_disp *ext_disp = NULL; ret = msm_ext_disp_get_intf_data_helper(pdev, &data); if (ret || !data) { @@ -502,7 +582,21 @@ static void msm_ext_disp_teardown_done(struct platform_device *pdev) return; } - data->codec_ops.teardown_done(data->pdev); + ext_disp = platform_get_drvdata(pdev); + if (!ext_disp) { + pr_err("No drvdata found\n"); + return; + } + + if (data->codec_ops.teardown_done) + data->codec_ops.teardown_done(data->pdev); + + ext_disp->audio_session_on = false; + + pr_debug("%s tearing down audio\n", + msm_ext_disp_name(ext_disp->current_disp)); + + complete_all(&ext_disp->hpd_comp); } static int msm_ext_disp_get_intf_id(struct platform_device *pdev) @@ -523,93 +617,78 @@ static int msm_ext_disp_get_intf_id(struct platform_device *pdev) goto end; } - mutex_lock(&ext_disp->lock); ret = ext_disp->current_disp; - mutex_unlock(&ext_disp->lock); end: return ret; } +static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp, + enum msm_ext_disp_cable_state state) +{ + int ret = 0; + struct msm_ext_disp_audio_codec_ops *ops = ext_disp->ops; + + if (!ops) { + pr_err("Invalid audio ops\n"); + ret = -EINVAL; + goto end; + } + + if (state == EXT_DISPLAY_CABLE_CONNECT) { + ops->audio_info_setup = msm_ext_disp_audio_info_setup; + ops->get_audio_edid_blk = msm_ext_disp_get_audio_edid_blk; + ops->cable_status = msm_ext_disp_cable_status; + ops->get_intf_id = msm_ext_disp_get_intf_id; + ops->teardown_done = msm_ext_disp_teardown_done; + } else { + ops->audio_info_setup = NULL; + ops->get_audio_edid_blk = NULL; + ops->cable_status = NULL; + ops->get_intf_id = NULL; + ops->teardown_done = NULL; + } +end: + return ret; +} + static int msm_ext_disp_notify(struct platform_device *pdev, - enum msm_ext_disp_cable_state new_state) + enum msm_ext_disp_cable_state state) { int ret = 0; - int state = 0; - bool switched; - struct msm_ext_disp_init_data *data = NULL; struct msm_ext_disp *ext_disp = NULL; if (!pdev) { pr_err("Invalid platform device\n"); - return -EINVAL; + ret = -EINVAL; + goto end; } ext_disp = platform_get_drvdata(pdev); if (!ext_disp) { pr_err("Invalid drvdata\n"); - return -EINVAL; - } - - mutex_lock(&ext_disp->lock); - - if (state < EXT_DISPLAY_CABLE_DISCONNECT || - state >= EXT_DISPLAY_CABLE_STATE_MAX) { - pr_err("Invalid state (%d)\n", state); ret = -EINVAL; goto end; } - state = ext_disp->audio_sdev.state; - if (state == new_state) - goto end; - - if (ext_disp->ack_enabled && - atomic_read(&ext_disp->ack_pending)) { + if (state < EXT_DISPLAY_CABLE_DISCONNECT || + state >= EXT_DISPLAY_CABLE_STATE_MAX) { + pr_err("Invalid state (%d)\n", state); ret = -EINVAL; - pr_err("%s ack pending, not notifying %s\n", - state ? "connect" : "disconnect", - new_state ? 
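/*
 * Editor's sketch (not part of the diff): the msm_ext_display hunks
 * around this point replace the ack_pending flag with a completion that
 * the notify/ack/teardown paths signal and the HPD path waits on.  The
 * core of that handshake, with hypothetical names, is:
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct example_disp {
	struct completion hpd_comp;	/* init_completion() at probe time */
};

/* caller side: send the event, then wait up to two seconds for the ack */
static int example_wait_for_ack(struct example_disp *disp)
{
	unsigned long left;

	reinit_completion(&disp->hpd_comp);
	left = wait_for_completion_timeout(&disp->hpd_comp, HZ * 2);

	return left ? 0 : -ETIMEDOUT;	/* zero jiffies left means timeout */
}

/* ack side: whoever finishes the work releases the waiter */
static void example_ack(struct example_disp *disp)
{
	complete_all(&disp->hpd_comp);
}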
"connect" : "disconnect"); goto end; } - ret = msm_ext_disp_get_intf_data(ext_disp, ext_disp->current_disp, - &data); - if (ret) - goto end; - - if (new_state == EXT_DISPLAY_CABLE_CONNECT && ext_disp->ops) { - ext_disp->ops->audio_info_setup = - msm_ext_disp_audio_info_setup; - ext_disp->ops->get_audio_edid_blk = - msm_ext_disp_get_audio_edid_blk; - ext_disp->ops->cable_status = - msm_ext_disp_cable_status; - ext_disp->ops->get_intf_id = - msm_ext_disp_get_intf_id; - ext_disp->ops->teardown_done = - msm_ext_disp_teardown_done; - } - - switch_set_state(&ext_disp->audio_sdev, (int)new_state); - switched = ext_disp->audio_sdev.state != state; - - if (ext_disp->ack_enabled && switched) - atomic_set(&ext_disp->ack_pending, 1); - - pr_debug("audio %s %s\n", switched ? "switched to" : "same as", - ext_disp->audio_sdev.state ? "HDMI" : "SPKR"); + pr_debug("%s notifying hpd (%d)\n", + msm_ext_disp_name(ext_disp->current_disp), state); + complete_all(&ext_disp->hpd_comp); end: - mutex_unlock(&ext_disp->lock); - return ret; } static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack) { u32 ack_hpd; - u32 hpd; int ret = 0; struct msm_ext_disp *ext_disp = NULL; @@ -624,10 +703,6 @@ static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack) return -EINVAL; } - mutex_lock(&ext_disp->lock); - - hpd = ext_disp->current_disp != EXT_DISPLAY_TYPE_MAX; - if (ack & AUDIO_ACK_SET_ENABLE) { ext_disp->ack_enabled = ack & AUDIO_ACK_ENABLE ? true : false; @@ -640,44 +715,14 @@ static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack) if (!ext_disp->ack_enabled) goto end; - atomic_set(&ext_disp->ack_pending, 0); - ack_hpd = ack & AUDIO_ACK_CONNECT; - pr_debug("acknowledging %s\n", - ack_hpd ? "connect" : "disconnect"); - - /** - * If the ack feature is enabled and we receive an ack for - * disconnect then we reset the current display state to - * empty. - */ - if (!ack_hpd) { - if (ext_disp->ops) { - ext_disp->ops->audio_info_setup = NULL; - ext_disp->ops->get_audio_edid_blk = NULL; - ext_disp->ops->cable_status = NULL; - ext_disp->ops->get_intf_id = NULL; - ext_disp->ops->teardown_done = NULL; - } - - ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX; - } - - if (ack_hpd != hpd) { - pr_err("unbalanced audio state, ack %d, hpd %d\n", - ack_hpd, hpd); - - mutex_unlock(&ext_disp->lock); - - ret = msm_ext_disp_notify(pdev, hpd); - - return ret; - } + pr_debug("%s acknowledging audio (%d)\n", + msm_ext_disp_name(ext_disp->current_disp), ack_hpd); + if (!ext_disp->audio_session_on) + complete_all(&ext_disp->hpd_comp); end: - mutex_unlock(&ext_disp->lock); - return ret; } @@ -850,6 +895,7 @@ static int msm_ext_disp_probe(struct platform_device *pdev) mutex_init(&ext_disp->lock); INIT_LIST_HEAD(&ext_disp->display_list); + init_completion(&ext_disp->hpd_comp); ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX; return ret; diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 7d3e5d0e9aa4..8ab6238c9299 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -73,7 +73,7 @@ struct virtio_balloon { /* The array of pfns we tell the Host about. 
*/ unsigned int num_pfns; - u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX]; + __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX]; /* Memory statistics */ int need_stats_update; @@ -125,14 +125,16 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) wait_event(vb->acked, virtqueue_get_buf(vq, &len)); } -static void set_page_pfns(u32 pfns[], struct page *page) +static void set_page_pfns(struct virtio_balloon *vb, + __virtio32 pfns[], struct page *page) { unsigned int i; /* Set balloon pfns pointing at this page. * Note that the first pfn points at start of the page. */ for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++) - pfns[i] = page_to_balloon_pfn(page) + i; + pfns[i] = cpu_to_virtio32(vb->vdev, + page_to_balloon_pfn(page) + i); } static void fill_balloon(struct virtio_balloon *vb, size_t num) @@ -155,7 +157,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num) msleep(200); break; } - set_page_pfns(vb->pfns + vb->num_pfns, page); + set_page_pfns(vb, vb->pfns + vb->num_pfns, page); vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE; if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) @@ -171,10 +173,12 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num) static void release_pages_balloon(struct virtio_balloon *vb) { unsigned int i; + struct page *page; /* Find pfns pointing at start of each page, get pages and free them. */ for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) { - struct page *page = balloon_pfn_to_page(vb->pfns[i]); + page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev, + vb->pfns[i])); if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) adjust_managed_page_count(page, 1); @@ -197,7 +201,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num) page = balloon_page_dequeue(vb_dev_info); if (!page) break; - set_page_pfns(vb->pfns + vb->num_pfns, page); + set_page_pfns(vb, vb->pfns + vb->num_pfns, page); vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE; } @@ -465,13 +469,13 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, __count_vm_event(BALLOON_MIGRATE); spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; - set_page_pfns(vb->pfns, newpage); + set_page_pfns(vb, vb->pfns, newpage); tell_host(vb, vb->inflate_vq); /* balloon's page migration 2nd step -- deflate "page" */ balloon_page_delete(page); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; - set_page_pfns(vb->pfns, page); + set_page_pfns(vb, vb->pfns, page); tell_host(vb, vb->deflate_vq); mutex_unlock(&vb->balloon_lock); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 364bc44610c1..cfab1d24e4bc 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -152,8 +152,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); static void balloon_process(struct work_struct *work); static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); -static void release_memory_resource(struct resource *resource); - /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. 
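/*
 * Editor's sketch (not part of the diff): the virtio_balloon hunk above
 * stores pfns as __virtio32 and converts through the virtio_device so the
 * array matches the device's declared byte order.  The helpers are used
 * roughly like this (assuming the pfn fits in 32 bits, as the balloon
 * driver does):
 */
#include <linux/virtio_config.h>

static void example_store_pfn(struct virtio_device *vdev,
			      __virtio32 *slot, u32 pfn)
{
	*slot = cpu_to_virtio32(vdev, pfn);	/* CPU order -> device order */
}

static u32 example_load_pfn(struct virtio_device *vdev, __virtio32 slot)
{
	return virtio32_to_cpu(vdev, slot);	/* device order -> CPU order */
}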
*/ #define GFP_BALLOON \ @@ -249,6 +247,19 @@ static enum bp_state update_schedule(enum bp_state state) } #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG +static void release_memory_resource(struct resource *resource) +{ + if (!resource) + return; + + /* + * No need to reset region to identity mapped since we now + * know that no I/O can be in this region + */ + release_resource(resource); + kfree(resource); +} + static struct resource *additional_memory_resource(phys_addr_t size) { struct resource *res; @@ -287,19 +298,6 @@ static struct resource *additional_memory_resource(phys_addr_t size) return res; } -static void release_memory_resource(struct resource *resource) -{ - if (!resource) - return; - - /* - * No need to reset region to identity mapped since we now - * know that no I/O can be in this region - */ - release_resource(resource); - kfree(resource); -} - static enum bp_state reserve_additional_memory(void) { long credit; diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 44367783f07a..83ec7b89d308 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -487,7 +487,8 @@ static void eoi_pirq(struct irq_data *data) if (!VALID_EVTCHN(evtchn)) return; - if (unlikely(irqd_is_setaffinity_pending(data))) { + if (unlikely(irqd_is_setaffinity_pending(data)) && + likely(!irqd_irq_disabled(data))) { int masked = test_and_set_mask(evtchn); clear_evtchn(evtchn); @@ -1370,7 +1371,8 @@ static void ack_dynirq(struct irq_data *data) if (!VALID_EVTCHN(evtchn)) return; - if (unlikely(irqd_is_setaffinity_pending(data))) { + if (unlikely(irqd_is_setaffinity_pending(data)) && + likely(!irqd_irq_disabled(data))) { int masked = test_and_set_mask(evtchn); clear_evtchn(evtchn); diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 70fa438000af..611f9c11da85 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -423,36 +423,7 @@ upload: return 0; } -static int __init check_prereq(void) -{ - struct cpuinfo_x86 *c = &cpu_data(0); - - if (!xen_initial_domain()) - return -ENODEV; - - if (!acpi_gbl_FADT.smi_command) - return -ENODEV; - - if (c->x86_vendor == X86_VENDOR_INTEL) { - if (!cpu_has(c, X86_FEATURE_EST)) - return -ENODEV; - return 0; - } - if (c->x86_vendor == X86_VENDOR_AMD) { - /* Copied from powernow-k8.h, can't include ../cpufreq/powernow - * as we get compile warnings for the static functions. - */ -#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 -#define USE_HW_PSTATE 0x00000080 - u32 eax, ebx, ecx, edx; - cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); - if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE) - return -ENODEV; - return 0; - } - return -ENODEV; -} /* acpi_perf_data is a pointer to percpu data. 
*/ static struct acpi_processor_performance __percpu *acpi_perf_data; @@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = { static int __init xen_acpi_processor_init(void) { unsigned int i; - int rc = check_prereq(); + int rc; - if (rc) - return rc; + if (!xen_initial_domain()) + return -ENODEV; nr_acpi_bits = get_max_acpi_id() + 1; acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL); diff --git a/fs/affs/super.c b/fs/affs/super.c index 5b50c4ca43a7..f90c535703ce 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -528,7 +528,7 @@ affs_remount(struct super_block *sb, int *flags, char *data) char *prefix = NULL; new_opts = kstrdup(data, GFP_KERNEL); - if (!new_opts) + if (data && !new_opts) return -ENOMEM; pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data); @@ -546,7 +546,8 @@ affs_remount(struct super_block *sb, int *flags, char *data) } flush_delayed_work(&sbi->sb_work); - replace_mount_options(sb, new_opts); + if (new_opts) + replace_mount_options(sb, new_opts); sbi->s_flags = mount_flags; sbi->s_mode = mode; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 5b8e235c4b6d..0f2b7c622ce3 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1551,6 +1551,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, trans->transid, root->fs_info->generation); if (!should_cow_block(trans, root, buf)) { + trans->dirty = true; *cow_ret = buf; return 0; } @@ -2773,8 +2774,10 @@ again: * then we don't want to set the path blocking, * so we test it here */ - if (!should_cow_block(trans, root, b)) + if (!should_cow_block(trans, root, b)) { + trans->dirty = true; goto cow_done; + } /* * must have write locks on this node and the diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 2368cac1115a..47cdc6f3390b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7856,7 +7856,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, set_extent_dirty(&trans->transaction->dirty_pages, buf->start, buf->start + buf->len - 1, GFP_NOFS); } - trans->blocks_used++; + trans->dirty = true; /* this returns a buffer locked for blocking */ return buf; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f07d01bc4875..bfcd87ee8ff5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1648,7 +1648,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, src_inode = file_inode(src.file); if (src_inode->i_sb != file_inode(file)->i_sb) { - btrfs_info(BTRFS_I(src_inode)->root->fs_info, + btrfs_info(BTRFS_I(file_inode(file))->root->fs_info, "Snapshot src from another FS"); ret = -EXDEV; } else if (!inode_owner_or_capable(src_inode)) { diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index fe609b81dd1b..5d34a062ca4f 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -239,7 +239,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, trans->aborted = errno; /* Nothing used. The other threads that have joined this * transaction may be able to continue. 
*/ - if (!trans->blocks_used && list_empty(&trans->new_bgs)) { + if (!trans->dirty && list_empty(&trans->new_bgs)) { const char *errstr; errstr = btrfs_decode_error(errno); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 64c8221b6165..1e872923ec2c 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -110,7 +110,6 @@ struct btrfs_trans_handle { u64 chunk_bytes_reserved; unsigned long use_count; unsigned long blocks_reserved; - unsigned long blocks_used; unsigned long delayed_ref_updates; struct btrfs_transaction *transaction; struct btrfs_block_rsv *block_rsv; @@ -121,6 +120,7 @@ struct btrfs_trans_handle { bool can_flush_pending_bgs; bool reloc_reserved; bool sync; + bool dirty; unsigned int type; /* * this root is only needed to validate that the root passed to diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 5a53ac6b1e02..02b071bf3732 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target) case SFM_SLASH: *target = '\\'; break; + case SFM_SPACE: + *target = ' '; + break; + case SFM_PERIOD: + *target = '.'; + break; default: return false; } @@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char) return dest_char; } -static __le16 convert_to_sfm_char(char src_char) +static __le16 convert_to_sfm_char(char src_char, bool end_of_string) { __le16 dest_char; @@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char) case '|': dest_char = cpu_to_le16(SFM_PIPE); break; + case '.': + if (end_of_string) + dest_char = cpu_to_le16(SFM_PERIOD); + else + dest_char = 0; + break; + case ' ': + if (end_of_string) + dest_char = cpu_to_le16(SFM_SPACE); + else + dest_char = 0; + break; default: dest_char = 0; } @@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, /* see if we must remap this char */ if (map_chars == SFU_MAP_UNI_RSVD) dst_char = convert_to_sfu_char(src_char); - else if (map_chars == SFM_MAP_UNI_RSVD) - dst_char = convert_to_sfm_char(src_char); - else + else if (map_chars == SFM_MAP_UNI_RSVD) { + bool end_of_string; + + if (i == srclen - 1) + end_of_string = true; + else + end_of_string = false; + + dst_char = convert_to_sfm_char(src_char, end_of_string); + } else dst_char = 0; /* * FIXME: We can not handle remapping backslash (UNI_SLASH) diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index bdc52cb9a676..479bc0a941f3 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h @@ -64,6 +64,8 @@ #define SFM_LESSTHAN ((__u16) 0xF023) #define SFM_PIPE ((__u16) 0xF027) #define SFM_SLASH ((__u16) 0xF026) +#define SFM_PERIOD ((__u16) 0xF028) +#define SFM_SPACE ((__u16) 0xF029) /* * Mapping mechanism to use when one of the seven reserved characters is diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 3c194ff0d2f0..5481a6eb9a95 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -425,7 +425,9 @@ cifs_echo_request(struct work_struct *work) * server->ops->need_neg() == true. Also, no need to ping if * we got a response recently. 
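The cifs_unicode change above remaps '.' and ' ' only when they are the final character of the string being converted, using the two new SFM code points; everything else falls through unchanged. Reduced to a standalone sketch (constants copied from cifs_unicode.h above):

#include <stdint.h>

#define SFM_PERIOD 0xF028
#define SFM_SPACE  0xF029

/* Returns the SFM replacement code point, or 0 when no remap is needed,
 * which is the same convention convert_to_sfm_char() uses. */
static uint16_t sfm_remap(char c, int end_of_string)
{
	if (end_of_string && c == '.')
		return SFM_PERIOD;
	if (end_of_string && c == ' ')
		return SFM_SPACE;
	return 0;
}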
*/ - if (!server->ops->need_neg || server->ops->need_neg(server) || + + if (server->tcpStatus == CifsNeedReconnect || + server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew || (server->ops->can_echo && !server->ops->can_echo(server)) || time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) goto requeue_echo; diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h index 848249fa120f..3079b38f0afb 100644 --- a/fs/cifs/ntlmssp.h +++ b/fs/cifs/ntlmssp.h @@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE { int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses); void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses); -int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen, +int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen, struct cifs_ses *ses, const struct nls_table *nls_cp); diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 59727e32ed0f..e88ffe1da045 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, sec_blob->DomainName.MaximumLength = 0; } -/* We do not malloc the blob, it is passed in pbuffer, because its - maximum possible size is fixed and small, making this approach cleaner. - This function returns the length of the data in the blob */ -int build_ntlmssp_auth_blob(unsigned char *pbuffer, +static int size_of_ntlmssp_blob(struct cifs_ses *ses) +{ + int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len + - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2; + + if (ses->domainName) + sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); + else + sz += 2; + + if (ses->user_name) + sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN); + else + sz += 2; + + return sz; +} + +int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen, struct cifs_ses *ses, const struct nls_table *nls_cp) { int rc; - AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; + AUTHENTICATE_MESSAGE *sec_blob; __u32 flags; unsigned char *tmp; + rc = setup_ntlmv2_rsp(ses, nls_cp); + if (rc) { + cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc); + *buflen = 0; + goto setup_ntlmv2_ret; + } + *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL); + sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer; + memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); sec_blob->MessageType = NtLmAuthenticate; @@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer, flags |= NTLMSSP_NEGOTIATE_KEY_XCH; } - tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE); + tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE); sec_blob->NegotiateFlags = cpu_to_le32(flags); sec_blob->LmChallengeResponse.BufferOffset = @@ -399,23 +423,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer, sec_blob->LmChallengeResponse.Length = 0; sec_blob->LmChallengeResponse.MaximumLength = 0; - sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); - rc = setup_ntlmv2_rsp(ses, nls_cp); - if (rc) { - cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc); - goto setup_ntlmv2_ret; + sec_blob->NtChallengeResponse.BufferOffset = + cpu_to_le32(tmp - *pbuffer); + if (ses->user_name != NULL) { + memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE, + ses->auth_key.len - CIFS_SESS_KEY_SIZE); + tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE; + + sec_blob->NtChallengeResponse.Length = + cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); + sec_blob->NtChallengeResponse.MaximumLength = + cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); + } else 
{ + /* + * don't send an NT Response for anonymous access + */ + sec_blob->NtChallengeResponse.Length = 0; + sec_blob->NtChallengeResponse.MaximumLength = 0; } - memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE, - ses->auth_key.len - CIFS_SESS_KEY_SIZE); - tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE; - - sec_blob->NtChallengeResponse.Length = - cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); - sec_blob->NtChallengeResponse.MaximumLength = - cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); if (ses->domainName == NULL) { - sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); + sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->DomainName.Length = 0; sec_blob->DomainName.MaximumLength = 0; tmp += 2; @@ -424,14 +452,14 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer, len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName, CIFS_MAX_USERNAME_LEN, nls_cp); len *= 2; /* unicode is 2 bytes each */ - sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); + sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->DomainName.Length = cpu_to_le16(len); sec_blob->DomainName.MaximumLength = cpu_to_le16(len); tmp += len; } if (ses->user_name == NULL) { - sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); + sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->UserName.Length = 0; sec_blob->UserName.MaximumLength = 0; tmp += 2; @@ -440,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer, len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name, CIFS_MAX_USERNAME_LEN, nls_cp); len *= 2; /* unicode is 2 bytes each */ - sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); + sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->UserName.Length = cpu_to_le16(len); sec_blob->UserName.MaximumLength = cpu_to_le16(len); tmp += len; } - sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer); + sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->WorkstationName.Length = 0; sec_blob->WorkstationName.MaximumLength = 0; tmp += 2; @@ -455,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer, (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) && !calc_seckey(ses)) { memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); - sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); + sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE); sec_blob->SessionKey.MaximumLength = cpu_to_le16(CIFS_CPHTXT_SIZE); tmp += CIFS_CPHTXT_SIZE; } else { - sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); + sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->SessionKey.Length = 0; sec_blob->SessionKey.MaximumLength = 0; } + *buflen = tmp - *pbuffer; setup_ntlmv2_ret: - *buflen = tmp - pbuffer; return rc; } @@ -670,20 +698,24 @@ sess_auth_lanman(struct sess_data *sess_data) pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; - /* no capabilities flags in old lanman negotiation */ - pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); - - /* Calculate hash with password and copy into bcc_ptr. - * Encryption Key (stored as in cryptkey) gets used if the - * security mode bit in Negottiate Protocol response states - * to use challenge/response method (i.e. Password bit is 1). - */ - rc = calc_lanman_hash(ses->password, ses->server->cryptkey, - ses->server->sec_mode & SECMODE_PW_ENCRYPT ? 
- true : false, lnm_session_key); - - memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); - bcc_ptr += CIFS_AUTH_RESP_SIZE; + if (ses->user_name != NULL) { + /* no capabilities flags in old lanman negotiation */ + pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); + + /* Calculate hash with password and copy into bcc_ptr. + * Encryption Key (stored as in cryptkey) gets used if the + * security mode bit in Negottiate Protocol response states + * to use challenge/response method (i.e. Password bit is 1). + */ + rc = calc_lanman_hash(ses->password, ses->server->cryptkey, + ses->server->sec_mode & SECMODE_PW_ENCRYPT ? + true : false, lnm_session_key); + + memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); + bcc_ptr += CIFS_AUTH_RESP_SIZE; + } else { + pSMB->old_req.PasswordLength = 0; + } /* * can not sign if LANMAN negotiated so no need @@ -769,27 +801,32 @@ sess_auth_ntlm(struct sess_data *sess_data) capabilities = cifs_ssetup_hdr(ses, pSMB); pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); - pSMB->req_no_secext.CaseInsensitivePasswordLength = - cpu_to_le16(CIFS_AUTH_RESP_SIZE); - pSMB->req_no_secext.CaseSensitivePasswordLength = - cpu_to_le16(CIFS_AUTH_RESP_SIZE); + if (ses->user_name != NULL) { + pSMB->req_no_secext.CaseInsensitivePasswordLength = + cpu_to_le16(CIFS_AUTH_RESP_SIZE); + pSMB->req_no_secext.CaseSensitivePasswordLength = + cpu_to_le16(CIFS_AUTH_RESP_SIZE); + + /* calculate ntlm response and session key */ + rc = setup_ntlm_response(ses, sess_data->nls_cp); + if (rc) { + cifs_dbg(VFS, "Error %d during NTLM authentication\n", + rc); + goto out; + } - /* calculate ntlm response and session key */ - rc = setup_ntlm_response(ses, sess_data->nls_cp); - if (rc) { - cifs_dbg(VFS, "Error %d during NTLM authentication\n", - rc); - goto out; + /* copy ntlm response */ + memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, + CIFS_AUTH_RESP_SIZE); + bcc_ptr += CIFS_AUTH_RESP_SIZE; + memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, + CIFS_AUTH_RESP_SIZE); + bcc_ptr += CIFS_AUTH_RESP_SIZE; + } else { + pSMB->req_no_secext.CaseInsensitivePasswordLength = 0; + pSMB->req_no_secext.CaseSensitivePasswordLength = 0; } - /* copy ntlm response */ - memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, - CIFS_AUTH_RESP_SIZE); - bcc_ptr += CIFS_AUTH_RESP_SIZE; - memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, - CIFS_AUTH_RESP_SIZE); - bcc_ptr += CIFS_AUTH_RESP_SIZE; - if (ses->capabilities & CAP_UNICODE) { /* unicode strings must be word aligned */ if (sess_data->iov[0].iov_len % 2) { @@ -878,22 +915,26 @@ sess_auth_ntlmv2(struct sess_data *sess_data) /* LM2 password would be here if we supported it */ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0; - /* calculate nlmv2 response and session key */ - rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp); - if (rc) { - cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc); - goto out; - } + if (ses->user_name != NULL) { + /* calculate nlmv2 response and session key */ + rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp); + if (rc) { + cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc); + goto out; + } - memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, - ses->auth_key.len - CIFS_SESS_KEY_SIZE); - bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE; + memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, + ses->auth_key.len - CIFS_SESS_KEY_SIZE); + bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE; - /* set case sensitive password length 
after tilen may get - * assigned, tilen is 0 otherwise. - */ - pSMB->req_no_secext.CaseSensitivePasswordLength = - cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); + /* set case sensitive password length after tilen may get + * assigned, tilen is 0 otherwise. + */ + pSMB->req_no_secext.CaseSensitivePasswordLength = + cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); + } else { + pSMB->req_no_secext.CaseSensitivePasswordLength = 0; + } if (ses->capabilities & CAP_UNICODE) { if (sess_data->iov[0].iov_len % 2) { @@ -1245,7 +1286,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data) struct cifs_ses *ses = sess_data->ses; __u16 bytes_remaining; char *bcc_ptr; - char *ntlmsspblob = NULL; + unsigned char *ntlmsspblob = NULL; u16 blob_len; cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n"); @@ -1258,19 +1299,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data) /* Build security blob before we assemble the request */ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)pSMB; - /* - * 5 is an empirical value, large enough to hold - * authenticate message plus max 10 of av paris, - * domain, user, workstation names, flags, etc. - */ - ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE), - GFP_KERNEL); - if (!ntlmsspblob) { - rc = -ENOMEM; - goto out; - } - - rc = build_ntlmssp_auth_blob(ntlmsspblob, + rc = build_ntlmssp_auth_blob(&ntlmsspblob, &blob_len, ses, sess_data->nls_cp); if (rc) goto out_free_ntlmsspblob; diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h index bc0bb9c34f72..0ffa18094335 100644 --- a/fs/cifs/smb2glob.h +++ b/fs/cifs/smb2glob.h @@ -44,6 +44,7 @@ #define SMB2_OP_DELETE 7 #define SMB2_OP_HARDLINK 8 #define SMB2_OP_SET_EOF 9 +#define SMB2_OP_RMDIR 10 /* Used when constructing chained read requests. */ #define CHAINED_REQUEST 1 diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 899bbc86f73e..4f0231e685a9 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon, * SMB2_open() call. 
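With the prototype change above, build_ntlmssp_auth_blob() now sizes and allocates the blob itself and returns it through a double pointer, so the buffer is owned and freed by the caller. A trimmed caller fragment, assuming ses and nls_cp from the surrounding session-setup code (kfree(NULL) is a no-op):

	unsigned char *blob = NULL;
	u16 blob_len = 0;
	int rc;

	rc = build_ntlmssp_auth_blob(&blob, &blob_len, ses, nls_cp);
	if (!rc) {
		/* copy blob_len bytes from blob into the request iov */
	}
	kfree(blob);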
*/ break; + case SMB2_OP_RMDIR: + tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid, + fid.volatile_fid); + break; case SMB2_OP_RENAME: tmprc = SMB2_rename(xid, tcon, fid.persistent_fid, fid.volatile_fid, (__le16 *)data); @@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN, - CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE, - NULL, SMB2_OP_DELETE); + CREATE_NOT_FILE, + NULL, SMB2_OP_RMDIR); } int diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 373b5cd1c913..0b6dc1942bdc 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -591,7 +591,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, u16 blob_length = 0; struct key *spnego_key = NULL; char *security_blob = NULL; - char *ntlmssp_blob = NULL; + unsigned char *ntlmssp_blob = NULL; bool use_spnego = false; /* else use raw ntlmssp */ cifs_dbg(FYI, "Session Setup\n"); @@ -716,13 +716,7 @@ ssetup_ntlmssp_authenticate: iov[1].iov_len = blob_length; } else if (phase == NtLmAuthenticate) { req->hdr.SessionId = ses->Suid; - ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500, - GFP_KERNEL); - if (ntlmssp_blob == NULL) { - rc = -ENOMEM; - goto ssetup_exit; - } - rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses, + rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses, nls_cp); if (rc) { cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", @@ -1820,6 +1814,33 @@ SMB2_echo(struct TCP_Server_Info *server) cifs_dbg(FYI, "In echo request\n"); + if (server->tcpStatus == CifsNeedNegotiate) { + struct list_head *tmp, *tmp2; + struct cifs_ses *ses; + struct cifs_tcon *tcon; + + cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); + spin_lock(&cifs_tcp_ses_lock); + list_for_each(tmp, &server->smb_ses_list) { + ses = list_entry(tmp, struct cifs_ses, smb_ses_list); + list_for_each(tmp2, &ses->tcon_list) { + tcon = list_entry(tmp2, struct cifs_tcon, + tcon_list); + /* add check for persistent handle reconnect */ + if (tcon && tcon->need_reconnect) { + spin_unlock(&cifs_tcp_ses_lock); + rc = smb2_reconnect(SMB2_ECHO, tcon); + spin_lock(&cifs_tcp_ses_lock); + } + } + } + spin_unlock(&cifs_tcp_ses_lock); + } + + /* if no session, renegotiate failed above */ + if (server->tcpStatus == CifsNeedNegotiate) + return -EIO; + rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req); if (rc) return rc; @@ -2577,6 +2598,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon, } int +SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, + u64 persistent_fid, u64 volatile_fid) +{ + __u8 delete_pending = 1; + void *data; + unsigned int size; + + data = &delete_pending; + size = 1; /* sizeof __u8 */ + + return send_set_info(xid, tcon, persistent_fid, volatile_fid, + current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data, + &size); +} + +int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le16 *target_file) { diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 79dc650c18b2..9bc59f9c12fb 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -140,6 +140,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le16 *target_file); +extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, + u64 persistent_fid, u64 volatile_fid); extern int 
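SMB2_rmdir() above removes a directory by opening it with DELETE access and then issuing SET_INFO with FILE_DISPOSITION_INFORMATION, instead of relying on CREATE_DELETE_ON_CLOSE. The information payload it sends is a single byte; a sketch of the assumed on-the-wire layout (per MS-FSCC, not taken from this patch):

#include <stdint.h>

/* FileDispositionInformation: DeletePending = 1 asks the server to delete
 * the directory once the last open handle on it is closed. */
struct file_disposition_info_sketch {
	uint8_t delete_pending;
} __attribute__((packed));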
SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le16 *target_file); diff --git a/fs/dcache.c b/fs/dcache.c index 240935d77844..d04920036269 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1618,7 +1618,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) struct dentry *dentry = __d_alloc(parent->d_sb, name); if (!dentry) return NULL; - + dentry->d_flags |= DCACHE_RCUACCESS; spin_lock(&parent->d_lock); /* * don't need child lock because it is not subject @@ -2413,7 +2413,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) { BUG_ON(!d_unhashed(entry)); hlist_bl_lock(b); - entry->d_flags |= DCACHE_RCUACCESS; hlist_bl_add_head_rcu(&entry->d_hash, b); hlist_bl_unlock(b); } @@ -2632,6 +2631,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target, /* ... and switch them in the tree */ if (IS_ROOT(dentry)) { /* splicing a tree */ + dentry->d_flags |= DCACHE_RCUACCESS; dentry->d_parent = target->d_parent; target->d_parent = target; list_del_init(&target->d_child); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 4c999ce7e73a..3ab9c68b8bce 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1588,7 +1588,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, { int res = 0, eavail, timed_out = 0; unsigned long flags; - long slack = 0; + u64 slack = 0; wait_queue_t wait; ktime_t expires, *to = NULL; diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 53f2b98a69f3..5388207d2832 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -1143,25 +1143,20 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count); ext4_group_t block_group; int bit; - struct buffer_head *bitmap_bh; + struct buffer_head *bitmap_bh = NULL; struct inode *inode = NULL; - long err = -EIO; + int err = -EFSCORRUPTED; - /* Error cases - e2fsck has already cleaned up for us */ - if (ino > max_ino) { - ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino); - err = -EFSCORRUPTED; - goto error; - } + if (ino < EXT4_FIRST_INO(sb) || ino > max_ino) + goto bad_orphan; block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); bitmap_bh = ext4_read_inode_bitmap(sb, block_group); if (IS_ERR(bitmap_bh)) { - err = PTR_ERR(bitmap_bh); - ext4_warning(sb, "inode bitmap error %ld for orphan %lu", - ino, err); - goto error; + ext4_error(sb, "inode bitmap error %ld for orphan %lu", + ino, PTR_ERR(bitmap_bh)); + return (struct inode *) bitmap_bh; } /* Having the inode bit set should be a 100% indicator that this @@ -1172,15 +1167,21 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) goto bad_orphan; inode = ext4_iget(sb, ino); - if (IS_ERR(inode)) - goto iget_failed; + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + ext4_error(sb, "couldn't read orphan inode %lu (err %d)", + ino, err); + return inode; + } /* - * If the orphans has i_nlinks > 0 then it should be able to be - * truncated, otherwise it won't be removed from the orphan list - * during processing and an infinite loop will result. + * If the orphans has i_nlinks > 0 then it should be able to + * be truncated, otherwise it won't be removed from the orphan + * list during processing and an infinite loop will result. + * Similarly, it must not be a bad inode. 
*/ - if (inode->i_nlink && !ext4_can_truncate(inode)) + if ((inode->i_nlink && !ext4_can_truncate(inode)) || + is_bad_inode(inode)) goto bad_orphan; if (NEXT_ORPHAN(inode) > max_ino) @@ -1188,29 +1189,25 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) brelse(bitmap_bh); return inode; -iget_failed: - err = PTR_ERR(inode); - inode = NULL; bad_orphan: - ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino); - printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n", - bit, (unsigned long long)bitmap_bh->b_blocknr, - ext4_test_bit(bit, bitmap_bh->b_data)); - printk(KERN_WARNING "inode=%p\n", inode); + ext4_error(sb, "bad orphan inode %lu", ino); + if (bitmap_bh) + printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n", + bit, (unsigned long long)bitmap_bh->b_blocknr, + ext4_test_bit(bit, bitmap_bh->b_data)); if (inode) { - printk(KERN_WARNING "is_bad_inode(inode)=%d\n", + printk(KERN_ERR "is_bad_inode(inode)=%d\n", is_bad_inode(inode)); - printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n", + printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n", NEXT_ORPHAN(inode)); - printk(KERN_WARNING "max_ino=%lu\n", max_ino); - printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink); + printk(KERN_ERR "max_ino=%lu\n", max_ino); + printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink); /* Avoid freeing blocks if we got a bad deleted inode */ if (inode->i_nlink == 0) inode->i_blocks = 0; iput(inode); } brelse(bitmap_bh); -error: return ERR_PTR(err); } diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index a235f3c20433..2f53c3822daa 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1259,6 +1259,7 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) { int order = 1; + int bb_incr = 1 << (e4b->bd_blkbits - 1); void *bb; BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); @@ -1271,7 +1272,8 @@ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) /* this block is part of buddy of order 'order' */ return order; } - bb += 1 << (e4b->bd_blkbits - order); + bb += bb_incr; + bb_incr >>= 1; order++; } return 0; @@ -2576,7 +2578,7 @@ int ext4_mb_init(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned i, j; - unsigned offset; + unsigned offset, offset_incr; unsigned max; int ret; @@ -2605,11 +2607,13 @@ int ext4_mb_init(struct super_block *sb) i = 1; offset = 0; + offset_incr = 1 << (sb->s_blocksize_bits - 1); max = sb->s_blocksize << 2; do { sbi->s_mb_offsets[i] = offset; sbi->s_mb_maxs[i] = max; - offset += 1 << (sb->s_blocksize_bits - i); + offset += offset_incr; + offset_incr = offset_incr >> 1; max = max >> 1; i++; } while (i <= sb->s_blocksize_bits + 1); diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 9bdbf98240a0..796ff0eafd3c 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -390,6 +390,7 @@ data_copy: *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0); if (*err < 0) break; + bh = bh->b_this_page; } if (!*err) *err = block_commit_write(pagep[0], from, from + replaced_size); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index c9aad3b8951f..d5a634ed6c93 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2824,7 +2824,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) * list entries can cause panics at unmount time. 
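The mballoc hunks above replace increments of the form 1 << (bits - i) with a value that is simply halved on every pass. The offsets that are actually consumed stay identical; the difference is that the old expression shifts by a negative count on the final iteration, once i exceeds s_blocksize_bits, which is undefined behaviour that tools such as UBSAN flag. A worked example with a 4 KiB block size:

#include <stdio.h>

int main(void)
{
	unsigned offset = 0, offset_incr = 1u << (12 - 1);	/* s_blocksize_bits == 12 */
	unsigned i;

	for (i = 1; i <= 12 + 1; i++) {
		printf("i=%2u offset=%u\n", i, offset);	/* 0, 2048, 3072, 3584, ... */
		offset += offset_incr;	/* old code: offset += 1 << (12 - i); UB at i == 13 */
		offset_incr >>= 1;
	}
	return 0;
}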
*/ mutex_lock(&sbi->s_orphan_lock); - list_del(&EXT4_I(inode)->i_orphan); + list_del_init(&EXT4_I(inode)->i_orphan); mutex_unlock(&sbi->s_orphan_lock); } } diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 1398674f0614..5f83efb824a3 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -14,6 +14,7 @@ #include <linux/poll.h> #include <linux/uio.h> #include <linux/miscdevice.h> +#include <linux/namei.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/slab.h> @@ -1941,6 +1942,10 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, cs->move_pages = 0; err = copy_out_args(cs, &req->out, nbytes); + if (req->in.h.opcode == FUSE_CANONICAL_PATH) { + req->out.h.error = kern_path((char *)req->out.args[0].value, 0, + req->canonical_path); + } fuse_copy_finish(cs); fuse_setup_passthrough(fc, req); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 640f66719314..48fb86bc153d 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -267,6 +267,50 @@ invalid: goto out; } +/* + * Get the canonical path. Since we must translate to a path, this must be done + * in the context of the userspace daemon, however, the userspace daemon cannot + * look up paths on its own. Instead, we handle the lookup as a special case + * inside of the write request. + */ +static void fuse_dentry_canonical_path(const struct path *path, struct path *canonical_path) { + struct inode *inode = path->dentry->d_inode; + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_req *req; + int err; + char *path_name; + + req = fuse_get_req(fc, 1); + err = PTR_ERR(req); + if (IS_ERR(req)) + goto default_path; + + path_name = (char*)__get_free_page(GFP_KERNEL); + if (!path_name) { + fuse_put_request(fc, req); + goto default_path; + } + + req->in.h.opcode = FUSE_CANONICAL_PATH; + req->in.h.nodeid = get_node_id(inode); + req->in.numargs = 0; + req->out.numargs = 1; + req->out.args[0].size = PATH_MAX; + req->out.args[0].value = path_name; + req->canonical_path = canonical_path; + req->out.argvar = 1; + fuse_request_send(fc, req); + err = req->out.h.error; + fuse_put_request(fc, req); + free_page((unsigned long)path_name); + if (!err) + return; +default_path: + canonical_path->dentry = path->dentry; + canonical_path->mnt = path->mnt; + path_get(canonical_path); +} + static int invalid_nodeid(u64 nodeid) { return !nodeid || nodeid == FUSE_ROOT_ID; @@ -274,6 +318,7 @@ static int invalid_nodeid(u64 nodeid) const struct dentry_operations fuse_dentry_operations = { .d_revalidate = fuse_dentry_revalidate, + .d_canonical_path = fuse_dentry_canonical_path, }; int fuse_valid_type(int m) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 0cbeea6ee831..1cc0dce47a2f 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -377,6 +377,9 @@ struct fuse_req { /** Inode used in the request or NULL */ struct inode *inode; + /** Path used for completing d_canonical_path */ + struct path *canonical_path; + /** AIO control block */ struct fuse_io_priv *io; diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index a561591896bd..3713fd52b44b 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -15,6 +15,7 @@ #include <linux/sched.h> #include <linux/bitmap.h> #include <linux/slab.h> +#include <linux/seq_file.h> /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */ @@ -453,10 +454,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) int lowercase, eas, chk, errs, chkdsk, timeshift; int o; struct hpfs_sb_info *sbi = hpfs_sb(s); - char *new_opts = kstrdup(data, GFP_KERNEL); - - if (!new_opts) - return -ENOMEM; 
sync_filesystem(s); @@ -493,17 +490,44 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) if (!(*flags & MS_RDONLY)) mark_dirty(s, 1); - replace_mount_options(s, new_opts); - hpfs_unlock(s); return 0; out_err: hpfs_unlock(s); - kfree(new_opts); return -EINVAL; } +static int hpfs_show_options(struct seq_file *seq, struct dentry *root) +{ + struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb); + + seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid)); + seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid)); + seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777)); + if (sbi->sb_lowercase) + seq_printf(seq, ",case=lower"); + if (!sbi->sb_chk) + seq_printf(seq, ",check=none"); + if (sbi->sb_chk == 2) + seq_printf(seq, ",check=strict"); + if (!sbi->sb_err) + seq_printf(seq, ",errors=continue"); + if (sbi->sb_err == 2) + seq_printf(seq, ",errors=panic"); + if (!sbi->sb_chkdsk) + seq_printf(seq, ",chkdsk=no"); + if (sbi->sb_chkdsk == 2) + seq_printf(seq, ",chkdsk=always"); + if (!sbi->sb_eas) + seq_printf(seq, ",eas=no"); + if (sbi->sb_eas == 1) + seq_printf(seq, ",eas=ro"); + if (sbi->sb_timeshift) + seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift); + return 0; +} + /* Super operations */ static const struct super_operations hpfs_sops = @@ -514,7 +538,7 @@ static const struct super_operations hpfs_sops = .put_super = hpfs_put_super, .statfs = hpfs_statfs, .remount_fs = hpfs_remount_fs, - .show_options = generic_show_options, + .show_options = hpfs_show_options, }; static int hpfs_fill_super(struct super_block *s, void *options, int silent) @@ -537,8 +561,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) int o; - save_mount_options(s, options); - sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) { return -ENOMEM; diff --git a/fs/namespace.c b/fs/namespace.c index 0570729c87fd..33064fcbfff9 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2401,8 +2401,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags, mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV; } if (type->fs_flags & FS_USERNS_VISIBLE) { - if (!fs_fully_visible(type, &mnt_flags)) + if (!fs_fully_visible(type, &mnt_flags)) { + put_filesystem(type); return -EPERM; + } } } @@ -3236,6 +3238,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags) if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC) mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC); + /* Don't miss readonly hidden in the superblock flags */ + if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY) + mnt_flags |= MNT_LOCK_READONLY; + /* Verify the mount flags are equal to or more permissive * than the proposed new mount. */ @@ -3262,7 +3268,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags) list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { struct inode *inode = child->mnt_mountpoint->d_inode; /* Only worry about locked mounts */ - if (!(mnt_flags & MNT_LOCKED)) + if (!(child->mnt.mnt_flags & MNT_LOCKED)) continue; /* Is the directory permanetly empty? 
*/ if (!is_empty_dir_inode(inode)) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 5fc2162afb67..46cfed63d229 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1531,9 +1531,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, err = PTR_ERR(inode); trace_nfs_atomic_open_exit(dir, ctx, open_flags, err); put_nfs_open_context(ctx); + d_drop(dentry); switch (err) { case -ENOENT: - d_drop(dentry); d_add(dentry, NULL); nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); break; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 98a44157353a..fc215ab4dcd5 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2854,12 +2854,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) call_close |= is_wronly; else if (is_wronly) calldata->arg.fmode |= FMODE_WRITE; + if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) + call_close |= is_rdwr; } else if (is_rdwr) calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; - if (calldata->arg.fmode == 0) - call_close |= is_rdwr; - if (!nfs4_valid_open_stateid(state)) call_close = 0; spin_unlock(&state->owner->so_lock); diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index 1580ea6fd64d..d08cd88155c7 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c @@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp, goto out; inode = d_inode(fh->fh_dentry); - if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) { - error = -EOPNOTSUPP; - goto out_errno; - } error = fh_want_write(fh); if (error) goto out_errno; - error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS); + fh_lock(fh); + + error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access); if (error) - goto out_drop_write; - error = inode->i_op->set_acl(inode, argp->acl_default, - ACL_TYPE_DEFAULT); + goto out_drop_lock; + error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default); if (error) - goto out_drop_write; + goto out_drop_lock; + + fh_unlock(fh); fh_drop_write(fh); @@ -131,7 +130,8 @@ out: posix_acl_release(argp->acl_access); posix_acl_release(argp->acl_default); return nfserr; -out_drop_write: +out_drop_lock: + fh_unlock(fh); fh_drop_write(fh); out_errno: nfserr = nfserrno(error); diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c index 01df4cd7c753..0c890347cde3 100644 --- a/fs/nfsd/nfs3acl.c +++ b/fs/nfsd/nfs3acl.c @@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp, goto out; inode = d_inode(fh->fh_dentry); - if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) { - error = -EOPNOTSUPP; - goto out_errno; - } error = fh_want_write(fh); if (error) goto out_errno; - error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS); + fh_lock(fh); + + error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access); if (error) - goto out_drop_write; - error = inode->i_op->set_acl(inode, argp->acl_default, - ACL_TYPE_DEFAULT); + goto out_drop_lock; + error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default); -out_drop_write: +out_drop_lock: + fh_unlock(fh); fh_drop_write(fh); out_errno: nfserr = nfserrno(error); diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index 6adabd6049b7..71292a0d6f09 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c @@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp, dentry = fhp->fh_dentry; inode = d_inode(dentry); - if (!inode->i_op->set_acl || !IS_POSIXACL(inode)) - return nfserr_attrnotsupp; - if (S_ISDIR(inode->i_mode)) flags = NFS4_ACL_DIR; @@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh 
*fhp, if (host_error < 0) goto out_nfserr; - host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS); + fh_lock(fhp); + + host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl); if (host_error < 0) - goto out_release; + goto out_drop_lock; if (S_ISDIR(inode->i_mode)) { - host_error = inode->i_op->set_acl(inode, dpacl, - ACL_TYPE_DEFAULT); + host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl); } -out_release: +out_drop_lock: + fh_unlock(fhp); + posix_acl_release(pacl); posix_acl_release(dpacl); out_nfserr: diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index e7f50c4081d6..15bdc2d48cfe 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc } } -static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args) -{ - struct rpc_xprt *xprt; - - if (args->protocol != XPRT_TRANSPORT_BC_TCP) - return rpc_create(args); - - xprt = args->bc_xprt->xpt_bc_xprt; - if (xprt) { - xprt_get(xprt); - return rpc_create_xprt(args, xprt); - } - - return rpc_create(args); -} - static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses) { int maxtime = max_cb_time(clp->net); @@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c args.authflavor = ses->se_cb_sec.flavor; } /* Create RPC client */ - client = create_backchannel_client(&args); + client = rpc_create(&args); if (IS_ERR(client)) { dprintk("NFSD: couldn't create callback client: %ld\n", PTR_ERR(client)); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 6b800b5b8fed..ed2f64ca49de 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3452,6 +3452,10 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfs4_openowner *oo = open->op_openowner; struct nfs4_ol_stateid *retstp = NULL; + /* We are moving these outside of the spinlocks to avoid the warnings */ + mutex_init(&stp->st_mutex); + mutex_lock(&stp->st_mutex); + spin_lock(&oo->oo_owner.so_client->cl_lock); spin_lock(&fp->fi_lock); @@ -3467,13 +3471,17 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, stp->st_access_bmap = 0; stp->st_deny_bmap = 0; stp->st_openstp = NULL; - init_rwsem(&stp->st_rwsem); list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); list_add(&stp->st_perfile, &fp->fi_stateids); out_unlock: spin_unlock(&fp->fi_lock); spin_unlock(&oo->oo_owner.so_client->cl_lock); + if (retstp) { + mutex_lock(&retstp->st_mutex); + /* Not that we need to, just for neatness */ + mutex_unlock(&stp->st_mutex); + } return retstp; } @@ -4300,32 +4308,34 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf */ if (stp) { /* Stateid was found, this is an OPEN upgrade */ - down_read(&stp->st_rwsem); + mutex_lock(&stp->st_mutex); status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); if (status) { - up_read(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); goto out; } } else { stp = open->op_stp; open->op_stp = NULL; + /* + * init_open_stateid() either returns a locked stateid + * it found, or initializes and locks the new one we passed in + */ swapstp = init_open_stateid(stp, fp, open); if (swapstp) { nfs4_put_stid(&stp->st_stid); stp = swapstp; - down_read(&stp->st_rwsem); status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); if (status) { - up_read(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); goto out; } goto upgrade_out; } - 
down_read(&stp->st_rwsem); status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); if (status) { - up_read(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); release_open_stateid(stp); goto out; } @@ -4337,7 +4347,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf } upgrade_out: nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); - up_read(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); if (nfsd4_has_session(&resp->cstate)) { if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { @@ -4950,12 +4960,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_ * revoked delegations are kept only for free_stateid. */ return nfserr_bad_stateid; - down_write(&stp->st_rwsem); + mutex_lock(&stp->st_mutex); status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); if (status == nfs_ok) status = nfs4_check_fh(current_fh, &stp->st_stid); if (status != nfs_ok) - up_write(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); return status; } @@ -5003,7 +5013,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs return status; oo = openowner(stp->st_stateowner); if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { - up_write(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); nfs4_put_stid(&stp->st_stid); return nfserr_bad_stateid; } @@ -5035,12 +5045,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, oo = openowner(stp->st_stateowner); status = nfserr_bad_stateid; if (oo->oo_flags & NFS4_OO_CONFIRMED) { - up_write(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); goto put_stateid; } oo->oo_flags |= NFS4_OO_CONFIRMED; nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); - up_write(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); @@ -5116,7 +5126,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp, nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); status = nfs_ok; put_stateid: - up_write(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); nfs4_put_stid(&stp->st_stid); out: nfsd4_bump_seqid(cstate, status); @@ -5169,7 +5179,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if (status) goto out; nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); - up_write(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); nfsd4_close_open_stateid(stp); @@ -5395,7 +5405,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, stp->st_access_bmap = 0; stp->st_deny_bmap = open_stp->st_deny_bmap; stp->st_openstp = open_stp; - init_rwsem(&stp->st_rwsem); + mutex_init(&stp->st_mutex); list_add(&stp->st_locks, &open_stp->st_locks); list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); spin_lock(&fp->fi_lock); @@ -5564,7 +5574,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, &open_stp, nn); if (status) goto out; - up_write(&open_stp->st_rwsem); + mutex_unlock(&open_stp->st_mutex); open_sop = openowner(open_stp->st_stateowner); status = nfserr_bad_stateid; if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, @@ -5573,7 +5583,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, status = lookup_or_create_lock_state(cstate, open_stp, lock, &lock_stp, &new); if (status == nfs_ok) - down_write(&lock_stp->st_rwsem); + mutex_lock(&lock_stp->st_mutex); } else { status = nfs4_preprocess_seqid_op(cstate, 
lock->lk_old_lock_seqid, @@ -5677,7 +5687,7 @@ out: seqid_mutating_err(ntohl(status))) lock_sop->lo_owner.so_seqid++; - up_write(&lock_stp->st_rwsem); + mutex_unlock(&lock_stp->st_mutex); /* * If this is a new, never-before-used stateid, and we are @@ -5847,7 +5857,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, fput: fput(filp); put_stateid: - up_write(&stp->st_rwsem); + mutex_unlock(&stp->st_mutex); nfs4_put_stid(&stp->st_stid); out: nfsd4_bump_seqid(cstate, status); diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 77fdf4de91ba..77860b75da9d 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -535,7 +535,7 @@ struct nfs4_ol_stateid { unsigned char st_access_bmap; unsigned char st_deny_bmap; struct nfs4_ol_stateid *st_openstp; - struct rw_semaphore st_rwsem; + struct mutex st_mutex; }; static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s) diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index f72f3b25b3f2..e2893f17dde2 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -746,7 +746,7 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, /* support stacked filesystems */ if(path.dentry && path.dentry->d_op) { if (path.dentry->d_op->d_canonical_path) { - path.dentry->d_op->d_canonical_path(path.dentry, &alteredpath); + path.dentry->d_op->d_canonical_path(&path, &alteredpath); canonical_path = &alteredpath; path_put(&path); } diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index a2b1d7ce3e1a..ba5ef733951f 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -511,6 +511,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) struct dentry *upper; struct dentry *opaquedir = NULL; int err; + int flags = 0; if (WARN_ON(!workdir)) return -EROFS; @@ -540,46 +541,39 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) if (err) goto out_dput; - whiteout = ovl_whiteout(workdir, dentry); - err = PTR_ERR(whiteout); - if (IS_ERR(whiteout)) + upper = lookup_one_len(dentry->d_name.name, upperdir, + dentry->d_name.len); + err = PTR_ERR(upper); + if (IS_ERR(upper)) goto out_unlock; - upper = ovl_dentry_upper(dentry); - if (!upper) { - upper = lookup_one_len(dentry->d_name.name, upperdir, - dentry->d_name.len); - err = PTR_ERR(upper); - if (IS_ERR(upper)) - goto kill_whiteout; - - err = ovl_do_rename(wdir, whiteout, udir, upper, 0); - dput(upper); - if (err) - goto kill_whiteout; - } else { - int flags = 0; + err = -ESTALE; + if ((opaquedir && upper != opaquedir) || + (!opaquedir && ovl_dentry_upper(dentry) && + upper != ovl_dentry_upper(dentry))) { + goto out_dput_upper; + } - if (opaquedir) - upper = opaquedir; - err = -ESTALE; - if (upper->d_parent != upperdir) - goto kill_whiteout; + whiteout = ovl_whiteout(workdir, dentry); + err = PTR_ERR(whiteout); + if (IS_ERR(whiteout)) + goto out_dput_upper; - if (is_dir) - flags |= RENAME_EXCHANGE; + if (d_is_dir(upper)) + flags = RENAME_EXCHANGE; - err = ovl_do_rename(wdir, whiteout, udir, upper, flags); - if (err) - goto kill_whiteout; + err = ovl_do_rename(wdir, whiteout, udir, upper, flags); + if (err) + goto kill_whiteout; + if (flags) + ovl_cleanup(wdir, upper); - if (is_dir) - ovl_cleanup(wdir, upper); - } ovl_dentry_version_inc(dentry->d_parent); out_d_drop: d_drop(dentry); dput(whiteout); +out_dput_upper: + dput(upper); out_unlock: unlock_rename(workdir, upperdir); out_dput: @@ -596,21 +590,25 @@ static int ovl_remove_upper(struct dentry *dentry, bool 
is_dir) { struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct inode *dir = upperdir->d_inode; - struct dentry *upper = ovl_dentry_upper(dentry); + struct dentry *upper; int err; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); + upper = lookup_one_len(dentry->d_name.name, upperdir, + dentry->d_name.len); + err = PTR_ERR(upper); + if (IS_ERR(upper)) + goto out_unlock; + err = -ESTALE; - if (upper->d_parent == upperdir) { - /* Don't let d_delete() think it can reset d_inode */ - dget(upper); + if (upper == ovl_dentry_upper(dentry)) { if (is_dir) err = vfs_rmdir(dir, upper); else err = vfs_unlink(dir, upper, NULL); - dput(upper); ovl_dentry_version_inc(dentry->d_parent); } + dput(upper); /* * Keeping this dentry hashed would mean having to release @@ -620,6 +618,7 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir) */ if (!err) d_drop(dentry); +out_unlock: mutex_unlock(&dir->i_mutex); return err; @@ -840,29 +839,39 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, trap = lock_rename(new_upperdir, old_upperdir); - olddentry = ovl_dentry_upper(old); - newdentry = ovl_dentry_upper(new); - if (newdentry) { + + olddentry = lookup_one_len(old->d_name.name, old_upperdir, + old->d_name.len); + err = PTR_ERR(olddentry); + if (IS_ERR(olddentry)) + goto out_unlock; + + err = -ESTALE; + if (olddentry != ovl_dentry_upper(old)) + goto out_dput_old; + + newdentry = lookup_one_len(new->d_name.name, new_upperdir, + new->d_name.len); + err = PTR_ERR(newdentry); + if (IS_ERR(newdentry)) + goto out_dput_old; + + err = -ESTALE; + if (ovl_dentry_upper(new)) { if (opaquedir) { - newdentry = opaquedir; - opaquedir = NULL; + if (newdentry != opaquedir) + goto out_dput; } else { - dget(newdentry); + if (newdentry != ovl_dentry_upper(new)) + goto out_dput; } } else { new_create = true; - newdentry = lookup_one_len(new->d_name.name, new_upperdir, - new->d_name.len); - err = PTR_ERR(newdentry); - if (IS_ERR(newdentry)) - goto out_unlock; + if (!d_is_negative(newdentry) && + (!new_opaque || !ovl_is_whiteout(newdentry))) + goto out_dput; } - err = -ESTALE; - if (olddentry->d_parent != old_upperdir) - goto out_dput; - if (newdentry->d_parent != new_upperdir) - goto out_dput; if (olddentry == trap) goto out_dput; if (newdentry == trap) @@ -925,6 +934,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, out_dput: dput(newdentry); +out_dput_old: + dput(olddentry); out_unlock: unlock_rename(new_upperdir, old_upperdir); out_revert_creds: diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 05ac9a95e881..0597820f5d9d 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -412,12 +412,11 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, if (!inode) return NULL; - mode &= S_IFMT; - inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_flags |= S_NOATIME | S_NOCMTIME; + mode &= S_IFMT; switch (mode) { case S_IFDIR: inode->i_private = oe; diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index e17154aeaae4..735e1d49b301 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -181,6 +181,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to) { to->i_uid = from->i_uid; to->i_gid = from->i_gid; + to->i_mode = from->i_mode; } /* dir.c */ diff --git a/fs/pipe.c b/fs/pipe.c index 42cf8ddf0e55..ab8dad3ccb6a 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -38,6 +38,12 @@ unsigned int pipe_max_size = 1048576; */ unsigned int pipe_min_size = PAGE_SIZE; +/* Maximum allocatable pages per 
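The ovl_new_inode() hunk above applies the S_IFMT mask only after i_mode has been set (and ovl_copyattr() now copies i_mode as well); with the old ordering the overlay inode kept nothing but the file-type bits. A small illustration using the userspace macros, which carry the same values:

#include <sys/stat.h>

static void ovl_mode_example(void)
{
	mode_t mode = S_IFREG | 0644;		/* 0100644 */
	mode_t before_fix = mode & S_IFMT;	/* 0100000: the 0644 permission bits are lost */
	mode_t after_fix = mode;		/* 0100644: the mask now only feeds the file-type switch */

	(void)before_fix;
	(void)after_fix;
}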
user. Hard limit is unset by default, soft + * matches default values. + */ +unsigned long pipe_user_pages_hard; +unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR; + /* * We use a start+len construction, which provides full use of the * allocated memory. @@ -583,20 +589,49 @@ pipe_fasync(int fd, struct file *filp, int on) return retval; } +static void account_pipe_buffers(struct pipe_inode_info *pipe, + unsigned long old, unsigned long new) +{ + atomic_long_add(new - old, &pipe->user->pipe_bufs); +} + +static bool too_many_pipe_buffers_soft(struct user_struct *user) +{ + return pipe_user_pages_soft && + atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft; +} + +static bool too_many_pipe_buffers_hard(struct user_struct *user) +{ + return pipe_user_pages_hard && + atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard; +} + struct pipe_inode_info *alloc_pipe_info(void) { struct pipe_inode_info *pipe; pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL); if (pipe) { - pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL); + unsigned long pipe_bufs = PIPE_DEF_BUFFERS; + struct user_struct *user = get_current_user(); + + if (!too_many_pipe_buffers_hard(user)) { + if (too_many_pipe_buffers_soft(user)) + pipe_bufs = 1; + pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL); + } + if (pipe->bufs) { init_waitqueue_head(&pipe->wait); pipe->r_counter = pipe->w_counter = 1; - pipe->buffers = PIPE_DEF_BUFFERS; + pipe->buffers = pipe_bufs; + pipe->user = user; + account_pipe_buffers(pipe, 0, pipe_bufs); mutex_init(&pipe->mutex); return pipe; } + free_uid(user); kfree(pipe); } @@ -607,6 +642,8 @@ void free_pipe_info(struct pipe_inode_info *pipe) { int i; + account_pipe_buffers(pipe, pipe->buffers, 0); + free_uid(pipe->user); for (i = 0; i < pipe->buffers; i++) { struct pipe_buffer *buf = pipe->bufs + i; if (buf->ops) @@ -998,6 +1035,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer)); } + account_pipe_buffers(pipe, pipe->buffers, nr_pages); pipe->curbuf = 0; kfree(pipe->bufs); pipe->bufs = bufs; @@ -1069,6 +1107,11 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) { ret = -EPERM; goto out; + } else if ((too_many_pipe_buffers_hard(pipe->user) || + too_many_pipe_buffers_soft(pipe->user)) && + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { + ret = -EPERM; + goto out; } ret = pipe_set_size(pipe, nr_pages); break; diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 4adde1e2cbec..34bd1bd354e6 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -788,6 +788,28 @@ posix_acl_xattr_get(const struct xattr_handler *handler, return error; } +int +set_posix_acl(struct inode *inode, int type, struct posix_acl *acl) +{ + if (!IS_POSIXACL(inode)) + return -EOPNOTSUPP; + if (!inode->i_op->set_acl) + return -EOPNOTSUPP; + + if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) + return acl ? 
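The pipe.c hunk above introduces per-user accounting of pipe buffer pages with a soft and a hard limit (a limit of 0 disables the check), and pipe_fcntl() now refuses to grow a pipe past either limit without CAP_SYS_RESOURCE or CAP_SYS_ADMIN. Condensed to its allocation policy, the rule is approximately the following sketch (16 stands in for PIPE_DEF_BUFFERS; this is not the kernel code itself):

/* Buffers a new pipe gets for a user who already has 'in_use' pages
 * accounted, or 0 if the pipe must not be created at all. */
static unsigned long pipe_bufs_for_user(unsigned long in_use,
					unsigned long soft_limit,
					unsigned long hard_limit)
{
	if (hard_limit && in_use >= hard_limit)
		return 0;		/* over the hard cap: refuse */
	if (soft_limit && in_use >= soft_limit)
		return 1;		/* over the soft cap: single-page pipe */
	return 16;			/* PIPE_DEF_BUFFERS */
}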
-EACCES : 0; + if (!inode_owner_or_capable(inode)) + return -EPERM; + + if (acl) { + int ret = posix_acl_valid(acl); + if (ret) + return ret; + } + return inode->i_op->set_acl(inode, acl, type); +} +EXPORT_SYMBOL(set_posix_acl); + static int posix_acl_xattr_set(const struct xattr_handler *handler, struct dentry *dentry, const char *name, @@ -799,30 +821,13 @@ posix_acl_xattr_set(const struct xattr_handler *handler, if (strcmp(name, "") != 0) return -EINVAL; - if (!IS_POSIXACL(inode)) - return -EOPNOTSUPP; - if (!inode->i_op->set_acl) - return -EOPNOTSUPP; - - if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) - return value ? -EACCES : 0; - if (!inode_owner_or_capable(inode)) - return -EPERM; if (value) { acl = posix_acl_from_xattr(&init_user_ns, value, size); if (IS_ERR(acl)) return PTR_ERR(acl); - - if (acl) { - ret = posix_acl_valid(acl); - if (ret) - goto out; - } } - - ret = inode->i_op->set_acl(inode, acl, handler->flags); -out: + ret = set_posix_acl(inode, handler->flags, acl); posix_acl_release(acl); return ret; } diff --git a/fs/proc/base.c b/fs/proc/base.c index de2dcc1d1167..7d61792c053a 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2443,6 +2443,72 @@ static const struct file_operations proc_timers_operations = { .release = seq_release_private, }; +static ssize_t timerslack_ns_write(struct file *file, const char __user *buf, + size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + struct task_struct *p; + u64 slack_ns; + int err; + + err = kstrtoull_from_user(buf, count, 10, &slack_ns); + if (err < 0) + return err; + + p = get_proc_task(inode); + if (!p) + return -ESRCH; + + if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) { + task_lock(p); + if (slack_ns == 0) + p->timer_slack_ns = p->default_timer_slack_ns; + else + p->timer_slack_ns = slack_ns; + task_unlock(p); + } else + count = -EPERM; + + put_task_struct(p); + + return count; +} + +static int timerslack_ns_show(struct seq_file *m, void *v) +{ + struct inode *inode = m->private; + struct task_struct *p; + int err = 0; + + p = get_proc_task(inode); + if (!p) + return -ESRCH; + + if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) { + task_lock(p); + seq_printf(m, "%llu\n", p->timer_slack_ns); + task_unlock(p); + } else + err = -EPERM; + + put_task_struct(p); + + return err; +} + +static int timerslack_ns_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, timerslack_ns_show, inode); +} + +static const struct file_operations proc_pid_set_timerslack_ns_operations = { + .open = timerslack_ns_open, + .read = seq_read, + .write = timerslack_ns_write, + .llseek = seq_lseek, + .release = single_release, +}; + static int proc_pident_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { @@ -3030,6 +3096,7 @@ static const struct pid_entry tgid_base_stuff[] = { #ifdef CONFIG_CHECKPOINT_RESTORE REG("timers", S_IRUGO, proc_timers_operations), #endif + REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations), }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) @@ -3287,6 +3354,44 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) } /* + * proc_tid_comm_permission is a special permission function exclusively + * used for the node /proc/<pid>/task/<tid>/comm. + * It bypasses generic permission checks in the case where a task of the same + * task group attempts to access the node. 
+ * The rationale behind this is that glibc and bionic access this node for + * cross thread naming (pthread_set/getname_np(!self)). However, if + * PR_SET_DUMPABLE gets set to 0 this node among others becomes uid=0 gid=0, + * which locks out the cross thread naming implementation. + * This function makes sure that the node is always accessible for members of + * same thread group. + */ +static int proc_tid_comm_permission(struct inode *inode, int mask) +{ + bool is_same_tgroup; + struct task_struct *task; + + task = get_proc_task(inode); + if (!task) + return -ESRCH; + is_same_tgroup = same_thread_group(current, task); + put_task_struct(task); + + if (likely(is_same_tgroup && !(mask & MAY_EXEC))) { + /* This file (/proc/<pid>/task/<tid>/comm) can always be + * read or written by the members of the corresponding + * thread group. + */ + return 0; + } + + return generic_permission(inode, mask); +} + +static const struct inode_operations proc_tid_comm_inode_operations = { + .permission = proc_tid_comm_permission, +}; + +/* * Tasks */ static const struct pid_entry tid_base_stuff[] = { @@ -3304,7 +3409,9 @@ static const struct pid_entry tid_base_stuff[] = { #ifdef CONFIG_SCHED_DEBUG REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), #endif - REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), + NOD("comm", S_IFREG|S_IRUGO|S_IWUSR, + &proc_tid_comm_inode_operations, + &proc_pid_set_comm_operations, {}), #ifdef CONFIG_HAVE_ARCH_TRACEHOOK ONE("syscall", S_IRUSR, proc_pid_syscall), #endif diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c index ba165ef11e27..971928ab6c21 100644 --- a/fs/sdcardfs/dentry.c +++ b/fs/sdcardfs/dentry.c @@ -172,11 +172,15 @@ static int sdcardfs_cmp_ci(const struct dentry *parent, return 1; } +static void sdcardfs_canonical_path(const struct path *path, struct path *actual_path) { + sdcardfs_get_real_lower(path->dentry, actual_path); +} + const struct dentry_operations sdcardfs_ci_dops = { .d_revalidate = sdcardfs_d_revalidate, .d_release = sdcardfs_d_release, .d_hash = sdcardfs_hash_ci, .d_compare = sdcardfs_cmp_ci, - .d_canonical_path = sdcardfs_get_real_lower, + .d_canonical_path = sdcardfs_canonical_path, }; diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c index 10f0d6be718b..9c3340528eee 100644 --- a/fs/sdcardfs/packagelist.c +++ b/fs/sdcardfs/packagelist.c @@ -335,13 +335,20 @@ static ssize_t packages_attr_show(struct config_item *item, struct hashtable_entry *hash_cur; struct hlist_node *h_t; int i; - int count = 0; + int count = 0, written = 0; + char errormsg[] = "<truncated>\n"; + mutex_lock(&pkgl_data_all->hashtable_lock); - hash_for_each_safe(pkgl_data_all->package_to_appid, i, h_t, hash_cur, hlist) - count += snprintf(page + count, PAGE_SIZE - count, "%s %d\n", (char *)hash_cur->key, hash_cur->value); + hash_for_each_safe(pkgl_data_all->package_to_appid, i, h_t, hash_cur, hlist) { + written = scnprintf(page + count, PAGE_SIZE - sizeof(errormsg) - count, "%s %d\n", (char *)hash_cur->key, hash_cur->value); + if (count + written == PAGE_SIZE - sizeof(errormsg)) { + count += scnprintf(page + count, PAGE_SIZE - count, errormsg); + break; + } + count += written; + } mutex_unlock(&pkgl_data_all->hashtable_lock); - return count; } diff --git a/fs/select.c b/fs/select.c index 015547330e88..09e71a00a9b8 100644 --- a/fs/select.c +++ b/fs/select.c @@ -70,9 +70,9 @@ static long __estimate_accuracy(struct timespec *tv) return slack; } -long select_estimate_accuracy(struct timespec *tv) +u64 select_estimate_accuracy(struct timespec 
*tv) { - unsigned long ret; + u64 ret; struct timespec now; /* @@ -402,7 +402,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) struct poll_wqueues table; poll_table *wait; int retval, i, timed_out = 0; - unsigned long slack = 0; + u64 slack = 0; unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0; unsigned long busy_end = 0; @@ -784,7 +784,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, poll_table* pt = &wait->pt; ktime_t expire, *to = NULL; int timed_out = 0, count = 0; - unsigned long slack = 0; + u64 slack = 0; unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0; unsigned long busy_end = 0; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 0edc12856147..b895af7d8d80 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -52,6 +52,7 @@ #include "ubifs.h" #include <linux/mount.h> #include <linux/slab.h> +#include <linux/migrate.h> static int read_block(struct inode *inode, void *addr, unsigned int block, struct ubifs_data_node *dn) @@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page) return ret; } +#ifdef CONFIG_MIGRATION +static int ubifs_migrate_page(struct address_space *mapping, + struct page *newpage, struct page *page, enum migrate_mode mode) +{ + int rc; + + rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); + if (rc != MIGRATEPAGE_SUCCESS) + return rc; + + if (PagePrivate(page)) { + ClearPagePrivate(page); + SetPagePrivate(newpage); + } + + migrate_page_copy(newpage, page); + return MIGRATEPAGE_SUCCESS; +} +#endif + static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) { /* @@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = { .write_end = ubifs_write_end, .invalidatepage = ubifs_invalidatepage, .set_page_dirty = ubifs_set_page_dirty, +#ifdef CONFIG_MIGRATION + .migratepage = ubifs_migrate_page, +#endif .releasepage = ubifs_releasepage, }; diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 3479294c1d58..e1e7fe3b5424 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -535,6 +535,7 @@ xfs_agfl_write_verify( } const struct xfs_buf_ops xfs_agfl_buf_ops = { + .name = "xfs_agfl", .verify_read = xfs_agfl_read_verify, .verify_write = xfs_agfl_write_verify, }; @@ -2339,6 +2340,7 @@ xfs_agf_write_verify( } const struct xfs_buf_ops xfs_agf_buf_ops = { + .name = "xfs_agf", .verify_read = xfs_agf_read_verify, .verify_write = xfs_agf_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c index 90de071dd4c2..eb8bbfe85484 100644 --- a/fs/xfs/libxfs/xfs_alloc_btree.c +++ b/fs/xfs/libxfs/xfs_alloc_btree.c @@ -379,6 +379,7 @@ xfs_allocbt_write_verify( } const struct xfs_buf_ops xfs_allocbt_buf_ops = { + .name = "xfs_allocbt", .verify_read = xfs_allocbt_read_verify, .verify_write = xfs_allocbt_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index aa187f7ba2dd..01a5ecfedfcf 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -328,6 +328,7 @@ xfs_attr3_leaf_read_verify( } const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = { + .name = "xfs_attr3_leaf", .verify_read = xfs_attr3_leaf_read_verify, .verify_write = xfs_attr3_leaf_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c index 5ab95ffa4ae9..f3ed9bf0b065 100644 --- a/fs/xfs/libxfs/xfs_attr_remote.c +++ b/fs/xfs/libxfs/xfs_attr_remote.c @@ -201,6 +201,7 @@ xfs_attr3_rmt_write_verify( } 
const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = { + .name = "xfs_attr3_rmt", .verify_read = xfs_attr3_rmt_read_verify, .verify_write = xfs_attr3_rmt_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c index 6b0cf6546a82..1637c37bfbaa 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.c +++ b/fs/xfs/libxfs/xfs_bmap_btree.c @@ -720,6 +720,7 @@ xfs_bmbt_write_verify( } const struct xfs_buf_ops xfs_bmbt_buf_ops = { + .name = "xfs_bmbt", .verify_read = xfs_bmbt_read_verify, .verify_write = xfs_bmbt_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c index e89a0f8f827c..097bf7717d80 100644 --- a/fs/xfs/libxfs/xfs_da_btree.c +++ b/fs/xfs/libxfs/xfs_da_btree.c @@ -245,6 +245,7 @@ xfs_da3_node_read_verify( } const struct xfs_buf_ops xfs_da3_node_buf_ops = { + .name = "xfs_da3_node", .verify_read = xfs_da3_node_read_verify, .verify_write = xfs_da3_node_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c index 9c10e2b8cfcb..aa17cb788946 100644 --- a/fs/xfs/libxfs/xfs_dir2_block.c +++ b/fs/xfs/libxfs/xfs_dir2_block.c @@ -123,6 +123,7 @@ xfs_dir3_block_write_verify( } const struct xfs_buf_ops xfs_dir3_block_buf_ops = { + .name = "xfs_dir3_block", .verify_read = xfs_dir3_block_read_verify, .verify_write = xfs_dir3_block_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c index af71a84f343c..725fc7841fde 100644 --- a/fs/xfs/libxfs/xfs_dir2_data.c +++ b/fs/xfs/libxfs/xfs_dir2_data.c @@ -305,11 +305,13 @@ xfs_dir3_data_write_verify( } const struct xfs_buf_ops xfs_dir3_data_buf_ops = { + .name = "xfs_dir3_data", .verify_read = xfs_dir3_data_read_verify, .verify_write = xfs_dir3_data_write_verify, }; static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = { + .name = "xfs_dir3_data_reada", .verify_read = xfs_dir3_data_reada_verify, .verify_write = xfs_dir3_data_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c index 3923e1f94697..b887fb2a2bcf 100644 --- a/fs/xfs/libxfs/xfs_dir2_leaf.c +++ b/fs/xfs/libxfs/xfs_dir2_leaf.c @@ -245,11 +245,13 @@ xfs_dir3_leafn_write_verify( } const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops = { + .name = "xfs_dir3_leaf1", .verify_read = xfs_dir3_leaf1_read_verify, .verify_write = xfs_dir3_leaf1_write_verify, }; const struct xfs_buf_ops xfs_dir3_leafn_buf_ops = { + .name = "xfs_dir3_leafn", .verify_read = xfs_dir3_leafn_read_verify, .verify_write = xfs_dir3_leafn_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c index 70b0cb2fd556..63ee03db796c 100644 --- a/fs/xfs/libxfs/xfs_dir2_node.c +++ b/fs/xfs/libxfs/xfs_dir2_node.c @@ -150,6 +150,7 @@ xfs_dir3_free_write_verify( } const struct xfs_buf_ops xfs_dir3_free_buf_ops = { + .name = "xfs_dir3_free", .verify_read = xfs_dir3_free_read_verify, .verify_write = xfs_dir3_free_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c index 5331b7f0460c..3cc3cf767474 100644 --- a/fs/xfs/libxfs/xfs_dquot_buf.c +++ b/fs/xfs/libxfs/xfs_dquot_buf.c @@ -54,7 +54,7 @@ xfs_dqcheck( xfs_dqid_t id, uint type, /* used only when IO_dorepair is true */ uint flags, - char *str) + const char *str) { xfs_dqblk_t *d = (xfs_dqblk_t *)ddq; int errs = 0; @@ -207,7 +207,8 @@ xfs_dquot_buf_verify_crc( STATIC bool xfs_dquot_buf_verify( struct xfs_mount *mp, - struct xfs_buf *bp) + struct xfs_buf *bp, + int warn) { struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr; xfs_dqid_t id = 0; @@ 
-240,8 +241,7 @@ xfs_dquot_buf_verify( if (i == 0) id = be32_to_cpu(ddq->d_id); - error = xfs_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN, - "xfs_dquot_buf_verify"); + error = xfs_dqcheck(mp, ddq, id + i, 0, warn, __func__); if (error) return false; } @@ -256,7 +256,7 @@ xfs_dquot_buf_read_verify( if (!xfs_dquot_buf_verify_crc(mp, bp)) xfs_buf_ioerror(bp, -EFSBADCRC); - else if (!xfs_dquot_buf_verify(mp, bp)) + else if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) xfs_buf_ioerror(bp, -EFSCORRUPTED); if (bp->b_error) @@ -264,6 +264,25 @@ xfs_dquot_buf_read_verify( } /* + * readahead errors are silent and simply leave the buffer as !done so a real + * read will then be run with the xfs_dquot_buf_ops verifier. See + * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than + * reporting the failure. + */ +static void +xfs_dquot_buf_readahead_verify( + struct xfs_buf *bp) +{ + struct xfs_mount *mp = bp->b_target->bt_mount; + + if (!xfs_dquot_buf_verify_crc(mp, bp) || + !xfs_dquot_buf_verify(mp, bp, 0)) { + xfs_buf_ioerror(bp, -EIO); + bp->b_flags &= ~XBF_DONE; + } +} + +/* * we don't calculate the CRC here as that is done when the dquot is flushed to * the buffer after the update is done. This ensures that the dquot in the * buffer always has an up-to-date CRC value. @@ -274,7 +293,7 @@ xfs_dquot_buf_write_verify( { struct xfs_mount *mp = bp->b_target->bt_mount; - if (!xfs_dquot_buf_verify(mp, bp)) { + if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) { xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_verifier_error(bp); return; @@ -282,7 +301,13 @@ xfs_dquot_buf_write_verify( } const struct xfs_buf_ops xfs_dquot_buf_ops = { + .name = "xfs_dquot", .verify_read = xfs_dquot_buf_read_verify, .verify_write = xfs_dquot_buf_write_verify, }; +const struct xfs_buf_ops xfs_dquot_buf_ra_ops = { + .name = "xfs_dquot_ra", + .verify_read = xfs_dquot_buf_readahead_verify, + .verify_write = xfs_dquot_buf_write_verify, +}; diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 70c1db99f6a7..66d702e6b9ff 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -2572,6 +2572,7 @@ xfs_agi_write_verify( } const struct xfs_buf_ops xfs_agi_buf_ops = { + .name = "xfs_agi", .verify_read = xfs_agi_read_verify, .verify_write = xfs_agi_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index f39b285beb19..6dd44f9ea727 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c @@ -304,6 +304,7 @@ xfs_inobt_write_verify( } const struct xfs_buf_ops xfs_inobt_buf_ops = { + .name = "xfs_inobt", .verify_read = xfs_inobt_read_verify, .verify_write = xfs_inobt_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index 65485cfc4ade..1aabfda669b0 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -68,6 +68,8 @@ xfs_inobp_check( * recovery and we don't get unnecessary panics on debug kernels. We use EIO here * because all we want to do is say readahead failed; there is no-one to report * the error to, so this will distinguish it from a non-ra verifier failure. + * Changes to this readahead error behaviour also need to be reflected in + * xfs_dquot_buf_readahead_verify(). 
*/ static void xfs_inode_buf_verify( @@ -134,11 +136,13 @@ xfs_inode_buf_write_verify( } const struct xfs_buf_ops xfs_inode_buf_ops = { + .name = "xfs_inode", .verify_read = xfs_inode_buf_read_verify, .verify_write = xfs_inode_buf_write_verify, }; const struct xfs_buf_ops xfs_inode_buf_ra_ops = { + .name = "xxfs_inode_ra", .verify_read = xfs_inode_buf_readahead_verify, .verify_write = xfs_inode_buf_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h index 1b0a08379759..f51078f1e92a 100644 --- a/fs/xfs/libxfs/xfs_quota_defs.h +++ b/fs/xfs/libxfs/xfs_quota_defs.h @@ -153,7 +153,7 @@ typedef __uint16_t xfs_qwarncnt_t; #define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS) extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq, - xfs_dqid_t id, uint type, uint flags, char *str); + xfs_dqid_t id, uint type, uint flags, const char *str); extern int xfs_calc_dquots_per_chunk(unsigned int nbblks); #endif /* __XFS_QUOTA_H__ */ diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index a0b071d881a0..8a53eaa349f4 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -679,11 +679,13 @@ xfs_sb_write_verify( } const struct xfs_buf_ops xfs_sb_buf_ops = { + .name = "xfs_sb", .verify_read = xfs_sb_read_verify, .verify_write = xfs_sb_write_verify, }; const struct xfs_buf_ops xfs_sb_quiet_buf_ops = { + .name = "xfs_sb_quiet", .verify_read = xfs_sb_quiet_read_verify, .verify_write = xfs_sb_write_verify, }; diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h index 5be529707903..15c3ceb845b9 100644 --- a/fs/xfs/libxfs/xfs_shared.h +++ b/fs/xfs/libxfs/xfs_shared.h @@ -49,6 +49,7 @@ extern const struct xfs_buf_ops xfs_inobt_buf_ops; extern const struct xfs_buf_ops xfs_inode_buf_ops; extern const struct xfs_buf_ops xfs_inode_buf_ra_ops; extern const struct xfs_buf_ops xfs_dquot_buf_ops; +extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops; extern const struct xfs_buf_ops xfs_sb_buf_ops; extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops; extern const struct xfs_buf_ops xfs_symlink_buf_ops; diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c index cb6fd20a4d3d..2e2c6716b623 100644 --- a/fs/xfs/libxfs/xfs_symlink_remote.c +++ b/fs/xfs/libxfs/xfs_symlink_remote.c @@ -168,6 +168,7 @@ xfs_symlink_write_verify( } const struct xfs_buf_ops xfs_symlink_buf_ops = { + .name = "xfs_symlink", .verify_read = xfs_symlink_read_verify, .verify_write = xfs_symlink_write_verify, }; diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index c79b717d9b88..c75721acd867 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -132,6 +132,7 @@ struct xfs_buf_map { struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) }; struct xfs_buf_ops { + char *name; void (*verify_read)(struct xfs_buf *); void (*verify_write)(struct xfs_buf *); }; diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index 74d0e5966ebc..88693a98fac5 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c @@ -164,9 +164,9 @@ xfs_verifier_error( { struct xfs_mount *mp = bp->b_target->bt_mount; - xfs_alert(mp, "Metadata %s detected at %pF, block 0x%llx", + xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx", bp->b_error == -EFSBADCRC ? 
"CRC error" : "corruption", - __return_address, bp->b_bn); + __return_address, bp->b_ops->name, bp->b_bn); xfs_alert(mp, "Unmount and run xfs_repair"); diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index ee3aaa0a5317..ca0d3eb44925 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -243,8 +243,8 @@ xfs_growfs_data_private( agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp)); agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1); agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1); - agf->agf_flfirst = 0; - agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1); + agf->agf_flfirst = cpu_to_be32(1); + agf->agf_fllast = 0; agf->agf_flcount = 0; tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp); agf->agf_freeblks = cpu_to_be32(tmpsize); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 8ee393996b7d..f0ce28cd311d 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3220,13 +3220,14 @@ xfs_iflush_cluster( * We need to check under the i_flags_lock for a valid inode * here. Skip it if it is not valid or the wrong inode. */ - spin_lock(&ip->i_flags_lock); - if (!ip->i_ino || + spin_lock(&iq->i_flags_lock); + if (!iq->i_ino || + __xfs_iflags_test(iq, XFS_ISTALE) || (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) { - spin_unlock(&ip->i_flags_lock); + spin_unlock(&iq->i_flags_lock); continue; } - spin_unlock(&ip->i_flags_lock); + spin_unlock(&iq->i_flags_lock); /* * Do an un-protected check to see if the inode is dirty and @@ -3342,7 +3343,7 @@ xfs_iflush( struct xfs_buf **bpp) { struct xfs_mount *mp = ip->i_mount; - struct xfs_buf *bp; + struct xfs_buf *bp = NULL; struct xfs_dinode *dip; int error; @@ -3384,14 +3385,22 @@ xfs_iflush( } /* - * Get the buffer containing the on-disk inode. + * Get the buffer containing the on-disk inode. We are doing a try-lock + * operation here, so we may get an EAGAIN error. In that case, we + * simply want to return with the inode still dirty. + * + * If we get any other error, we effectively have a corruption situation + * and we cannot flush the inode, so we treat it the same as failing + * xfs_iflush_int(). */ error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK, 0); - if (error || !bp) { + if (error == -EAGAIN) { xfs_ifunlock(ip); return error; } + if (error) + goto corrupt_out; /* * First flush out the inode that xfs_iflush was called with. 
@@ -3419,7 +3428,8 @@ xfs_iflush( return 0; corrupt_out: - xfs_buf_relse(bp); + if (bp) + xfs_buf_relse(bp); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); cluster_corrupt_out: error = -EFSCORRUPTED; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index c5ecaacdd218..5991cdcb9040 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3204,6 +3204,7 @@ xlog_recover_dquot_ra_pass2( struct xfs_disk_dquot *recddq; struct xfs_dq_logformat *dq_f; uint type; + int len; if (mp->m_qflags == 0) @@ -3224,8 +3225,12 @@ xlog_recover_dquot_ra_pass2( ASSERT(dq_f); ASSERT(dq_f->qlf_len == 1); - xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, - XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL); + len = XFS_FSB_TO_BB(mp, dq_f->qlf_len); + if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0)) + return; + + xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len, + &xfs_dquot_buf_ra_ops); } STATIC void diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 36bd8825bfb0..ef64a1e1a66a 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1233,6 +1233,16 @@ xfs_fs_remount( return -EINVAL; } + if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 && + xfs_sb_has_ro_compat_feature(sbp, + XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) { + xfs_warn(mp, +"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem", + (sbp->sb_features_ro_compat & + XFS_SB_FEAT_RO_COMPAT_UNKNOWN)); + return -EINVAL; + } + mp->m_flags &= ~XFS_MOUNT_RDONLY; /* diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index e2aadbc7151f..1885fc44b1bc 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -21,14 +21,33 @@ #include <asm-generic/qspinlock_types.h> /** + * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock + * @lock : Pointer to queued spinlock structure + * + * There is a very slight possibility of live-lock if the lockers keep coming + * and the waiter is just unfortunate enough to not see any unlock state. + */ +#ifndef queued_spin_unlock_wait +extern void queued_spin_unlock_wait(struct qspinlock *lock); +#endif + +/** * queued_spin_is_locked - is the spinlock locked? * @lock: Pointer to queued spinlock structure * Return: 1 if it is locked, 0 otherwise */ +#ifndef queued_spin_is_locked static __always_inline int queued_spin_is_locked(struct qspinlock *lock) { + /* + * See queued_spin_unlock_wait(). + * + * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL + * isn't immediately observable. + */ return atomic_read(&lock->val); } +#endif /** * queued_spin_value_unlocked - is the spinlock structure unlocked? @@ -98,19 +117,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock) } #endif -/** - * queued_spin_unlock_wait - wait until current lock holder releases the lock - * @lock : Pointer to queued spinlock structure - * - * There is a very slight possibility of live-lock if the lockers keep coming - * and the waiter is just unfortunate enough to not see any unlock state. 
- */ -static inline void queued_spin_unlock_wait(struct qspinlock *lock) -{ - while (atomic_read(&lock->val) & _Q_LOCKED_MASK) - cpu_relax(); -} - #ifndef virt_spin_lock static __always_inline bool virt_spin_lock(struct qspinlock *lock) { diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h index 3d1a3af5cf59..a2508a8f9a9c 100644 --- a/include/asm-generic/siginfo.h +++ b/include/asm-generic/siginfo.h @@ -17,21 +17,6 @@ struct siginfo; void do_schedule_next_timer(struct siginfo *info); -#ifndef HAVE_ARCH_COPY_SIGINFO - -#include <linux/string.h> - -static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) -{ - if (from->si_code < 0) - memcpy(to, from, sizeof(*to)); - else - /* _sigchld is currently the largest know union member */ - memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); -} - -#endif - extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); #endif diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index c768ddfbe53c..b7bfa513e6ed 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo) */ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait); + +/** + * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo + * + * @placement: Return immediately if buffer is busy. + * @mem: The struct ttm_mem_reg indicating the region where the bo resides + * @new_flags: Describes compatible placement found + * + * Returns true if the placement is compatible + */ +extern bool ttm_bo_mem_compat(struct ttm_placement *placement, + struct ttm_mem_reg *mem, + uint32_t *new_flags); + /** * ttm_bo_validate * diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 9006c4e75cf7..3d8dcdd1aeae 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -163,4 +163,13 @@ struct amba_device name##_device = { \ #define module_amba_driver(__amba_drv) \ module_driver(__amba_drv, amba_driver_register, amba_driver_unregister) +/* + * builtin_amba_driver() - Helper macro for drivers that don't do anything + * special in driver initcall. This eliminates a lot of boilerplate. Each + * driver may only use this macro once, and calling it replaces the instance + * device_initcall(). 
+ */ +#define builtin_amba_driver(__amba_drv) \ + builtin_driver(__amba_drv, amba_driver_register) + #endif diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index 8bfd21c13d59..eff56cb0016a 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -40,10 +40,7 @@ struct mmci_platform_data { int gpio_wp; int gpio_cd; bool cd_invert; - unsigned int status_irq; struct embedded_sdio_data *embedded_sdio; - int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id); - }; #endif diff --git a/include/linux/bluetooth-power.h b/include/linux/bluetooth-power.h index 7be94d298b88..a822ba8c07d1 100644 --- a/include/linux/bluetooth-power.h +++ b/include/linux/bluetooth-power.h @@ -85,4 +85,5 @@ struct bluetooth_power_platform_data { int bt_register_slimdev(struct device *dev); #define BT_CMD_SLIM_TEST 0xbfac +#define BT_CMD_PWR_CTRL 0xbfad #endif /* __LINUX_BLUETOOTH_POWER_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 67bc2da5d233..4f6d29c8e3d8 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -198,6 +198,10 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd) static inline void bpf_prog_put(struct bpf_prog *prog) { } + +static inline void bpf_prog_put_rcu(struct bpf_prog *prog) +{ +} #endif /* CONFIG_BPF_SYSCALL */ /* verifier prototypes for helper functions called from eBPF programs */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 735f9f8c4e43..5261751f6bd4 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -40,8 +40,11 @@ struct can_priv { struct can_clock clock; enum can_state state; - u32 ctrlmode; - u32 ctrlmode_supported; + + /* CAN controller features - see include/uapi/linux/can/netlink.h */ + u32 ctrlmode; /* current options setting */ + u32 ctrlmode_supported; /* options that can be modified by netlink */ + u32 ctrlmode_static; /* static enabled options for driver/hardware */ int restart_ms; struct timer_list restart_timer; @@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb) return skb->len == CANFD_MTU; } +/* helper to define static CAN controller features at device creation time */ +static inline void can_set_static_ctrlmode(struct net_device *dev, + u32 static_mode) +{ + struct can_priv *priv = netdev_priv(dev); + + /* alloc_candev() succeeded => netdev_priv() is valid at this point */ + priv->ctrlmode = static_mode; + priv->ctrlmode_static = static_mode; + + /* override MTU which was set by default in can_setup()? 
*/ + if (static_mode & CAN_CTRLMODE_FD) + dev->mtu = CANFD_MTU; +} + /* get data length from can_dlc with sanitized can_dlc */ u8 can_dlc2len(u8 can_dlc); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 9a4a80ae5eaf..2a5acbdc6327 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -13,6 +13,7 @@ #include <linux/io.h> #include <linux/of.h> +#include <linux/mutex.h> #ifdef CONFIG_COMMON_CLK @@ -227,6 +228,9 @@ struct clk_ops { * @parent_names: array of string names for all possible parents * @num_parents: number of possible parents * @flags: framework-level hints and quirks + * @vdd_class: voltage scaling requirement class + * @rate_max: maximum clock rate in Hz supported at each voltage level + * @num_rate_max: number of maximum voltage level supported */ struct clk_init_data { const char *name; @@ -234,8 +238,69 @@ struct clk_init_data { const char * const *parent_names; u8 num_parents; unsigned long flags; + struct clk_vdd_class *vdd_class; + unsigned long *rate_max; + int num_rate_max; }; +struct regulator; + +/** + * struct clk_vdd_class - Voltage scaling class + * @class_name: name of the class + * @regulator: array of regulators + * @num_regulators: size of regulator array. Standard regulator APIs will be + used if this field > 0 + * @set_vdd: function to call when applying a new voltage setting + * @vdd_uv: sorted 2D array of legal voltage settings. Indexed by level, then + regulator + * @level_votes: array of votes for each level + * @num_levels: specifies the size of level_votes array + * @cur_level: the currently set voltage level + * @lock: lock to protect this struct + */ +struct clk_vdd_class { + const char *class_name; + struct regulator **regulator; + int num_regulators; + int (*set_vdd)(struct clk_vdd_class *v_class, int level); + int *vdd_uv; + int *level_votes; + int num_levels; + unsigned long cur_level; + struct mutex lock; +}; + +#define DEFINE_VDD_CLASS(_name, _set_vdd, _num_levels) \ + struct clk_vdd_class _name = { \ + .class_name = #_name, \ + .set_vdd = _set_vdd, \ + .level_votes = (int [_num_levels]) {}, \ + .num_levels = _num_levels, \ + .cur_level = _num_levels, \ + .lock = __MUTEX_INITIALIZER(_name.lock) \ + } + +#define DEFINE_VDD_REGULATORS(_name, _num_levels, _num_regulators, _vdd_uv) \ + struct clk_vdd_class _name = { \ + .class_name = #_name, \ + .vdd_uv = _vdd_uv, \ + .regulator = (struct regulator * [_num_regulators]) {}, \ + .num_regulators = _num_regulators, \ + .level_votes = (int [_num_levels]) {}, \ + .num_levels = _num_levels, \ + .cur_level = _num_levels, \ + .lock = __MUTEX_INITIALIZER(_name.lock) \ + } + +#define DEFINE_VDD_REGS_INIT(_name, _num_regulators) \ + struct clk_vdd_class _name = { \ + .class_name = #_name, \ + .regulator = (struct regulator * [_num_regulators]) {}, \ + .num_regulators = _num_regulators, \ + .lock = __MUTEX_INITIALIZER(_name.lock) \ + } + /** * struct clk_hw - handle for traversing from a struct clk to its corresponding * hardware-specific structure. 
struct clk_hw should be declared within struct diff --git a/include/linux/clk/msm-clk.h b/include/linux/clk/msm-clk.h index 22587e8852e2..964909d25021 100644 --- a/include/linux/clk/msm-clk.h +++ b/include/linux/clk/msm-clk.h @@ -14,6 +14,16 @@ #include <linux/notifier.h> +#if defined(CONFIG_COMMON_CLK_QCOM) +enum branch_mem_flags { + CLKFLAG_RETAIN_PERIPH, + CLKFLAG_NORETAIN_PERIPH, + CLKFLAG_RETAIN_MEM, + CLKFLAG_NORETAIN_MEM, + CLKFLAG_PERIPH_OFF_SET, + CLKFLAG_PERIPH_OFF_CLEAR, +}; +#elif defined(CONFIG_COMMON_CLK_MSM) #define CLKFLAG_INVERT 0x00000001 #define CLKFLAG_NOINVERT 0x00000002 #define CLKFLAG_NONEST 0x00000004 @@ -32,6 +42,7 @@ #define CLKFLAG_EPROBE_DEFER 0x00010000 #define CLKFLAG_PERIPH_OFF_SET 0x00020000 #define CLKFLAG_PERIPH_OFF_CLEAR 0x00040000 +#endif struct clk_lookup; struct clk; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index d184e283cf81..702b6c53c12f 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -161,7 +161,7 @@ struct dentry_operations { struct vfsmount *(*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool); struct inode *(*d_select_inode)(struct dentry *, unsigned); - void (*d_canonical_path)(const struct dentry *, struct path *); + void (*d_canonical_path)(const struct path *, struct path *); struct dentry *(*d_real)(struct dentry *, struct inode *); } ____cacheline_aligned; @@ -604,5 +604,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry, return inode; } +/** + * d_real_inode - Return the real inode + * @dentry: The dentry to query + * + * If dentry is on an union/overlay, then return the underlying, real inode. + * Otherwise return d_inode(). + */ +static inline struct inode *d_real_inode(struct dentry *dentry) +{ + return d_backing_inode(d_real(dentry)); +} + #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 010b5ad2ac5d..5c720864db89 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -383,6 +383,12 @@ void dm_set_mdptr(struct mapped_device *md, void *ptr); void *dm_get_mdptr(struct mapped_device *md); /* + * Export the device via the ioctl interface (uses mdptr). + */ +int dm_ioctl_export(struct mapped_device *md, const char *name, + const char *uuid); + +/* * A device can still be used while suspended, but I/O is deferred. */ int dm_suspend(struct mapped_device *md, unsigned suspend_flags); diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 6b7fd9cf5ea2..dd03e837ebb7 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -231,7 +231,7 @@ static inline long freezable_schedule_timeout_killable_unsafe(long timeout) * call this with locks held. 
*/ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, - unsigned long delta, const enum hrtimer_mode mode) + u64 delta, const enum hrtimer_mode mode) { int __retval; freezer_do_not_count(); diff --git a/include/linux/hdcp_qseecom.h b/include/linux/hdcp_qseecom.h index f66264bc935a..68f2dd993170 100644 --- a/include/linux/hdcp_qseecom.h +++ b/include/linux/hdcp_qseecom.h @@ -26,6 +26,7 @@ enum hdcp_lib_wakeup_cmd { HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED, HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT, HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE, + HDCP_LIB_WKUP_CMD_LINK_FAILED, }; enum hdmi_hdcp_wakeup_cmd { @@ -47,6 +48,7 @@ struct hdcp_lib_wakeup_data { }; struct hdcp_msg_part { + char *name; uint32_t offset; uint32_t length; }; @@ -106,6 +108,8 @@ static inline char *hdcp_lib_cmd_to_str(uint32_t cmd) return "HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT"; case HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE: return "HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE"; + case HDCP_LIB_WKUP_CMD_LINK_FAILED: + return "HDCP_LIB_WKUP_CMD_LINK_FAILED"; default: return "???"; } diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 952adcacc4cf..74921a39edee 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -223,7 +223,7 @@ static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time timer->node.expires = ktime_add_safe(time, delta); } -static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta) +static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta) { timer->_softexpires = time; timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta)); @@ -384,7 +384,7 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } /* Basic timer operations: */ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, - unsigned long range_ns, const enum hrtimer_mode mode); + u64 range_ns, const enum hrtimer_mode mode); /** * hrtimer_start - (re)start an hrtimer on the current CPU @@ -405,7 +405,7 @@ extern int hrtimer_try_to_cancel(struct hrtimer *timer); static inline void hrtimer_start_expires(struct hrtimer *timer, enum hrtimer_mode mode) { - unsigned long delta; + u64 delta; ktime_t soft, hard; soft = hrtimer_get_softexpires(timer); hard = hrtimer_get_expires(timer); @@ -483,10 +483,12 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *tsk); -extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, +extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, const enum hrtimer_mode mode); extern int schedule_hrtimeout_range_clock(ktime_t *expires, - unsigned long delta, const enum hrtimer_mode mode, int clock); + u64 delta, + const enum hrtimer_mode mode, + int clock); extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); /* Soft interrupt function to run the hrtimer queues: */ diff --git a/include/linux/ipa_usb.h b/include/linux/ipa_usb.h index 0fe0e36c551f..de1163348c05 100644 --- a/include/linux/ipa_usb.h +++ b/include/linux/ipa_usb.h @@ -253,6 +253,7 @@ int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot); * @dl_clnt_hdl: client handle previously obtained from * ipa_usb_xdci_connect() for IN channel * @teth_prot: tethering protocol + * @with_remote_wakeup: Does host support remote wakeup? 
* * Note: Should not be called from atomic context * Note: for DPL, the ul will be ignored as irrelevant @@ -260,7 +261,8 @@ int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot); * @Return 0 on success, negative on failure */ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, - enum ipa_usb_teth_prot teth_prot); + enum ipa_usb_teth_prot teth_prot, + bool with_remote_wakeup); /** * ipa_usb_xdci_resume - Peripheral should call this function to resume @@ -313,7 +315,8 @@ static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot) } static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, - enum ipa_usb_teth_prot teth_prot) + enum ipa_usb_teth_prot teth_prot, + bool with_remote_wakeup) { return -EPERM; } diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index d5d798b35c1f..e98425058f20 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -301,7 +301,7 @@ #define ICC_SGI1R_AFFINITY_1_SHIFT 16 #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) #define ICC_SGI1R_SGI_ID_SHIFT 24 -#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) #define ICC_SGI1R_AFFINITY_2_SHIFT 32 #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 0536524bb9eb..68904469fba1 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -117,13 +117,18 @@ struct module; #include <linux/atomic.h> +#ifdef HAVE_JUMP_LABEL + static inline int static_key_count(struct static_key *key) { - return atomic_read(&key->enabled); + /* + * -1 means the first static_key_slow_inc() is in progress. + * static_key_enabled() must return true, so return 1 here. + */ + int n = atomic_read(&key->enabled); + return n >= 0 ? n : 1; } -#ifdef HAVE_JUMP_LABEL - #define JUMP_TYPE_FALSE 0UL #define JUMP_TYPE_TRUE 1UL #define JUMP_TYPE_MASK 1UL @@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod); #else /* !HAVE_JUMP_LABEL */ +static inline int static_key_count(struct static_key *key) +{ + return atomic_read(&key->enabled); +} + static __always_inline void jump_label_init(void) { static_key_initialized = true; diff --git a/include/linux/mm.h b/include/linux/mm.h index 57a44fa9ab89..6c1ea7f327c4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1709,7 +1709,7 @@ extern void free_highmem_page(struct page *page); extern void adjust_managed_page_count(struct page *page, long count); extern void mem_init_print_info(const char *str); -extern void reserve_bootmem_region(unsigned long start, unsigned long end); +extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); /* Free the reserved page into the buddy system, so it gets managed. */ static inline void __free_reserved_page(struct page *page) diff --git a/include/linux/net.h b/include/linux/net.h index 25ef630f1bd6..c00b8d182226 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -251,7 +251,8 @@ do { \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ net_ratelimit()) \ - __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ + __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ + ##__VA_ARGS__); \ } while (0) #elif defined(DEBUG) #define net_dbg_ratelimited(fmt, ...) 
\ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index c5577410c25d..04078e8a4803 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -239,11 +239,18 @@ void xt_unregister_match(struct xt_match *target); int xt_register_matches(struct xt_match *match, unsigned int n); void xt_unregister_matches(struct xt_match *match, unsigned int n); +int xt_check_entry_offsets(const void *base, const char *elems, + unsigned int target_offset, + unsigned int next_offset); + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); +void *xt_copy_counters_from_user(const void __user *user, unsigned int len, + struct xt_counters_info *info, bool compat); + struct xt_table *xt_register_table(struct net *net, const struct xt_table *table, struct xt_table_info *bootstrap, @@ -478,7 +485,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number); int xt_compat_calc_jump(u_int8_t af, unsigned int offset); int xt_compat_match_offset(const struct xt_match *match); -int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, +void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, unsigned int *size); int xt_compat_match_to_user(const struct xt_entry_match *m, void __user **dstptr, unsigned int *size); @@ -488,6 +495,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, unsigned int *size); int xt_compat_target_to_user(const struct xt_entry_target *t, void __user **dstptr, unsigned int *size); +int xt_compat_check_entry_offsets(const void *base, const char *elems, + unsigned int target_offset, + unsigned int next_offset); #endif /* CONFIG_COMPAT */ #endif /* _X_TABLES_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 4f28b91f49c5..12d3415a3ef5 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -380,7 +380,7 @@ struct pmu { /* * Set up pmu-private data structures for an AUX area */ - void *(*setup_aux) (int cpu, void **pages, + void *(*setup_aux) (struct perf_event *event, void **pages, int nr_pages, bool overwrite); /* optional */ @@ -393,6 +393,14 @@ struct pmu { * Filter events for PMU-specific reasons. */ int (*filter_match) (struct perf_event *event); /* optional */ + + /* + * Initial, PMU driver specific configuration. 
+ */ + int (*get_drv_configs) (struct perf_event *event, + void __user *arg); /* optional */ + void (*free_drv_configs) (struct perf_event *event); + /* optional */ }; /** @@ -560,6 +568,7 @@ struct perf_event { struct irq_work pending; atomic_t event_limit; + struct list_head drv_configs; void (*destroy)(struct perf_event *); struct rcu_head rcu_head; diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index eb8b8ac6df3c..24f5470d3944 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -42,6 +42,7 @@ struct pipe_buffer { * @fasync_readers: reader side fasync * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers + * @user: the user who created this pipe **/ struct pipe_inode_info { struct mutex mutex; @@ -57,6 +58,7 @@ struct pipe_inode_info { struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; + struct user_struct *user; }; /* @@ -123,6 +125,8 @@ void pipe_unlock(struct pipe_inode_info *); void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); extern unsigned int pipe_max_size, pipe_min_size; +extern unsigned long pipe_user_pages_hard; +extern unsigned long pipe_user_pages_soft; int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *); diff --git a/include/linux/pm.h b/include/linux/pm.h index 528be6787796..6a5d654f4447 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -573,6 +573,7 @@ struct dev_pm_info { struct wakeup_source *wakeup; bool wakeup_path:1; bool syscore:1; + bool no_pm_callbacks:1; /* Owned by the PM core */ #else unsigned int should_wakeup:1; #endif diff --git a/include/linux/poll.h b/include/linux/poll.h index c08386fb3e08..9fb4f40d9a26 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq); extern void poll_freewait(struct poll_wqueues *pwq); extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, ktime_t *expires, unsigned long slack); -extern long select_estimate_accuracy(struct timespec *tv); +extern u64 select_estimate_accuracy(struct timespec *tv); static inline int poll_schedule(struct poll_wqueues *pwq, int state) diff --git a/include/linux/sched.h b/include/linux/sched.h index 06acefeffd4c..b1351226b102 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -900,6 +900,7 @@ struct user_struct { #endif unsigned long locked_shm; /* How many pages of mlocked shm ? */ unsigned long unix_inflight; /* How many files in flight in unix sockets */ + atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ #ifdef CONFIG_KEYS struct key *uid_keyring; /* UID specific keyring */ @@ -1900,8 +1901,8 @@ struct task_struct { * time slack values; these are used to round up poll() and * select() etc timeout values. These are in nanoseconds. 
*/ - unsigned long timer_slack_ns; - unsigned long default_timer_slack_ns; + u64 timer_slack_ns; + u64 default_timer_slack_ns; #ifdef CONFIG_KASAN unsigned int kasan_depth; diff --git a/include/linux/signal.h b/include/linux/signal.h index 92557bbce7e7..d80259afb9e5 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -28,6 +28,21 @@ struct sigpending { sigset_t signal; }; +#ifndef HAVE_ARCH_COPY_SIGINFO + +#include <linux/string.h> + +static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) +{ + if (from->si_code < 0) + memcpy(to, from, sizeof(*to)); + else + /* _sigchld is currently the largest know union member */ + memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); +} + +#endif + /* * Define some primitives to manipulate sigset_t. */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4fde61804191..d443d9ab0236 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -982,6 +982,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) } void __skb_get_hash(struct sk_buff *skb); +u32 __skb_get_hash_symmetric(struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); u32 __skb_get_poff(const struct sk_buff *skb, void *data, const struct flow_keys *keys, int hlen); @@ -2564,6 +2565,13 @@ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len skb_headroom(skb) + len <= skb->hdr_len; } +static inline int skb_try_make_writable(struct sk_buff *skb, + unsigned int write_len) +{ + return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} + static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, int cloned) { @@ -2766,6 +2774,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb, } /** + * skb_push_rcsum - push skb and update receive checksum + * @skb: buffer to update + * @len: length of data pulled + * + * This function performs an skb_push on the packet and updates + * the CHECKSUM_COMPLETE checksum. It should be used on + * receive path processing instead of skb_push unless you know + * that the checksum difference is zero (e.g., a valid IP header) + * or you are setting ip_summed to CHECKSUM_NONE. 
+ */ +static inline unsigned char *skb_push_rcsum(struct sk_buff *skb, + unsigned int len) +{ + skb_push(skb, len); + skb_postpush_rcsum(skb, skb->data, len); + return skb->data; +} + +/** * pskb_trim_rcsum - trim received skb and update checksum * @skb: buffer to trim * @len: new length diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index 4018b48f2b3b..a0596ca0e80a 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -36,6 +36,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk) { switch (sk->sk_family) { case AF_INET: + if (sk->sk_type == SOCK_RAW) + return SKNLGRP_NONE; + switch (sk->sk_protocol) { case IPPROTO_TCP: return SKNLGRP_INET_TCP_DESTROY; @@ -45,6 +48,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk) return SKNLGRP_NONE; } case AF_INET6: + if (sk->sk_type == SOCK_RAW) + return SKNLGRP_NONE; + switch (sk->sk_protocol) { case IPPROTO_TCP: return SKNLGRP_INET6_TCP_DESTROY; diff --git a/include/linux/stm.h b/include/linux/stm.h index 9d0083d364e6..8369d8a8cabd 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -50,6 +50,8 @@ struct stm_device; * @sw_end: last STP master available to software * @sw_nchannels: number of STP channels per master * @sw_mmiosz: size of one channel's IO space, for mmap, optional + * @hw_override: masters in the STP stream will not match the ones + * assigned by software, but are up to the STM hardware * @packet: callback that sends an STP packet * @mmio_addr: mmap callback, optional * @link: called when a new stm_source gets linked to us, optional @@ -67,6 +69,16 @@ struct stm_device; * description. That is, the lowest master that can be allocated to software * writers is @sw_start and data from this writer will appear is @sw_start * master in the STP stream. + * + * The @packet callback should adhere to the following rules: + * 1) it must return the number of bytes it consumed from the payload; + * 2) therefore, if it sent a packet that does not have payload (like FLAG), + * it must return zero; + * 3) if it does not support the requested packet type/flag combination, + * it must return -ENOTSUPP. + * + * The @unlink callback is called when there are no more active writers so + * that the master/channel can be quiesced. 
*/ struct stm_data { const char *name; @@ -75,6 +87,7 @@ struct stm_data { unsigned int sw_end; unsigned int sw_nchannels; unsigned int sw_mmiosz; + unsigned int hw_override; ssize_t (*packet)(struct stm_data *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 131032f15cc1..9b6027c51736 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -135,8 +135,6 @@ struct rpc_create_args { #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) struct rpc_clnt *rpc_create(struct rpc_create_args *args); -struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, - struct rpc_xprt *xprt); struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, const struct rpc_program *, u32); void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt); diff --git a/include/linux/tty.h b/include/linux/tty.h index 3bf03b6b52e9..83b264c52898 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -338,7 +338,6 @@ struct tty_file_private { #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ #define TTY_DEBUG 4 /* Debugging */ #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ -#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */ #define TTY_LDISC_OPEN 11 /* Line discipline is open */ #define TTY_PTY_LOCK 16 /* pty private */ #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ @@ -469,6 +468,7 @@ extern void tty_buffer_init(struct tty_port *port); extern void tty_buffer_set_lock_subclass(struct tty_port *port); extern bool tty_buffer_restart_work(struct tty_port *port); extern bool tty_buffer_cancel_work(struct tty_port *port); +extern void tty_buffer_flush_work(struct tty_port *port); extern speed_t tty_termios_baud_rate(struct ktermios *termios); extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); extern void tty_termios_encode_baud_rate(struct ktermios *termios, diff --git a/include/linux/usb.h b/include/linux/usb.h index 55240f9a3b94..440248ba4123 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1079,7 +1079,7 @@ struct usbdrv_wrap { * for interfaces bound to this driver. * @soft_unbind: if set to 1, the USB core will not kill URBs and disable * endpoints before calling the driver's disconnect method. - * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs + * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs * to initiate lower power link state transitions when an idle timeout * occurs. Device-initiated USB 3.0 link PM will still be allowed. 
* diff --git a/include/linux/usb/class-dual-role.h b/include/linux/usb/class-dual-role.h index af42ed34944a..c6df2238012e 100644 --- a/include/linux/usb/class-dual-role.h +++ b/include/linux/usb/class-dual-role.h @@ -109,18 +109,19 @@ extern int dual_role_property_is_writeable(struct dual_role_phy_instance enum dual_role_property prop); extern void *dual_role_get_drvdata(struct dual_role_phy_instance *dual_role); #else /* CONFIG_DUAL_ROLE_USB_INTF */ -static void dual_role_instance_changed(struct dual_role_phy_instance +static inline void dual_role_instance_changed(struct dual_role_phy_instance *dual_role){} -static struct dual_role_phy_instance *__must_check +static inline struct dual_role_phy_instance *__must_check devm_dual_role_instance_register(struct device *parent, const struct dual_role_phy_desc *desc) { return ERR_PTR(-ENOSYS); } -static void devm_dual_role_instance_unregister(struct device *dev, +static inline void devm_dual_role_instance_unregister(struct device *dev, struct dual_role_phy_instance *dual_role){} -static void *dual_role_get_drvdata(struct dual_role_phy_instance *dual_role) +static inline void *dual_role_get_drvdata(struct dual_role_phy_instance + *dual_role) { return ERR_PTR(-ENOSYS); } diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 966889a20ea3..e479033bd782 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -180,11 +180,11 @@ struct ehci_regs { * PORTSCx */ /* HOSTPC: offset 0x84 */ - u32 hostpc[1]; /* HOSTPC extension */ + u32 hostpc[0]; /* HOSTPC extension */ #define HOSTPC_PHCD (1<<22) /* Phy clock disable */ #define HOSTPC_PSPD (3<<25) /* Port speed detection */ - u32 reserved5[16]; + u32 reserved5[17]; /* USBMODE_EX: offset 0xc8 */ u32 usbmode_ex; /* USB Device mode extension */ diff --git a/include/linux/usb/usbpd.h b/include/linux/usb/usbpd.h index c2c1025feb8e..3566a7a974d1 100644 --- a/include/linux/usb/usbpd.h +++ b/include/linux/usb/usbpd.h @@ -42,6 +42,7 @@ enum usbpd_svdm_cmd_type { struct usbpd_svid_handler { u16 svid; + /* Notified when VDM session established/reset; must be implemented */ void (*connect)(struct usbpd_svid_handler *hdlr); void (*disconnect)(struct usbpd_svid_handler *hdlr); @@ -54,7 +55,9 @@ struct usbpd_svid_handler { enum usbpd_svdm_cmd_type cmd_type, const u32 *vdos, int num_vdos); + /* client should leave these blank; private members used by PD driver */ struct list_head entry; + bool discovered; }; enum plug_orientation { diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9076fd9f92b2..b89c9c2f7f6e 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -784,6 +784,34 @@ struct cfg80211_csa_settings { }; /** + * struct iface_combination_params - input parameters for interface combinations + * + * Used to pass interface combination parameters + * + * @num_different_channels: the number of different channels we want + * to use for verification + * @radar_detect: a bitmap where each bit corresponds to a channel + * width where radar detection is needed, as in the definition of + * &struct ieee80211_iface_combination.@radar_detect_widths + * @iftype_num: array with the number of interfaces of each interface + * type. The index is the interface type as specified in &enum + * nl80211_iftype. 
+ * @beacon_int_gcd: a value specifying GCD of all beaconing interfaces, + * the GCD of a single value is considered the value itself, so for + * a single interface this should be set to that interface's beacon + * interval + * @beacon_int_different: a flag indicating whether or not all beacon + * intervals (of beaconing interfaces) are different or not. + */ +struct iface_combination_params { + int num_different_channels; + u8 radar_detect; + int iftype_num[NUM_NL80211_IFTYPES]; + u32 beacon_int_gcd; + bool beacon_int_different; +}; + +/** * enum station_parameters_apply_mask - station parameter values to apply * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp) * @STATION_PARAM_APPLY_CAPABILITY: apply new capability @@ -2874,6 +2902,12 @@ struct ieee80211_iface_limit { * only in special cases. * @radar_detect_widths: bitmap of channel widths supported for radar detection * @radar_detect_regions: bitmap of regions supported for radar detection + * @beacon_int_min_gcd: This interface combination supports different + * beacon intervals. + * = 0 - all beacon intervals for different interface must be same. + * > 0 - any beacon interval for the interface part of this combination AND + * *GCD* of all beacon intervals from beaconing interfaces of this + * combination must be greater or equal to this value. * * With this structure the driver can describe which interface * combinations it supports concurrently. @@ -2932,6 +2966,7 @@ struct ieee80211_iface_combination { bool beacon_int_infra_match; u8 radar_detect_widths; u8 radar_detect_regions; + u32 beacon_int_min_gcd; }; struct ieee80211_txrx_stypes { @@ -5333,36 +5368,20 @@ unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy); * cfg80211_check_combinations - check interface combinations * * @wiphy: the wiphy - * @num_different_channels: the number of different channels we want - * to use for verification - * @radar_detect: a bitmap where each bit corresponds to a channel - * width where radar detection is needed, as in the definition of - * &struct ieee80211_iface_combination.@radar_detect_widths - * @iftype_num: array with the numbers of interfaces of each interface - * type. The index is the interface type as specified in &enum - * nl80211_iftype. - * + * @params: the interface combinations parameter +* * This function can be called by the driver to check whether a * combination of interfaces and their types are allowed according to * the interface combinations. */ int cfg80211_check_combinations(struct wiphy *wiphy, - const int num_different_channels, - const u8 radar_detect, - const int iftype_num[NUM_NL80211_IFTYPES]); + struct iface_combination_params *params); /** * cfg80211_iter_combinations - iterate over matching combinations * * @wiphy: the wiphy - * @num_different_channels: the number of different channels we want - * to use for verification - * @radar_detect: a bitmap where each bit corresponds to a channel - * width where radar detection is needed, as in the definition of - * &struct ieee80211_iface_combination.@radar_detect_widths - * @iftype_num: array with the numbers of interfaces of each interface - * type. The index is the interface type as specified in &enum - * nl80211_iftype. + * @params: the interface combinations parameter * @iter: function to call for each matching combination * @data: pointer to pass to iter function * @@ -5371,9 +5390,7 @@ int cfg80211_check_combinations(struct wiphy *wiphy, * purposes. 
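With this change, callers build a struct iface_combination_params instead of passing the channel count, radar bitmap and iftype array as separate arguments. A rough illustrative fragment, not taken from any driver; the one-AP-plus-one-STA counts and the 100 TU beacon interval are made-up values.

#include <net/cfg80211.h>

/* Hypothetical helper: is one beaconing AP (100 TU beacon interval) plus
 * one station on a single channel an allowed combination for this wiphy? */
static int example_check_ap_sta(struct wiphy *wiphy)
{
	struct iface_combination_params params = {
		.num_different_channels = 1,
		/* only one beaconing interface, so the GCD is its own interval */
		.beacon_int_gcd = 100,
		.beacon_int_different = false,
	};

	params.iftype_num[NL80211_IFTYPE_AP] = 1;
	params.iftype_num[NL80211_IFTYPE_STATION] = 1;

	return cfg80211_check_combinations(wiphy, &params);
}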
*/ int cfg80211_iter_combinations(struct wiphy *wiphy, - const int num_different_channels, - const u8 radar_detect, - const int iftype_num[NUM_NL80211_IFTYPES], + struct iface_combination_params *params, void (*iter)(const struct ieee80211_iface_combination *c, void *data), void *data); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 62a750a6a8f8..af40bc586a1b 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -230,6 +230,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, u8 *protocol, struct flowi4 *fl4); +int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 1d22ce9f352e..31d0e5143848 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -88,7 +88,7 @@ struct switchdev_obj_ipv4_fib { struct switchdev_obj obj; u32 dst; int dst_len; - struct fib_info fi; + struct fib_info *fi; u8 tos; u8 type; u32 nlflags; diff --git a/include/net/tcp.h b/include/net/tcp.h index 15ee95fcd561..5ed36a59b08e 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1695,8 +1695,6 @@ static inline bool tcp_stream_memory_free(const struct sock *sk) return notsent_bytes < tcp_notsent_lowat(tp); } -extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr); - #ifdef CONFIG_PROC_FS int tcp4_proc_init(void); void tcp4_proc_exit(void); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index f9f445be1a50..c37b22101473 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -243,6 +243,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...); enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING, + STARGET_REMOVE, STARGET_DEL, }; diff --git a/include/trace/events/power.h b/include/trace/events/power.h index e8bc27f3467e..bc33e91ec5e6 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -369,15 +369,17 @@ TRACE_EVENT(clock_set_parent, TRACE_EVENT(clock_state, TP_PROTO(const char *name, unsigned long prepare_count, - unsigned long count, unsigned long rate), + unsigned long count, unsigned long rate, + unsigned int vdd_level), - TP_ARGS(name, prepare_count, count, rate), + TP_ARGS(name, prepare_count, count, rate, vdd_level), TP_STRUCT__entry( __string(name, name) __field(unsigned long, prepare_count) __field(unsigned long, count) __field(unsigned long, rate) + __field(unsigned int, vdd_level) ), TP_fast_assign( @@ -385,10 +387,12 @@ TRACE_EVENT(clock_state, __entry->prepare_count = prepare_count; __entry->count = count; __entry->rate = rate; + __entry->vdd_level = vdd_level; ), - TP_printk("%s\t[%lu:%lu]\t%lu", __get_str(name), __entry->prepare_count, - __entry->count, __entry->rate) + TP_printk("%s\tprepare:enable cnt [%lu:%lu]\trate: vdd level [%lu:%u]", + __get_str(name), __entry->prepare_count, + __entry->count, __entry->rate, __entry->vdd_level) ); /* diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index a08933a58079..8a702e7b3f71 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -359,6 +359,7 @@ enum fuse_opcode { FUSE_FALLOCATE = 43, FUSE_READDIRPLUS = 44, FUSE_RENAME2 = 45, + FUSE_CANONICAL_PATH= 2016, /* CUSE specific operations 
*/ CUSE_INIT = 4096, diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h index d5e38c73377c..e4f048ee7043 100644 --- a/include/uapi/linux/libc-compat.h +++ b/include/uapi/linux/libc-compat.h @@ -52,7 +52,7 @@ #if defined(__GLIBC__) /* Coordinate with glibc net/if.h header. */ -#if defined(_NET_IF_H) +#if defined(_NET_IF_H) && defined(__USE_MISC) /* GLIBC headers included first so don't define anything * that would already be defined. */ diff --git a/include/uapi/linux/msm_mdp.h b/include/uapi/linux/msm_mdp.h index 1df65c7f90b3..f0ac02e9c7a8 100644 --- a/include/uapi/linux/msm_mdp.h +++ b/include/uapi/linux/msm_mdp.h @@ -1406,12 +1406,21 @@ enum { MDP_WRITEBACK_MIRROR_RESUME, }; +/* + * The enum values are continued below as preprocessor macro definitions + */ enum mdp_color_space { MDP_CSC_ITU_R_601, MDP_CSC_ITU_R_601_FR, MDP_CSC_ITU_R_709, }; +/* + * These definitions are a continuation of the mdp_color_space enum above + */ +#define MDP_CSC_ITU_R_2020 (MDP_CSC_ITU_R_709 + 1) +#define MDP_CSC_ITU_R_2020_FR (MDP_CSC_ITU_R_2020 + 1) + enum { mdp_igc_v1_7 = 1, mdp_igc_vmax, diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5305e8f4fce1..696a4322844a 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4101,6 +4101,9 @@ enum nl80211_iface_limit_attrs { * of supported channel widths for radar detection. * @NL80211_IFACE_COMB_RADAR_DETECT_REGIONS: u32 attribute containing the bitmap * of supported regulatory regions for radar detection. + * @NL80211_IFACE_COMB_BI_MIN_GCD: u32 attribute specifying the minimum GCD of + * different beacon intervals supported by all the interface combinations + * in this group (if not present, all beacon intervals be identical). * @NUM_NL80211_IFACE_COMB: number of attributes * @MAX_NL80211_IFACE_COMB: highest attribute number * @@ -4108,8 +4111,8 @@ enum nl80211_iface_limit_attrs { * limits = [ #{STA} <= 1, #{AP} <= 1 ], matching BI, channels = 1, max = 2 * => allows an AP and a STA that must match BIs * - * numbers = [ #{AP, P2P-GO} <= 8 ], channels = 1, max = 8 - * => allows 8 of AP/GO + * numbers = [ #{AP, P2P-GO} <= 8 ], BI min gcd, channels = 1, max = 8, + * => allows 8 of AP/GO that can have BI gcd >= min gcd * * numbers = [ #{STA} <= 2 ], channels = 2, max = 2 * => allows two STAs on different channels @@ -4135,6 +4138,7 @@ enum nl80211_if_combination_attrs { NL80211_IFACE_COMB_NUM_CHANNELS, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS, + NL80211_IFACE_COMB_BI_MIN_GCD, /* keep last */ NUM_NL80211_IFACE_COMB, diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 7ac89001d223..686da166aeec 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -397,6 +397,7 @@ struct perf_event_attr { #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) #define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) #define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32) +#define PERF_EVENT_IOC_SET_DRV_CONFIGS _IOW('$', 10, char *) enum perf_event_ioc_flags { PERF_IOC_FLAG_GROUP = 1U << 0, diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 4a3b35f10257..7be4c28cc1ca 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -2161,6 +2161,7 @@ struct v4l2_streamparm { #define V4L2_EVENT_BITDEPTH_FLAG 0x1 #define V4L2_EVENT_PICSTRUCT_FLAG 0x2 +#define V4L2_EVENT_COLOUR_SPACE_FLAG 0x4 #define V4L2_EVENT_MSM_VIDC_START (V4L2_EVENT_PRIVATE_START + 0x00001000) 
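The PERF_EVENT_IOC_SET_DRV_CONFIGS ioctl added to perf_event.h above carries an opaque configuration string that core perf hands to the PMU driver (see the get_drv_configs() plumbing added to kernel/events/core.c further down), so the string format is driver-defined. A minimal userspace sketch, assuming fd is an already-open perf event file descriptor for such a PMU:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Returns 0 on success, -1 with errno set (EINVAL if the PMU provides no
 * get_drv_configs() implementation). */
static int perf_set_drv_config(int fd, const char *config)
{
	return ioctl(fd, PERF_EVENT_IOC_SET_DRV_CONFIGS, config);
}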
#define V4L2_EVENT_MSM_VIDC_FLUSH_DONE (V4L2_EVENT_MSM_VIDC_START + 1) diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index eeba75395f7d..c8529ce28a3f 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -16,6 +16,7 @@ #include <linux/videodev2.h> #include <linux/bitmap.h> #include <linux/fb.h> +#include <linux/of.h> #include <media/v4l2-mediabus.h> #include <video/videomode.h> @@ -344,6 +345,7 @@ struct ipu_client_platformdata { int dc; int dp; int dma[2]; + struct device_node *of_node; }; #endif /* __DRM_IPU_H__ */ diff --git a/init/Makefile b/init/Makefile index 692b91f1c1d4..243f61de2cba 100644 --- a/init/Makefile +++ b/init/Makefile @@ -15,6 +15,7 @@ mounts-y := do_mounts.o mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o +mounts-$(CONFIG_BLK_DEV_DM) += do_mounts_dm.o # dependencies on generated files need to be listed explicitly $(obj)/version.o: include/generated/compile.h diff --git a/init/do_mounts.c b/init/do_mounts.c index dea5de95c2dd..1902a1c80831 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -566,6 +566,7 @@ void __init prepare_namespace(void) wait_for_device_probe(); md_run_setup(); + dm_run_setup(); if (saved_root_name[0]) { root_device_name = saved_root_name; diff --git a/init/do_mounts.h b/init/do_mounts.h index f5b978a9bb92..09d22862e8c3 100644 --- a/init/do_mounts.h +++ b/init/do_mounts.h @@ -74,3 +74,13 @@ void md_run_setup(void); static inline void md_run_setup(void) {} #endif + +#ifdef CONFIG_BLK_DEV_DM + +void dm_run_setup(void); + +#else + +static inline void dm_run_setup(void) {} + +#endif diff --git a/init/do_mounts_dm.c b/init/do_mounts_dm.c new file mode 100644 index 000000000000..f521bc5ae248 --- /dev/null +++ b/init/do_mounts_dm.c @@ -0,0 +1,425 @@ +/* do_mounts_dm.c + * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org> + * All Rights Reserved. + * Based on do_mounts_md.c + * + * This file is released under the GPL. + */ +#include <linux/device-mapper.h> +#include <linux/fs.h> +#include <linux/string.h> + +#include "do_mounts.h" +#include "../drivers/md/dm.h" + +#define DM_MAX_NAME 32 +#define DM_MAX_UUID 129 +#define DM_NO_UUID "none" + +#define DM_MSG_PREFIX "init" + +/* Separators used for parsing the dm= argument. */ +#define DM_FIELD_SEP ' ' +#define DM_LINE_SEP ',' + +/* + * When the device-mapper and any targets are compiled into the kernel + * (not a module), one target may be created and used as the root device at + * boot time with the parameters given with the boot line dm=... + * The code for that is here. + */ + +struct dm_setup_target { + sector_t begin; + sector_t length; + char *type; + char *params; + /* simple singly linked list */ + struct dm_setup_target *next; +}; + +static struct { + int minor; + int ro; + char name[DM_MAX_NAME]; + char uuid[DM_MAX_UUID]; + char *targets; + struct dm_setup_target *target; + int target_count; +} dm_setup_args __initdata; + +static __initdata int dm_early_setup; + +static size_t __init get_dm_option(char *str, char **next, char sep) +{ + size_t len = 0; + char *endp = NULL; + + if (!str) + return 0; + + endp = strchr(str, sep); + if (!endp) { /* act like strchrnul */ + len = strlen(str); + endp = str + len; + } else { + len = endp - str; + } + + if (endp == str) + return 0; + + if (!next) + return len; + + if (*endp == 0) { + /* Don't advance past the nul. 
*/ + *next = endp; + } else { + *next = endp + 1; + } + return len; +} + +static int __init dm_setup_args_init(void) +{ + dm_setup_args.minor = 0; + dm_setup_args.ro = 0; + dm_setup_args.target = NULL; + dm_setup_args.target_count = 0; + return 0; +} + +static int __init dm_setup_cleanup(void) +{ + struct dm_setup_target *target = dm_setup_args.target; + struct dm_setup_target *old_target = NULL; + while (target) { + kfree(target->type); + kfree(target->params); + old_target = target; + target = target->next; + kfree(old_target); + dm_setup_args.target_count--; + } + BUG_ON(dm_setup_args.target_count); + return 0; +} + +static char * __init dm_setup_parse_device_args(char *str) +{ + char *next = NULL; + size_t len = 0; + + /* Grab the logical name of the device to be exported to udev */ + len = get_dm_option(str, &next, DM_FIELD_SEP); + if (!len) { + DMERR("failed to parse device name"); + goto parse_fail; + } + len = min(len + 1, sizeof(dm_setup_args.name)); + strlcpy(dm_setup_args.name, str, len); /* includes nul */ + str = skip_spaces(next); + + /* Grab the UUID value or "none" */ + len = get_dm_option(str, &next, DM_FIELD_SEP); + if (!len) { + DMERR("failed to parse device uuid"); + goto parse_fail; + } + len = min(len + 1, sizeof(dm_setup_args.uuid)); + strlcpy(dm_setup_args.uuid, str, len); + str = skip_spaces(next); + + /* Determine if the table/device will be read only or read-write */ + if (!strncmp("ro,", str, 3)) { + dm_setup_args.ro = 1; + } else if (!strncmp("rw,", str, 3)) { + dm_setup_args.ro = 0; + } else { + DMERR("failed to parse table mode"); + goto parse_fail; + } + str = skip_spaces(str + 3); + + return str; + +parse_fail: + return NULL; +} + +static void __init dm_substitute_devices(char *str, size_t str_len) +{ + char *candidate = str; + char *candidate_end = str; + char old_char; + size_t len = 0; + dev_t dev; + + if (str_len < 3) + return; + + while (str && *str) { + candidate = strchr(str, '/'); + if (!candidate) + break; + + /* Avoid embedded slashes */ + if (candidate != str && *(candidate - 1) != DM_FIELD_SEP) { + str = strchr(candidate, DM_FIELD_SEP); + continue; + } + + len = get_dm_option(candidate, &candidate_end, DM_FIELD_SEP); + str = skip_spaces(candidate_end); + if (len < 3 || len > 37) /* name_to_dev_t max; maj:mix min */ + continue; + + /* Temporarily terminate with a nul */ + candidate_end--; + old_char = *candidate_end; + *candidate_end = '\0'; + + DMDEBUG("converting candidate device '%s' to dev_t", candidate); + /* Use the boot-time specific device naming */ + dev = name_to_dev_t(candidate); + *candidate_end = old_char; + + DMDEBUG(" -> %u", dev); + /* No suitable replacement found */ + if (!dev) + continue; + + /* Rewrite the /dev/path as a major:minor */ + len = snprintf(candidate, len, "%u:%u", MAJOR(dev), MINOR(dev)); + if (!len) { + DMERR("error substituting device major/minor."); + break; + } + candidate += len; + /* Pad out with spaces (fixing our nul) */ + while (candidate < candidate_end) + *(candidate++) = DM_FIELD_SEP; + } +} + +static int __init dm_setup_parse_targets(char *str) +{ + char *next = NULL; + size_t len = 0; + struct dm_setup_target **target = NULL; + + /* Targets are defined as per the table format but with a + * comma as a newline separator. 
*/ + target = &dm_setup_args.target; + while (str && *str) { + *target = kzalloc(sizeof(struct dm_setup_target), GFP_KERNEL); + if (!*target) { + DMERR("failed to allocate memory for target %d", + dm_setup_args.target_count); + goto parse_fail; + } + dm_setup_args.target_count++; + + (*target)->begin = simple_strtoull(str, &next, 10); + if (!next || *next != DM_FIELD_SEP) { + DMERR("failed to parse starting sector for target %d", + dm_setup_args.target_count - 1); + goto parse_fail; + } + str = skip_spaces(next + 1); + + (*target)->length = simple_strtoull(str, &next, 10); + if (!next || *next != DM_FIELD_SEP) { + DMERR("failed to parse length for target %d", + dm_setup_args.target_count - 1); + goto parse_fail; + } + str = skip_spaces(next + 1); + + len = get_dm_option(str, &next, DM_FIELD_SEP); + if (!len || + !((*target)->type = kstrndup(str, len, GFP_KERNEL))) { + DMERR("failed to parse type for target %d", + dm_setup_args.target_count - 1); + goto parse_fail; + } + str = skip_spaces(next); + + len = get_dm_option(str, &next, DM_LINE_SEP); + if (!len || + !((*target)->params = kstrndup(str, len, GFP_KERNEL))) { + DMERR("failed to parse params for target %d", + dm_setup_args.target_count - 1); + goto parse_fail; + } + str = skip_spaces(next); + + /* Before moving on, walk through the copied target and + * attempt to replace all /dev/xxx with the major:minor number. + * It may not be possible to resolve them traditionally at + * boot-time. */ + dm_substitute_devices((*target)->params, len); + + target = &((*target)->next); + } + DMDEBUG("parsed %d targets", dm_setup_args.target_count); + + return 0; + +parse_fail: + return 1; +} + +/* + * Parse the command-line parameters given our kernel, but do not + * actually try to invoke the DM device now; that is handled by + * dm_setup_drive after the low-level disk drivers have initialised. + * dm format is as follows: + * dm="name uuid fmode,[table line 1],[table line 2],..." + * May be used with root=/dev/dm-0 as it always uses the first dm minor. + */ + +static int __init dm_setup(char *str) +{ + dm_setup_args_init(); + + str = dm_setup_parse_device_args(str); + if (!str) { + DMDEBUG("str is NULL"); + goto parse_fail; + } + + /* Target parsing is delayed until we have dynamic memory */ + dm_setup_args.targets = str; + + printk(KERN_INFO "dm: will configure '%s' on dm-%d\n", + dm_setup_args.name, dm_setup_args.minor); + + dm_early_setup = 1; + return 1; + +parse_fail: + printk(KERN_WARNING "dm: Invalid arguments supplied to dm=.\n"); + return 0; +} + + +static void __init dm_setup_drive(void) +{ + struct mapped_device *md = NULL; + struct dm_table *table = NULL; + struct dm_setup_target *target; + char *uuid = dm_setup_args.uuid; + fmode_t fmode = FMODE_READ; + + /* Finish parsing the targets. */ + if (dm_setup_parse_targets(dm_setup_args.targets)) + goto parse_fail; + + if (dm_create(dm_setup_args.minor, &md)) { + DMDEBUG("failed to create the device"); + goto dm_create_fail; + } + DMDEBUG("created device '%s'", dm_device_name(md)); + + /* In addition to flagging the table below, the disk must be + * set explicitly ro/rw. 
*/ + set_disk_ro(dm_disk(md), dm_setup_args.ro); + + if (!dm_setup_args.ro) + fmode |= FMODE_WRITE; + if (dm_table_create(&table, fmode, dm_setup_args.target_count, md)) { + DMDEBUG("failed to create the table"); + goto dm_table_create_fail; + } + + dm_lock_md_type(md); + target = dm_setup_args.target; + while (target) { + DMINFO("adding target '%llu %llu %s %s'", + (unsigned long long) target->begin, + (unsigned long long) target->length, target->type, + target->params); + if (dm_table_add_target(table, target->type, target->begin, + target->length, target->params)) { + DMDEBUG("failed to add the target to the table"); + goto add_target_fail; + } + target = target->next; + } + + if (dm_table_complete(table)) { + DMDEBUG("failed to complete the table"); + goto table_complete_fail; + } + + if (dm_get_md_type(md) == DM_TYPE_NONE) { + dm_set_md_type(md, dm_table_get_type(table)); + if (dm_setup_md_queue(md)) { + DMWARN("unable to set up device queue for new table."); + goto setup_md_queue_fail; + } + } else if (dm_get_md_type(md) != dm_table_get_type(table)) { + DMWARN("can't change device type after initial table load."); + goto setup_md_queue_fail; + } + + /* Suspend the device so that we can bind it to the table. */ + if (dm_suspend(md, 0)) { + DMDEBUG("failed to suspend the device pre-bind"); + goto suspend_fail; + } + + /* Bind the table to the device. This is the only way to associate + * md->map with the table and set the disk capacity directly. */ + if (dm_swap_table(md, table)) { /* should return NULL. */ + DMDEBUG("failed to bind the device to the table"); + goto table_bind_fail; + } + + /* Finally, resume and the device should be ready. */ + if (dm_resume(md)) { + DMDEBUG("failed to resume the device"); + goto resume_fail; + } + + /* Export the dm device via the ioctl interface */ + if (!strcmp(DM_NO_UUID, dm_setup_args.uuid)) + uuid = NULL; + if (dm_ioctl_export(md, dm_setup_args.name, uuid)) { + DMDEBUG("failed to export device with given name and uuid"); + goto export_fail; + } + printk(KERN_INFO "dm: dm-%d is ready\n", dm_setup_args.minor); + + dm_unlock_md_type(md); + dm_setup_cleanup(); + return; + +export_fail: +resume_fail: +table_bind_fail: +suspend_fail: +setup_md_queue_fail: +table_complete_fail: +add_target_fail: + dm_unlock_md_type(md); +dm_table_create_fail: + dm_put(md); +dm_create_fail: + dm_setup_cleanup(); +parse_fail: + printk(KERN_WARNING "dm: starting dm-%d (%s) failed\n", + dm_setup_args.minor, dm_setup_args.name); +} + +__setup("dm=", dm_setup); + +void __init dm_run_setup(void) +{ + if (!dm_early_setup) + return; + printk(KERN_INFO "dm: attempting early device configuration.\n"); + dm_setup_drive(); +} diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index d1a7646f79c5..cb85d228b1ac 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -358,7 +358,7 @@ static int bpf_fill_super(struct super_block *sb, void *data, int silent) static struct dentry *bpf_mount(struct file_system_type *type, int flags, const char *dev_name, void *data) { - return mount_ns(type, flags, current->nsproxy->mnt_ns, bpf_fill_super); + return mount_nodev(type, flags, data, bpf_fill_super); } static struct file_system_type bpf_fs_type = { @@ -366,7 +366,6 @@ static struct file_system_type bpf_fs_type = { .name = "bpf", .mount = bpf_mount, .kill_sb = kill_litter_super, - .fs_flags = FS_USERNS_MOUNT, }; MODULE_ALIAS_FS("bpf"); diff --git a/kernel/cpu.c b/kernel/cpu.c index 3c97f5b88a07..25cfcc804077 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -612,6 +612,7 @@ void __weak 
arch_enable_nonboot_cpus_end(void) void enable_nonboot_cpus(void) { int cpu, error; + struct device *cpu_device; /* Allow everyone to use the CPU hotplug again */ cpu_maps_update_begin(); @@ -629,6 +630,12 @@ void enable_nonboot_cpus(void) trace_suspend_resume(TPS("CPU_ON"), cpu, false); if (!error) { pr_info("CPU%d is up\n", cpu); + cpu_device = get_cpu_device(cpu); + if (!cpu_device) + pr_err("%s: failed to get cpu%d device\n", + __func__, cpu); + else + kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE); continue; } pr_warn("Error taking CPU%d up: %d\n", cpu, error); diff --git a/kernel/events/core.c b/kernel/events/core.c index 5beb88f11671..446dbad75e60 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -954,6 +954,7 @@ static void put_ctx(struct perf_event_context *ctx) * function. * * Lock order: + * cred_guard_mutex * task_struct::perf_event_mutex * perf_event_context::mutex * perf_event_context::lock @@ -1923,8 +1924,13 @@ event_sched_in(struct perf_event *event, if (event->state <= PERF_EVENT_STATE_OFF) return 0; - event->state = PERF_EVENT_STATE_ACTIVE; - event->oncpu = smp_processor_id(); + WRITE_ONCE(event->oncpu, smp_processor_id()); + /* + * Order event::oncpu write to happen before the ACTIVE state + * is visible. + */ + smp_wmb(); + WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE); /* * Unthrottle events, since we scheduled we might have missed several @@ -2405,6 +2411,29 @@ void perf_event_enable(struct perf_event *event) } EXPORT_SYMBOL_GPL(perf_event_enable); +static int __perf_event_stop(void *info) +{ + struct perf_event *event = info; + + /* for AUX events, our job is done if the event is already inactive */ + if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) + return 0; + + /* matches smp_wmb() in event_sched_in() */ + smp_rmb(); + + /* + * There is a window with interrupts enabled before we get here, + * so we need to check again lest we try to stop another CPU's event. + */ + if (READ_ONCE(event->oncpu) != smp_processor_id()) + return -EAGAIN; + + event->pmu->stop(event, PERF_EF_UPDATE); + + return 0; +} + static int _perf_event_refresh(struct perf_event *event, int refresh) { /* @@ -3461,7 +3490,6 @@ static struct task_struct * find_lively_task_by_vpid(pid_t vpid) { struct task_struct *task; - int err; rcu_read_lock(); if (!vpid) @@ -3475,16 +3503,7 @@ find_lively_task_by_vpid(pid_t vpid) if (!task) return ERR_PTR(-ESRCH); - /* Reuse ptrace permission checks for now. 
*/ - err = -EACCES; - if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) - goto errout; - return task; -errout: - put_task_struct(task); - return ERR_PTR(err); - } /* @@ -3745,6 +3764,9 @@ static void __free_event(struct perf_event *event) if (event->destroy) event->destroy(event); + if (event->pmu->free_drv_configs) + event->pmu->free_drv_configs(event); + if (event->ctx) put_ctx(event->ctx); @@ -4306,6 +4328,8 @@ static int perf_event_set_output(struct perf_event *event, struct perf_event *output_event); static int perf_event_set_filter(struct perf_event *event, void __user *arg); static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); +static int perf_event_drv_configs(struct perf_event *event, + void __user *arg); static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) { @@ -4362,6 +4386,9 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon case PERF_EVENT_IOC_SET_BPF: return perf_event_set_bpf_prog(event, arg); + case PERF_EVENT_IOC_SET_DRV_CONFIGS: + return perf_event_drv_configs(event, (void __user *)arg); + default: return -ENOTTY; } @@ -4394,6 +4421,7 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd, switch (_IOC_NR(cmd)) { case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): case _IOC_NR(PERF_EVENT_IOC_ID): + case _IOC_NR(PERF_EVENT_IOC_SET_DRV_CONFIGS): /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */ if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { cmd &= ~IOCSIZE_MASK; @@ -4678,6 +4706,8 @@ static void perf_mmap_open(struct vm_area_struct *vma) event->pmu->event_mapped(event); } +static void perf_pmu_output_stop(struct perf_event *event); + /* * A buffer can be mmap()ed multiple times; either directly through the same * event, or through other events by use of perf_event_set_output(). @@ -4705,10 +4735,22 @@ static void perf_mmap_close(struct vm_area_struct *vma) */ if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { + /* + * Stop all AUX events that are writing to this buffer, + * so that we can free its AUX pages and corresponding PMU + * data. Note that after rb::aux_mmap_count dropped to zero, + * they won't start any more (see perf_aux_output_begin()). 
+ */ + perf_pmu_output_stop(event); + + /* now it's safe to free the pages */ atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); vma->vm_mm->pinned_vm -= rb->aux_mmap_locked; + /* this has to be the last one */ rb_free_aux(rb); + WARN_ON_ONCE(atomic_read(&rb->aux_refcount)); + mutex_unlock(&event->mmap_mutex); } @@ -5779,6 +5821,80 @@ next: rcu_read_unlock(); } +struct remote_output { + struct ring_buffer *rb; + int err; +}; + +static void __perf_event_output_stop(struct perf_event *event, void *data) +{ + struct perf_event *parent = event->parent; + struct remote_output *ro = data; + struct ring_buffer *rb = ro->rb; + + if (!has_aux(event)) + return; + + if (!parent) + parent = event; + + /* + * In case of inheritance, it will be the parent that links to the + * ring-buffer, but it will be the child that's actually using it: + */ + if (rcu_dereference(parent->rb) == rb) + ro->err = __perf_event_stop(event); +} + +static int __perf_pmu_output_stop(void *info) +{ + struct perf_event *event = info; + struct pmu *pmu = event->pmu; + struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); + struct remote_output ro = { + .rb = event->rb, + }; + + rcu_read_lock(); + perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro); + if (cpuctx->task_ctx) + perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop, + &ro); + rcu_read_unlock(); + + return ro.err; +} + +static void perf_pmu_output_stop(struct perf_event *event) +{ + struct perf_event *iter; + int err, cpu; + +restart: + rcu_read_lock(); + list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { + /* + * For per-CPU events, we need to make sure that neither they + * nor their children are running; for cpu==-1 events it's + * sufficient to stop the event itself if it's active, since + * it can't have children. + */ + cpu = iter->cpu; + if (cpu == -1) + cpu = READ_ONCE(iter->oncpu); + + if (cpu == -1) + continue; + + err = cpu_function_call(cpu, __perf_pmu_output_stop, event); + if (err == -EAGAIN) { + rcu_read_unlock(); + goto restart; + } + } + rcu_read_unlock(); +} + /* * task tracking -- fork/exit * @@ -7167,7 +7283,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event) prog = event->tp_event->prog; if (prog) { event->tp_event->prog = NULL; - bpf_prog_put(prog); + bpf_prog_put_rcu(prog); } } @@ -7209,6 +7325,15 @@ void perf_bp_event(struct perf_event *bp, void *data) } #endif +static int perf_event_drv_configs(struct perf_event *event, + void __user *arg) +{ + if (!event->pmu->get_drv_configs) + return -EINVAL; + + return event->pmu->get_drv_configs(event, arg); +} + /* * hrtimer based swevent callback */ @@ -7949,6 +8074,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, INIT_LIST_HEAD(&event->sibling_list); INIT_LIST_HEAD(&event->rb_entry); INIT_LIST_HEAD(&event->active_entry); + INIT_LIST_HEAD(&event->drv_configs); INIT_HLIST_NODE(&event->hlist_entry); @@ -8395,6 +8521,24 @@ SYSCALL_DEFINE5(perf_event_open, get_online_cpus(); + if (task) { + err = mutex_lock_interruptible(&task->signal->cred_guard_mutex); + if (err) + goto err_cpus; + + /* + * Reuse ptrace permission checks for now. + * + * We must hold cred_guard_mutex across this and any potential + * perf_install_in_context() call for this new event to + * serialize against exec() altering our credentials (and the + * perf_event_exit_task() that could imply). 
+ */ + err = -EACCES; + if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) + goto err_cred; + } + if (flags & PERF_FLAG_PID_CGROUP) cgroup_fd = pid; @@ -8402,7 +8546,7 @@ SYSCALL_DEFINE5(perf_event_open, NULL, NULL, cgroup_fd); if (IS_ERR(event)) { err = PTR_ERR(event); - goto err_cpus; + goto err_cred; } if (is_sampling_event(event)) { @@ -8461,11 +8605,6 @@ SYSCALL_DEFINE5(perf_event_open, goto err_context; } - if (task) { - put_task_struct(task); - task = NULL; - } - /* * Look up the group leader (we will attach this event to it): */ @@ -8524,6 +8663,7 @@ SYSCALL_DEFINE5(perf_event_open, f_flags); if (IS_ERR(event_file)) { err = PTR_ERR(event_file); + event_file = NULL; goto err_context; } @@ -8553,6 +8693,11 @@ SYSCALL_DEFINE5(perf_event_open, WARN_ON_ONCE(ctx->parent_ctx); + /* + * This is the point on no return; we cannot fail hereafter. This is + * where we start modifying current state. + */ + if (move_group) { /* * See perf_event_ctx_lock() for comments on the details @@ -8622,6 +8767,11 @@ SYSCALL_DEFINE5(perf_event_open, mutex_unlock(&gctx->mutex); mutex_unlock(&ctx->mutex); + if (task) { + mutex_unlock(&task->signal->cred_guard_mutex); + put_task_struct(task); + } + put_online_cpus(); event->owner = current; @@ -8656,6 +8806,9 @@ err_alloc: */ if (!event_file) free_event(event); +err_cred: + if (task) + mutex_unlock(&task->signal->cred_guard_mutex); err_cpus: put_online_cpus(); err_task: @@ -8935,6 +9088,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) /* * When a child task exits, feed back event values to parent events. + * + * Can be called with cred_guard_mutex held when called from + * install_exec_creds(). */ void perf_event_exit_task(struct task_struct *child) { diff --git a/kernel/events/internal.h b/kernel/events/internal.h index 2bbad9c1274c..2b229fdcfc09 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -11,7 +11,6 @@ struct ring_buffer { atomic_t refcount; struct rcu_head rcu_head; - struct irq_work irq_work; #ifdef CONFIG_PERF_USE_VMALLOC struct work_struct work; int page_order; /* allocation order */ diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 014b69528194..8c60a4eb4080 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -221,8 +221,6 @@ void perf_output_end(struct perf_output_handle *handle) rcu_read_unlock(); } -static void rb_irq_work(struct irq_work *work); - static void ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) { @@ -243,16 +241,6 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) INIT_LIST_HEAD(&rb->event_list); spin_lock_init(&rb->event_lock); - init_irq_work(&rb->irq_work, rb_irq_work); -} - -static void ring_buffer_put_async(struct ring_buffer *rb) -{ - if (!atomic_dec_and_test(&rb->refcount)) - return; - - rb->rcu_head.next = (void *)rb; - irq_work_queue(&rb->irq_work); } /* @@ -264,6 +252,10 @@ static void ring_buffer_put_async(struct ring_buffer *rb) * The ordering is similar to that of perf_output_{begin,end}, with * the exception of (B), which should be taken care of by the pmu * driver, since ordering rules will differ depending on hardware. + * + * Call this from pmu::start(); see the comment in perf_aux_output_end() + * about its use in pmu callbacks. Both can also be called from the PMI + * handler if needed. 
*/ void *perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event) @@ -288,6 +280,13 @@ void *perf_aux_output_begin(struct perf_output_handle *handle, goto err; /* + * If rb::aux_mmap_count is zero (and rb_has_aux() above went through), + * the aux buffer is in perf_mmap_close(), about to get freed. + */ + if (!atomic_read(&rb->aux_mmap_count)) + goto err_put; + + /* * Nesting is not supported for AUX area, make sure nested * writers are caught early */ @@ -328,10 +327,11 @@ void *perf_aux_output_begin(struct perf_output_handle *handle, return handle->rb->aux_priv; err_put: + /* can't be last */ rb_free_aux(rb); err: - ring_buffer_put_async(rb); + ring_buffer_put(rb); handle->event = NULL; return NULL; @@ -342,6 +342,10 @@ err: * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the * pmu driver's responsibility to observe ordering rules of the hardware, * so that all the data is externally visible before this is called. + * + * Note: this has to be called from pmu::stop() callback, as the assumption + * of the AUX buffer management code is that after pmu::stop(), the AUX + * transaction must be stopped and therefore drop the AUX reference count. */ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size, bool truncated) @@ -389,8 +393,9 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size, handle->event = NULL; local_set(&rb->aux_nest, 0); + /* can't be last */ rb_free_aux(rb); - ring_buffer_put_async(rb); + ring_buffer_put(rb); } /* @@ -467,6 +472,33 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx) __free_page(page); } +static void __rb_free_aux(struct ring_buffer *rb) +{ + int pg; + + /* + * Should never happen, the last reference should be dropped from + * perf_mmap_close() path, which first stops aux transactions (which + * in turn are the atomic holders of aux_refcount) and then does the + * last rb_free_aux(). 
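The two comments above fix the AUX calling convention: take the transaction (and the aux_refcount) in pmu::start() via perf_aux_output_begin(), and close it in pmu::stop() via perf_aux_output_end(). A hedged driver-side sketch follows; struct example_pmu and the hardware programming are hypothetical and only illustrate the begin/stop pairing.

#include <linux/kernel.h>
#include <linux/perf_event.h>

struct example_pmu {
	struct pmu			pmu;
	struct perf_output_handle	handle;
};

#define to_example_pmu(p)	container_of((p), struct example_pmu, pmu)

static void example_pmu_start(struct perf_event *event, int flags)
{
	struct example_pmu *epmu = to_example_pmu(event->pmu);
	void *buf;

	/* Fails if the AUX buffer is absent or being unmapped
	 * (aux_mmap_count == 0), in which case we simply do not trace. */
	buf = perf_aux_output_begin(&epmu->handle, event);
	if (!buf)
		return;

	/* ... point the trace hardware at the AUX pages ... */
}

static void example_pmu_stop(struct perf_event *event, int flags)
{
	struct example_pmu *epmu = to_example_pmu(event->pmu);
	unsigned long bytes = 0;	/* ... size produced by the hardware ... */

	/* ... stop the trace hardware ... */

	/* Must be called from pmu::stop(): closes the transaction and drops
	 * the AUX reference taken by perf_aux_output_begin(). */
	perf_aux_output_end(&epmu->handle, bytes, false);
}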
+ */ + WARN_ON_ONCE(in_atomic()); + + if (rb->aux_priv) { + rb->free_aux(rb->aux_priv); + rb->free_aux = NULL; + rb->aux_priv = NULL; + } + + if (rb->aux_nr_pages) { + for (pg = 0; pg < rb->aux_nr_pages; pg++) + rb_free_aux_page(rb, pg); + + kfree(rb->aux_pages); + rb->aux_nr_pages = 0; + } +} + int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, pgoff_t pgoff, int nr_pages, long watermark, int flags) { @@ -530,7 +562,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, goto out; } - rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages, + rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages, overwrite); if (!rb->aux_priv) goto out; @@ -555,45 +587,15 @@ out: if (!ret) rb->aux_pgoff = pgoff; else - rb_free_aux(rb); + __rb_free_aux(rb); return ret; } -static void __rb_free_aux(struct ring_buffer *rb) -{ - int pg; - - if (rb->aux_priv) { - rb->free_aux(rb->aux_priv); - rb->free_aux = NULL; - rb->aux_priv = NULL; - } - - if (rb->aux_nr_pages) { - for (pg = 0; pg < rb->aux_nr_pages; pg++) - rb_free_aux_page(rb, pg); - - kfree(rb->aux_pages); - rb->aux_nr_pages = 0; - } -} - void rb_free_aux(struct ring_buffer *rb) { if (atomic_dec_and_test(&rb->aux_refcount)) - irq_work_queue(&rb->irq_work); -} - -static void rb_irq_work(struct irq_work *work) -{ - struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work); - - if (!atomic_read(&rb->aux_refcount)) __rb_free_aux(rb); - - if (rb->rcu_head.next == (void *)rb) - call_rcu(&rb->rcu_head, rb_free_rcu); } #ifndef CONFIG_PERF_USE_VMALLOC diff --git a/kernel/exit.c b/kernel/exit.c index a32e83d567b9..d61f001c5788 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -931,17 +931,28 @@ static int eligible_pid(struct wait_opts *wo, struct task_struct *p) task_pid_type(p, wo->wo_type) == wo->wo_pid; } -static int eligible_child(struct wait_opts *wo, struct task_struct *p) +static int +eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p) { if (!eligible_pid(wo, p)) return 0; - /* Wait for all children (clone and not) if __WALL is set; - * otherwise, wait for clone children *only* if __WCLONE is - * set; otherwise, wait for non-clone children *only*. (Note: - * A "clone" child here is one that reports to its parent - * using a signal other than SIGCHLD.) */ - if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) - && !(wo->wo_flags & __WALL)) + + /* + * Wait for all children (clone and not) if __WALL is set or + * if it is traced by us. + */ + if (ptrace || (wo->wo_flags & __WALL)) + return 1; + + /* + * Otherwise, wait for clone children *only* if __WCLONE is set; + * otherwise, wait for non-clone children *only*. + * + * Note: a "clone" child here is one that reports to its parent + * using a signal other than SIGCHLD, or a non-leader thread which + * we can only see if it is traced by us. 
+ */ + if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) return 0; return 1; @@ -1314,7 +1325,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, if (unlikely(exit_state == EXIT_DEAD)) return 0; - ret = eligible_child(wo, p); + ret = eligible_child(wo, ptrace, p); if (!ret) return ret; diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 05254eeb4b4e..4b353e0be121 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key); void static_key_slow_inc(struct static_key *key) { + int v, v1; + STATIC_KEY_CHECK_USE(); - if (atomic_inc_not_zero(&key->enabled)) - return; + + /* + * Careful if we get concurrent static_key_slow_inc() calls; + * later calls must wait for the first one to _finish_ the + * jump_label_update() process. At the same time, however, + * the jump_label_update() call below wants to see + * static_key_enabled(&key) for jumps to be updated properly. + * + * So give a special meaning to negative key->enabled: it sends + * static_key_slow_inc() down the slow path, and it is non-zero + * so it counts as "enabled" in jump_label_update(). Note that + * atomic_inc_unless_negative() checks >= 0, so roll our own. + */ + for (v = atomic_read(&key->enabled); v > 0; v = v1) { + v1 = atomic_cmpxchg(&key->enabled, v, v + 1); + if (likely(v1 == v)) + return; + } jump_label_lock(); - if (atomic_inc_return(&key->enabled) == 1) + if (atomic_read(&key->enabled) == 0) { + atomic_set(&key->enabled, -1); jump_label_update(key); + atomic_set(&key->enabled, 1); + } else { + atomic_inc(&key->enabled); + } jump_label_unlock(); } EXPORT_SYMBOL_GPL(static_key_slow_inc); @@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc); static void __static_key_slow_dec(struct static_key *key, unsigned long rate_limit, struct delayed_work *work) { + /* + * The negative count check is valid even when a negative + * key->enabled is in use by static_key_slow_inc(); a + * __static_key_slow_dec() before the first static_key_slow_inc() + * returns is unbalanced, because all other static_key_slow_inc() + * instances block while the update is in progress. 
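For context on why the negative key->enabled trick above matters: static_key_slow_inc()/static_key_slow_dec() are reference-counted enables that patch the branch site at runtime, so a second caller racing with the very first enable must not return before the patching has finished. Typical usage looks roughly like the sketch below; example_key and do_rare_path() are made up.

#include <linux/jump_label.h>

void do_rare_path(void);	/* assumed to exist elsewhere */

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

void example_fast_path(void)
{
	/* Compiled as a straight-line no-op until the key is enabled. */
	if (static_key_false(&example_key))
		do_rare_path();
}

void example_enable_feature(void)
{
	static_key_slow_inc(&example_key);	/* count 0 -> 1 patches the branch in */
}

void example_disable_feature(void)
{
	static_key_slow_dec(&example_key);	/* count 1 -> 0 patches it back out */
}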
+ */ if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { WARN(atomic_read(&key->enabled) < 0, "jump label: negative count!\n"); diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index fb42418507ae..14b9cca36b05 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -498,9 +498,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) if (!hold_ctx) return 0; - if (unlikely(ctx == hold_ctx)) - return -EALREADY; - if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { #ifdef CONFIG_DEBUG_MUTEXES @@ -526,6 +523,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, unsigned long flags; int ret; + if (use_ww_ctx) { + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); + if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) + return -EALREADY; + } + preempt_disable(); mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 87e9ce6a63c5..8173bc7fec92 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -255,6 +255,66 @@ static __always_inline void __pv_wait_head(struct qspinlock *lock, #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath #endif +/* + * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before + * issuing an _unordered_ store to set _Q_LOCKED_VAL. + * + * This means that the store can be delayed, but no later than the + * store-release from the unlock. This means that simply observing + * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired. + * + * There are two paths that can issue the unordered store: + * + * (1) clear_pending_set_locked(): *,1,0 -> *,0,1 + * + * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0 + * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1 + * + * However, in both cases we have other !0 state we've set before to queue + * ourseves: + * + * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our + * load is constrained by that ACQUIRE to not pass before that, and thus must + * observe the store. + * + * For (2) we have a more intersting scenario. We enqueue ourselves using + * xchg_tail(), which ends up being a RELEASE. This in itself is not + * sufficient, however that is followed by an smp_cond_acquire() on the same + * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and + * guarantees we must observe that store. + * + * Therefore both cases have other !0 state that is observable before the + * unordered locked byte store comes through. This means we can use that to + * wait for the lock store, and then wait for an unlock. 
+ */ +#ifndef queued_spin_unlock_wait +void queued_spin_unlock_wait(struct qspinlock *lock) +{ + u32 val; + + for (;;) { + val = atomic_read(&lock->val); + + if (!val) /* not locked, we're done */ + goto done; + + if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */ + break; + + /* not locked, but pending, wait until we observe the lock */ + cpu_relax(); + } + + /* any unlock is good */ + while (atomic_read(&lock->val) & _Q_LOCKED_MASK) + cpu_relax(); + +done: + smp_rmb(); /* CTRL + RMB -> ACQUIRE */ +} +EXPORT_SYMBOL(queued_spin_unlock_wait); +#endif + #endif /* _GEN_PV_LOCK_SLOWPATH */ /** diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c07d844c576e..f3b1688b3be7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2572,8 +2572,8 @@ void wake_up_new_task(struct task_struct *p) unsigned long flags; struct rq *rq; - raw_spin_lock_irqsave(&p->pi_lock, flags); add_new_task_to_grp(p); + raw_spin_lock_irqsave(&p->pi_lock, flags); /* Initialize new task's runnable average */ init_entity_runnable_average(&p->se); #ifdef CONFIG_SMP diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e0f212743c77..21a60beb8288 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3721,6 +3721,23 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq); +/* + * Unsigned subtract and clamp on underflow. + * + * Explicitly do a load-store to ensure the intermediate value never hits + * memory. This allows lockless observations without ever seeing the negative + * values. + */ +#define sub_positive(_ptr, _val) do { \ + typeof(_ptr) ptr = (_ptr); \ + typeof(*ptr) val = (_val); \ + typeof(*ptr) res, var = READ_ONCE(*ptr); \ + res = var - val; \ + if (res > var) \ + res = 0; \ + WRITE_ONCE(*ptr, res); \ +} while (0) + /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) { @@ -3729,15 +3746,15 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) if (atomic_long_read(&cfs_rq->removed_load_avg)) { s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); - sa->load_avg = max_t(long, sa->load_avg - r, 0); - sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0); + sub_positive(&sa->load_avg, r); + sub_positive(&sa->load_sum, r * LOAD_AVG_MAX); removed = 1; } if (atomic_long_read(&cfs_rq->removed_util_avg)) { long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0); - sa->util_avg = max_t(long, sa->util_avg - r, 0); - sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0); + sub_positive(&sa->util_avg, r); + sub_positive(&sa->util_sum, r * LOAD_AVG_MAX); } decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa, @@ -3803,10 +3820,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s &se->avg, se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL); - cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0); - cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0); - cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0); - cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0); + sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); + sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum); + sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); + sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); } /* Add the load generated by 
se into cfs_rq's load average */ diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c index 50a6d8e0d4d4..56f4c60d4d5d 100644 --- a/kernel/sched/hmp.c +++ b/kernel/sched/hmp.c @@ -1396,16 +1396,6 @@ void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra) } } -/* - * Invoked from three places: - * 1) try_to_wake_up() -> ... -> select_best_cpu() - * 2) scheduler_tick() -> ... -> migration_needed() -> select_best_cpu() - * 3) can_migrate_task() - * - * Its safe to de-reference p->grp in first case (since p->pi_lock is held) - * but not in other cases. p->grp is hence freed after a RCU grace period and - * accessed under rcu_read_lock() - */ int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p) { struct related_thread_group *grp; @@ -3106,9 +3096,9 @@ static void reset_all_task_stats(void) read_lock(&tasklist_lock); do_each_thread(g, p) { - raw_spin_lock(&p->pi_lock); + raw_spin_lock_irq(&p->pi_lock); reset_task_stats(p); - raw_spin_unlock(&p->pi_lock); + raw_spin_unlock_irq(&p->pi_lock); } while_each_thread(g, p); read_unlock(&tasklist_lock); } @@ -3883,7 +3873,12 @@ static void _set_preferred_cluster(struct related_thread_group *grp) void set_preferred_cluster(struct related_thread_group *grp) { - raw_spin_lock(&grp->lock); + /* + * Prevent possible deadlock with update_children(). Not updating + * the preferred cluster once is not a big deal. + */ + if (!raw_spin_trylock(&grp->lock)) + return; _set_preferred_cluster(grp); raw_spin_unlock(&grp->lock); } @@ -3904,7 +3899,7 @@ static int alloc_group_cputime(struct related_thread_group *grp) struct rq *rq = cpu_rq(cpu); u64 window_start = rq->window_start; - grp->cpu_time = alloc_percpu(struct group_cpu_time); + grp->cpu_time = alloc_percpu_gfp(struct group_cpu_time, GFP_ATOMIC); if (!grp->cpu_time) return -ENOMEM; @@ -4088,7 +4083,7 @@ struct related_thread_group *alloc_related_thread_group(int group_id) { struct related_thread_group *grp; - grp = kzalloc(sizeof(*grp), GFP_KERNEL); + grp = kzalloc(sizeof(*grp), GFP_ATOMIC); if (!grp) return ERR_PTR(-ENOMEM); @@ -4127,19 +4122,64 @@ static void free_related_thread_group(struct rcu_head *rcu) kfree(grp); } +/* + * The thread group for a task can change while we are here. However, + * add_new_task_to_grp() will take care of any tasks that we miss here. + * When a parent exits, and a child thread is simultaneously exiting, + * sched_set_group_id() will synchronize those operations. 
+ */ +static void update_children(struct task_struct *leader, + struct related_thread_group *grp, int event) +{ + struct task_struct *child; + struct rq *rq; + unsigned long flags; + + if (!thread_group_leader(leader)) + return; + + if (event == ADD_TASK && !sysctl_sched_enable_thread_grouping) + return; + + if (thread_group_empty(leader)) + return; + + child = next_thread(leader); + + do { + rq = task_rq_lock(child, &flags); + + if (event == REM_TASK && child->grp && grp == child->grp) { + transfer_busy_time(rq, grp, child, event); + list_del_init(&child->grp_list); + rcu_assign_pointer(child->grp, NULL); + } else if (event == ADD_TASK && !child->grp) { + transfer_busy_time(rq, grp, child, event); + list_add(&child->grp_list, &grp->tasks); + rcu_assign_pointer(child->grp, grp); + } + + task_rq_unlock(rq, child, &flags); + } while_each_thread(leader, child); + +} + static void remove_task_from_group(struct task_struct *p) { struct related_thread_group *grp = p->grp; struct rq *rq; int empty_group = 1; + unsigned long flags; raw_spin_lock(&grp->lock); - rq = __task_rq_lock(p); + rq = task_rq_lock(p, &flags); transfer_busy_time(rq, p->grp, p, REM_TASK); list_del_init(&p->grp_list); rcu_assign_pointer(p->grp, NULL); - __task_rq_unlock(rq); + task_rq_unlock(rq, p, &flags); + + update_children(p, grp, REM_TASK); if (!list_empty(&grp->tasks)) { empty_group = 0; @@ -4158,6 +4198,7 @@ static int add_task_to_group(struct task_struct *p, struct related_thread_group *grp) { struct rq *rq; + unsigned long flags; raw_spin_lock(&grp->lock); @@ -4165,11 +4206,13 @@ add_task_to_group(struct task_struct *p, struct related_thread_group *grp) * Change p->grp under rq->lock. Will prevent races with read-side * reference of p->grp in various hot-paths */ - rq = __task_rq_lock(p); + rq = task_rq_lock(p, &flags); transfer_busy_time(rq, grp, p, ADD_TASK); list_add(&p->grp_list, &grp->tasks); rcu_assign_pointer(p->grp, grp); - __task_rq_unlock(rq); + task_rq_unlock(rq, p, &flags); + + update_children(p, grp, ADD_TASK); _set_preferred_cluster(grp); @@ -4192,82 +4235,62 @@ void add_new_task_to_grp(struct task_struct *new) parent = new->group_leader; - /* - * The parent's pi_lock is required here to protect race - * against the parent task being removed from the - * group. - */ - raw_spin_lock_irqsave(&parent->pi_lock, flags); + write_lock_irqsave(&related_thread_group_lock, flags); - /* protected by pi_lock. */ + rcu_read_lock(); grp = task_related_thread_group(parent); - if (!grp) { - raw_spin_unlock_irqrestore(&parent->pi_lock, flags); + rcu_read_unlock(); + + /* Its possible that update_children() already added us to the group */ + if (!grp || new->grp) { + write_unlock_irqrestore(&related_thread_group_lock, flags); return; } + raw_spin_lock(&grp->lock); rcu_assign_pointer(new->grp, grp); list_add(&new->grp_list, &grp->tasks); raw_spin_unlock(&grp->lock); - raw_spin_unlock_irqrestore(&parent->pi_lock, flags); + write_unlock_irqrestore(&related_thread_group_lock, flags); } int sched_set_group_id(struct task_struct *p, unsigned int group_id) { - int rc = 0, destroy = 0; + int rc = 0; unsigned long flags; - struct related_thread_group *grp = NULL, *new = NULL; + struct related_thread_group *grp = NULL; -redo: - raw_spin_lock_irqsave(&p->pi_lock, flags); + /* Prevents tasks from exiting while we are managing groups. 
*/ + write_lock_irqsave(&related_thread_group_lock, flags); + /* Switching from one group to another directly is not permitted */ if ((current != p && p->flags & PF_EXITING) || (!p->grp && !group_id) || - (p->grp && p->grp->id == group_id)) + (p->grp && group_id)) goto done; - write_lock(&related_thread_group_lock); - if (!group_id) { remove_task_from_group(p); - write_unlock(&related_thread_group_lock); goto done; } - if (p->grp && p->grp->id != group_id) - remove_task_from_group(p); - grp = lookup_related_thread_group(group_id); - if (!grp && !new) { - /* New group */ - write_unlock(&related_thread_group_lock); - raw_spin_unlock_irqrestore(&p->pi_lock, flags); - new = alloc_related_thread_group(group_id); - if (IS_ERR(new)) - return -ENOMEM; - destroy = 1; - /* Rerun checks (like task exiting), since we dropped pi_lock */ - goto redo; - } else if (!grp && new) { - /* New group - use object allocated before */ - destroy = 0; - list_add(&new->list, &related_thread_groups); - grp = new; + if (!grp) { + grp = alloc_related_thread_group(group_id); + if (IS_ERR(grp)) { + rc = -ENOMEM; + goto done; + } + + list_add(&grp->list, &related_thread_groups); } BUG_ON(!grp); rc = add_task_to_group(p, grp); - write_unlock(&related_thread_group_lock); done: - raw_spin_unlock_irqrestore(&p->pi_lock, flags); - - if (new && destroy) { - free_group_cputime(new); - kfree(new); - } - + write_unlock_irqrestore(&related_thread_group_lock, flags); return rc; } diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index ef7159012cf3..b0b93fd33af9 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -99,10 +99,13 @@ long calc_load_fold_active(struct rq *this_rq) static unsigned long calc_load(unsigned long load, unsigned long exp, unsigned long active) { - load *= exp; - load += active * (FIXED_1 - exp); - load += 1UL << (FSHIFT - 1); - return load >> FSHIFT; + unsigned long newload; + + newload = load * exp + active * (FIXED_1 - exp); + if (active >= load) + newload += FIXED_1-1; + + return newload / FIXED_1; } #ifdef CONFIG_NO_HZ_COMMON diff --git a/kernel/sys.c b/kernel/sys.c index b5a8e844a968..ba3ddb43dd9f 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2319,7 +2319,10 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, error = perf_event_task_enable(); break; case PR_GET_TIMERSLACK: - error = current->timer_slack_ns; + if (current->timer_slack_ns > ULONG_MAX) + error = ULONG_MAX; + else + error = current->timer_slack_ns; break; case PR_SET_TIMERSLACK: if (arg2 <= 0) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index c72cb2053da7..574316f1c344 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2022,6 +2022,20 @@ static struct ctl_table fs_table[] = { .proc_handler = &pipe_proc_fn, .extra1 = &pipe_min_size, }, + { + .procname = "pipe-user-pages-hard", + .data = &pipe_user_pages_hard, + .maxlen = sizeof(pipe_user_pages_hard), + .mode = 0644, + .proc_handler = proc_doulongvec_minmax, + }, + { + .procname = "pipe-user-pages-soft", + .data = &pipe_user_pages_soft, + .maxlen = sizeof(pipe_user_pages_soft), + .mode = 0644, + .proc_handler = proc_doulongvec_minmax, + }, { } }; diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 9e1349fc5bbe..18d2fe271cf7 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -985,7 +985,7 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim, * relative (HRTIMER_MODE_REL) */ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, - unsigned long delta_ns, const enum 
hrtimer_mode mode) + u64 delta_ns, const enum hrtimer_mode mode) { struct hrtimer_clock_base *base, *new_base; unsigned long flags; @@ -1558,7 +1558,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, struct restart_block *restart; struct hrtimer_sleeper t; int ret = 0; - unsigned long slack; + u64 slack; slack = current->timer_slack_ns; if (dl_task(current) || rt_task(current)) @@ -1780,7 +1780,7 @@ void __init hrtimers_init(void) * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME */ int __sched -schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta, +schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, const enum hrtimer_mode mode, int clock) { struct hrtimer_sleeper t; @@ -1848,7 +1848,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta, * * Returns 0 when the timer has expired otherwise -EINTR */ -int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, +int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta, const enum hrtimer_mode mode) { return schedule_hrtimeout_range_clock(expires, delta, mode, diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 0efb3916f5a4..748eefb72a91 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1784,10 +1784,10 @@ EXPORT_SYMBOL(msleep_interruptible); static void __sched do_usleep_range(unsigned long min, unsigned long max) { ktime_t kmin; - unsigned long delta; + u64 delta; kmin = ktime_set(0, min * NSEC_PER_USEC); - delta = (max - min) * NSEC_PER_USEC; + delta = (u64)(max - min) * NSEC_PER_USEC; schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL); } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 9c6045a27ba3..acbb0e73d3a2 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -437,7 +437,7 @@ struct ring_buffer_per_cpu { raw_spinlock_t reader_lock; /* serialize readers */ arch_spinlock_t lock; struct lock_class_key lock_key; - unsigned int nr_pages; + unsigned long nr_pages; unsigned int current_context; struct list_head *pages; struct buffer_page *head_page; /* read from head */ @@ -458,7 +458,7 @@ struct ring_buffer_per_cpu { u64 write_stamp; u64 read_stamp; /* ring buffer pages to update, > 0 to add, < 0 to remove */ - int nr_pages_to_update; + long nr_pages_to_update; struct list_head new_pages; /* new pages to add */ struct work_struct update_pages_work; struct completion update_done; @@ -1137,10 +1137,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) return 0; } -static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu) +static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu) { - int i; struct buffer_page *bpage, *tmp; + long i; for (i = 0; i < nr_pages; i++) { struct page *page; @@ -1177,7 +1177,7 @@ free_pages: } static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, - unsigned nr_pages) + unsigned long nr_pages) { LIST_HEAD(pages); @@ -1202,7 +1202,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, } static struct ring_buffer_per_cpu * -rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu) +rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; struct buffer_page *bpage; @@ -1302,8 +1302,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key) { struct ring_buffer *buffer; + long nr_pages; int bsize; - int cpu, nr_pages; + int 
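The hrtimer and timer.c hunks above widen the slack value from unsigned long to u64 and cast the do_usleep_range() delta before multiplying. The standalone sketch below (hypothetical variable names, with uint32_t standing in for a 32-bit unsigned long) shows the wraparound that the cast avoids on 32-bit builds when the requested range exceeds roughly 4.29 seconds:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
        uint32_t min_us = 0, max_us = 5 * 1000 * 1000;  /* a 5 second range */

        /* old behaviour with a 32-bit unsigned long: the product wraps */
        uint32_t wrapped = (uint32_t)((max_us - min_us) * NSEC_PER_USEC);

        /* new behaviour: widen to 64 bits before multiplying */
        uint64_t exact = (uint64_t)(max_us - min_us) * NSEC_PER_USEC;

        printf("32-bit delta: %u ns, 64-bit delta: %llu ns\n",
               wrapped, (unsigned long long)exact);
        return 0;
}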
cpu; /* keep it in its own cache line */ buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), @@ -1429,12 +1430,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage) } static int -rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages) +rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) { struct list_head *tail_page, *to_remove, *next_page; struct buffer_page *to_remove_page, *tmp_iter_page; struct buffer_page *last_page, *first_page; - unsigned int nr_removed; + unsigned long nr_removed; unsigned long head_bit; int page_entries; @@ -1651,7 +1652,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu_id) { struct ring_buffer_per_cpu *cpu_buffer; - unsigned nr_pages; + unsigned long nr_pages; int cpu, err = 0; /* @@ -1665,14 +1666,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, !cpumask_test_cpu(cpu_id, buffer->cpumask)) return size; - size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); - size *= BUF_PAGE_SIZE; + nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); /* we need a minimum of two pages */ - if (size < BUF_PAGE_SIZE * 2) - size = BUF_PAGE_SIZE * 2; + if (nr_pages < 2) + nr_pages = 2; - nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); + size = nr_pages * BUF_PAGE_SIZE; /* * Don't succeed if resizing is disabled, as a reader might be @@ -4645,8 +4645,9 @@ static int rb_cpu_notify(struct notifier_block *self, struct ring_buffer *buffer = container_of(self, struct ring_buffer, cpu_notify); long cpu = (long)hcpu; - int cpu_i, nr_pages_same; - unsigned int nr_pages; + long nr_pages_same; + int cpu_i; + unsigned long nr_pages; switch (action) { case CPU_UP_PREPARE: diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index f96f0383f6c6..ad1d6164e946 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -36,6 +36,10 @@ struct trace_bprintk_fmt { static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) { struct trace_bprintk_fmt *pos; + + if (!fmt) + return ERR_PTR(-EINVAL); + list_for_each_entry(pos, &trace_bprintk_fmt_list, list) { if (!strcmp(pos->fmt, fmt)) return pos; @@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end) for (iter = start; iter < end; iter++) { struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter); if (tb_fmt) { - *iter = tb_fmt->fmt; + if (!IS_ERR(tb_fmt)) + *iter = tb_fmt->fmt; continue; } diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 4a1515f4b452..51a76af25c66 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -657,9 +657,9 @@ static struct dma_debug_entry *dma_entry_alloc(void) spin_lock_irqsave(&free_entries_lock, flags); if (list_empty(&free_entries)) { - pr_err("DMA-API: debugging out of memory - disabling\n"); global_disable = true; spin_unlock_irqrestore(&free_entries_lock, flags); + pr_err("DMA-API: debugging out of memory - disabling\n"); return NULL; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d1f6dc5a715d..08806bb1f070 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3661,6 +3661,7 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg) * ordering is imposed by list_lru_node->lock taken by * memcg_drain_all_list_lrus(). 
*/ + rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ css_for_each_descendant_pre(css, &memcg->css) { child = mem_cgroup_from_css(css); BUG_ON(child->kmemcg_id != kmemcg_id); @@ -3668,6 +3669,8 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg) if (!memcg->use_hierarchy) break; } + rcu_read_unlock(); + memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); memcg_free_cache_id(kmemcg_id); diff --git a/mm/migrate.c b/mm/migrate.c index 3db1b0277eb4..a9ce3783361a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -429,6 +429,7 @@ int migrate_page_move_mapping(struct address_space *mapping, return MIGRATEPAGE_SUCCESS; } +EXPORT_SYMBOL(migrate_page_move_mapping); /* * The expected number of remaining references is the same as that @@ -579,6 +580,7 @@ void migrate_page_copy(struct page *newpage, struct page *page) if (PageWriteback(newpage)) end_page_writeback(newpage); } +EXPORT_SYMBOL(migrate_page_copy); /************************************************************ * Migration functions diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 8bf8e06a56a6..6b4fa64a5c91 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -359,8 +359,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc) struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc); unsigned long bytes = vm_dirty_bytes; unsigned long bg_bytes = dirty_background_bytes; - unsigned long ratio = vm_dirty_ratio; - unsigned long bg_ratio = dirty_background_ratio; + /* convert ratios to per-PAGE_SIZE for higher precision */ + unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100; + unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100; unsigned long thresh; unsigned long bg_thresh; struct task_struct *tsk; @@ -372,26 +373,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc) /* * The byte settings can't be applied directly to memcg * domains. Convert them to ratios by scaling against - * globally available memory. + * globally available memory. As the ratios are in + * per-PAGE_SIZE, they can be obtained by dividing bytes by + * number of pages. */ if (bytes) - ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / - global_avail, 100UL); + ratio = min(DIV_ROUND_UP(bytes, global_avail), + PAGE_SIZE); if (bg_bytes) - bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 / - global_avail, 100UL); + bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail), + PAGE_SIZE); bytes = bg_bytes = 0; } if (bytes) thresh = DIV_ROUND_UP(bytes, PAGE_SIZE); else - thresh = (ratio * available_memory) / 100; + thresh = (ratio * available_memory) / PAGE_SIZE; if (bg_bytes) bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE); else - bg_thresh = (bg_ratio * available_memory) / 100; + bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE; if (bg_thresh >= thresh) bg_thresh = thresh / 2; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c8a31783c2d6..f12a0bde548b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -967,7 +967,7 @@ static inline void init_reserved_page(unsigned long pfn) * marks the pages PageReserved. The remaining valid pages are later * sent to the buddy page allocator. 
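In the domain_dirty_limits() change above, dirty ratios are carried in 1/PAGE_SIZE units rather than percent, so a small per-memcg byte limit no longer rounds down to nothing. A hedged arithmetic sketch of the difference (illustrative numbers and names, not kernel code):

#include <stdio.h>

#define PAGE_SIZE_BYTES 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long avail = 1UL << 20;    /* available memory: 1M pages (4 GiB) */
        unsigned long bytes = 16UL << 20;   /* per-memcg dirty limit: 16 MiB      */

        /* percent granularity: 16 MiB of 4 GiB rounds down to 0% */
        unsigned long pct = DIV_ROUND_UP(bytes, PAGE_SIZE_BYTES) * 100 / avail;
        unsigned long old_thresh = pct * avail / 100;

        /* 1/PAGE_SIZE granularity keeps a usable threshold */
        unsigned long ratio = DIV_ROUND_UP(bytes, avail);
        unsigned long new_thresh = ratio * avail / PAGE_SIZE_BYTES;

        printf("old: %lu pages, new: %lu pages\n", old_thresh, new_thresh);
        return 0;
}

Here the old percent-based math gives a threshold of 0 pages, while the per-PAGE_SIZE math recovers the intended 16 MiB (4096 pages).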
*/ -void __meminit reserve_bootmem_region(unsigned long start, unsigned long end) +void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) { unsigned long start_pfn = PFN_DOWN(start); unsigned long end_pfn = PFN_UP(end); diff --git a/mm/percpu.c b/mm/percpu.c index 8a943b97a053..1f376bce413c 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -110,7 +110,7 @@ struct pcpu_chunk { int map_used; /* # of map entries used before the sentry */ int map_alloc; /* # of map entries allocated */ int *map; /* allocation map */ - struct work_struct map_extend_work;/* async ->map[] extension */ + struct list_head map_extend_list;/* on pcpu_map_extend_chunks */ void *data; /* chunk data */ int first_free; /* no free below this */ @@ -160,10 +160,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk; static int pcpu_reserved_chunk_limit; static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ -static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */ +static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ +/* chunks which need their map areas extended, protected by pcpu_lock */ +static LIST_HEAD(pcpu_map_extend_chunks); + /* * The number of empty populated pages, protected by pcpu_lock. The * reserved chunk doesn't contribute to the count. @@ -397,13 +400,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic) { int margin, new_alloc; + lockdep_assert_held(&pcpu_lock); + if (is_atomic) { margin = 3; if (chunk->map_alloc < - chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW && - pcpu_async_enabled) - schedule_work(&chunk->map_extend_work); + chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) { + if (list_empty(&chunk->map_extend_list)) { + list_add_tail(&chunk->map_extend_list, + &pcpu_map_extend_chunks); + pcpu_schedule_balance_work(); + } + } } else { margin = PCPU_ATOMIC_MAP_MARGIN_HIGH; } @@ -437,6 +446,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); unsigned long flags; + lockdep_assert_held(&pcpu_alloc_mutex); + new = pcpu_mem_zalloc(new_size); if (!new) return -ENOMEM; @@ -469,20 +480,6 @@ out_unlock: return 0; } -static void pcpu_map_extend_workfn(struct work_struct *work) -{ - struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk, - map_extend_work); - int new_alloc; - - spin_lock_irq(&pcpu_lock); - new_alloc = pcpu_need_to_extend(chunk, false); - spin_unlock_irq(&pcpu_lock); - - if (new_alloc) - pcpu_extend_area_map(chunk, new_alloc); -} - /** * pcpu_fit_in_area - try to fit the requested allocation in a candidate area * @chunk: chunk the candidate area belongs to @@ -742,7 +739,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void) chunk->map_used = 1; INIT_LIST_HEAD(&chunk->list); - INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn); + INIT_LIST_HEAD(&chunk->map_extend_list); chunk->free_size = pcpu_unit_size; chunk->contig_hint = pcpu_unit_size; @@ -897,6 +894,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, return NULL; } + if (!is_atomic) + mutex_lock(&pcpu_alloc_mutex); + spin_lock_irqsave(&pcpu_lock, flags); /* serve reserved allocations from the reserved chunk if available */ @@ -969,12 +969,9 @@ restart: if (is_atomic) goto fail; - mutex_lock(&pcpu_alloc_mutex); - if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { chunk = pcpu_create_chunk(); if (!chunk) { - mutex_unlock(&pcpu_alloc_mutex); err = 
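The mm/percpu.c hunks around here drop the per-chunk map_extend_work item: a chunk that needs its allocation map grown is instead put on pcpu_map_extend_chunks under pcpu_lock and drained from the balance worker (see the pcpu_balance_workfn() hunk that follows), which is the only place that takes pcpu_alloc_mutex for the extension. A generic kernel-style sketch of that queue-and-drain pattern is below; all names (item, pending_list, data_lock, heavy_mutex, request_extend) are hypothetical, and objects are assumed to INIT_LIST_HEAD() their list node at creation time, just as the patch does for map_extend_list.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct item {
        struct list_head extend_list;   /* on pending_list while queued;
                                         * INIT_LIST_HEAD() at object init */
        /* ... object state ... */
};

static LIST_HEAD(pending_list);                 /* protected by data_lock   */
static DEFINE_SPINLOCK(data_lock);
static DEFINE_MUTEX(heavy_mutex);               /* only the worker takes it */

static void balance_workfn(struct work_struct *work);
static DECLARE_WORK(balance_work, balance_workfn);

/* Called with data_lock held, possibly from atomic context: just queue. */
static void request_extend(struct item *it)
{
        if (list_empty(&it->extend_list)) {
                list_add_tail(&it->extend_list, &pending_list);
                schedule_work(&balance_work);
        }
}

/* Single worker: may sleep, so it alone takes the heavyweight mutex. */
static void balance_workfn(struct work_struct *work)
{
        struct item *it;

        mutex_lock(&heavy_mutex);
        for (;;) {
                spin_lock_irq(&data_lock);
                it = list_first_entry_or_null(&pending_list,
                                              struct item, extend_list);
                if (it)
                        list_del_init(&it->extend_list);
                spin_unlock_irq(&data_lock);

                if (!it)
                        break;
                /* perform the expensive extension for 'it' here */
        }
        mutex_unlock(&heavy_mutex);
}

The point of funnelling everything through one worker is that sleeping work and the heavyweight mutex stay out of the atomic allocation path entirely.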
"failed to allocate new chunk"; goto fail; } @@ -985,7 +982,6 @@ restart: spin_lock_irqsave(&pcpu_lock, flags); } - mutex_unlock(&pcpu_alloc_mutex); goto restart; area_found: @@ -995,8 +991,6 @@ area_found: if (!is_atomic) { int page_start, page_end, rs, re; - mutex_lock(&pcpu_alloc_mutex); - page_start = PFN_DOWN(off); page_end = PFN_UP(off + size); @@ -1007,7 +1001,6 @@ area_found: spin_lock_irqsave(&pcpu_lock, flags); if (ret) { - mutex_unlock(&pcpu_alloc_mutex); pcpu_free_area(chunk, off, &occ_pages); err = "failed to populate"; goto fail_unlock; @@ -1047,6 +1040,8 @@ fail: /* see the flag handling in pcpu_blance_workfn() */ pcpu_atomic_alloc_failed = true; pcpu_schedule_balance_work(); + } else { + mutex_unlock(&pcpu_alloc_mutex); } return NULL; } @@ -1131,6 +1126,7 @@ static void pcpu_balance_workfn(struct work_struct *work) if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) continue; + list_del_init(&chunk->map_extend_list); list_move(&chunk->list, &to_free); } @@ -1148,6 +1144,25 @@ static void pcpu_balance_workfn(struct work_struct *work) pcpu_destroy_chunk(chunk); } + /* service chunks which requested async area map extension */ + do { + int new_alloc = 0; + + spin_lock_irq(&pcpu_lock); + + chunk = list_first_entry_or_null(&pcpu_map_extend_chunks, + struct pcpu_chunk, map_extend_list); + if (chunk) { + list_del_init(&chunk->map_extend_list); + new_alloc = pcpu_need_to_extend(chunk, false); + } + + spin_unlock_irq(&pcpu_lock); + + if (new_alloc) + pcpu_extend_area_map(chunk, new_alloc); + } while (chunk); + /* * Ensure there are certain number of free populated pages for * atomic allocs. Fill up from the most packed so that atomic @@ -1646,7 +1661,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, */ schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); INIT_LIST_HEAD(&schunk->list); - INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn); + INIT_LIST_HEAD(&schunk->map_extend_list); schunk->base_addr = base_addr; schunk->map = smap; schunk->map_alloc = ARRAY_SIZE(smap); @@ -1675,7 +1690,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, if (dyn_size) { dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); INIT_LIST_HEAD(&dchunk->list); - INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn); + INIT_LIST_HEAD(&dchunk->map_extend_list); dchunk->base_addr = base_addr; dchunk->map = dmap; dchunk->map_alloc = ARRAY_SIZE(dmap); diff --git a/mm/shmem.c b/mm/shmem.c index b82e56ebabdd..79997e8cf807 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2153,9 +2153,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, NULL); if (error) { /* Remove the !PageUptodate pages we added */ - shmem_undo_range(inode, - (loff_t)start << PAGE_CACHE_SHIFT, - (loff_t)index << PAGE_CACHE_SHIFT, true); + if (index > start) { + shmem_undo_range(inode, + (loff_t)start << PAGE_CACHE_SHIFT, + ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true); + } goto undone; } diff --git a/mm/vmscan.c b/mm/vmscan.c index d5c3ef60a71e..ff408638fd95 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -46,7 +46,6 @@ #include <linux/oom.h> #include <linux/prefetch.h> #include <linux/printk.h> -#include <linux/debugfs.h> #include <asm/tlbflush.h> #include <asm/div64.h> @@ -234,39 +233,6 @@ static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru) return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru); } -struct dentry *debug_file; - -static int debug_shrinker_show(struct seq_file *s, void *unused) -{ - struct shrinker 
*shrinker; - struct shrink_control sc; - - sc.gfp_mask = -1; - sc.nr_to_scan = 0; - - down_read(&shrinker_rwsem); - list_for_each_entry(shrinker, &shrinker_list, list) { - int num_objs; - - num_objs = shrinker->count_objects(shrinker, &sc); - seq_printf(s, "%pf %d\n", shrinker->scan_objects, num_objs); - } - up_read(&shrinker_rwsem); - return 0; -} - -static int debug_shrinker_open(struct inode *inode, struct file *file) -{ - return single_open(file, debug_shrinker_show, inode->i_private); -} - -static const struct file_operations debug_shrinker_fops = { - .open = debug_shrinker_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - /* * Add a shrinker callback to be called from the vm. */ @@ -296,15 +262,6 @@ int register_shrinker(struct shrinker *shrinker) } EXPORT_SYMBOL(register_shrinker); -static int __init add_shrinker_debug(void) -{ - debugfs_create_file("shrinker", 0644, NULL, NULL, - &debug_shrinker_fops); - return 0; -} - -late_initcall(add_shrinker_debug); - /* * Remove one */ diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index fbd0acf80b13..2fdebabbfacd 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -976,7 +976,8 @@ static int ax25_release(struct socket *sock) release_sock(sk); ax25_disconnect(ax25, 0); lock_sock(sk); - ax25_destroy_socket(ax25); + if (!sock_flag(ax25->sk, SOCK_DESTROY)) + ax25_destroy_socket(ax25); break; case AX25_STATE_3: diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c index 951cd57bb07d..5237dff6941d 100644 --- a/net/ax25/ax25_ds_timer.c +++ b/net/ax25/ax25_ds_timer.c @@ -102,6 +102,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25) switch (ax25->state) { case AX25_STATE_0: + case AX25_STATE_2: /* Magic here: If we listen() and a new link dies before it is accepted() it isn't 'dead' so doesn't get removed. */ if (!sk || sock_flag(sk, SOCK_DESTROY) || @@ -111,6 +112,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25) sock_hold(sk); ax25_destroy_socket(ax25); bh_unlock_sock(sk); + /* Ungrab socket and destroy it */ sock_put(sk); } else ax25_destroy_socket(ax25); @@ -213,7 +215,8 @@ void ax25_ds_t1_timeout(ax25_cb *ax25) case AX25_STATE_2: if (ax25->n2count == ax25->n2) { ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); - ax25_disconnect(ax25, ETIMEDOUT); + if (!sock_flag(ax25->sk, SOCK_DESTROY)) + ax25_disconnect(ax25, ETIMEDOUT); return; } else { ax25->n2count++; diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c index 004467c9e6e1..2c0d6ef66f9d 100644 --- a/net/ax25/ax25_std_timer.c +++ b/net/ax25/ax25_std_timer.c @@ -38,6 +38,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25) switch (ax25->state) { case AX25_STATE_0: + case AX25_STATE_2: /* Magic here: If we listen() and a new link dies before it is accepted() it isn't 'dead' so doesn't get removed. 
*/ if (!sk || sock_flag(sk, SOCK_DESTROY) || @@ -47,6 +48,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25) sock_hold(sk); ax25_destroy_socket(ax25); bh_unlock_sock(sk); + /* Ungrab socket and destroy it */ sock_put(sk); } else ax25_destroy_socket(ax25); @@ -144,7 +146,8 @@ void ax25_std_t1timer_expiry(ax25_cb *ax25) case AX25_STATE_2: if (ax25->n2count == ax25->n2) { ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); - ax25_disconnect(ax25, ETIMEDOUT); + if (!sock_flag(ax25->sk, SOCK_DESTROY)) + ax25_disconnect(ax25, ETIMEDOUT); return; } else { ax25->n2count++; diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c index 3b78e8473a01..655a7d4c96e1 100644 --- a/net/ax25/ax25_subr.c +++ b/net/ax25/ax25_subr.c @@ -264,7 +264,8 @@ void ax25_disconnect(ax25_cb *ax25, int reason) { ax25_clear_queues(ax25); - ax25_stop_heartbeat(ax25); + if (!sock_flag(ax25->sk, SOCK_DESTROY)) + ax25_stop_heartbeat(ax25); ax25_stop_t1timer(ax25); ax25_stop_t2timer(ax25); ax25_stop_t3timer(ax25); diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index a642bb829d09..09442e0f7f67 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -278,6 +278,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) * change from under us. */ list_for_each_entry(v, &vg->vlan_list, vlist) { + if (!br_vlan_should_use(v)) + continue; f = __br_fdb_get(br, br->dev->dev_addr, v->vid); if (f && f->is_local && !f->dst) fdb_delete_local(br, NULL, f); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index ea9893743a0f..7173a685309a 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -464,8 +464,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, &ip6h->saddr)) { kfree_skb(skb); + br->has_ipv6_addr = 0; return NULL; } + + br->has_ipv6_addr = 1; ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); hopopt = (u8 *)(ip6h + 1); @@ -1736,6 +1739,7 @@ void br_multicast_init(struct net_bridge *br) br->ip6_other_query.delay_time = 0; br->ip6_querier.port = NULL; #endif + br->has_ipv6_addr = 1; spin_lock_init(&br->multicast_lock); setup_timer(&br->multicast_router_timer, diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 216018c76018..1001a1b7df9b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -301,6 +301,7 @@ struct net_bridge u8 multicast_disabled:1; u8 multicast_querier:1; u8 multicast_query_use_ifaddr:1; + u8 has_ipv6_addr:1; u32 hash_elasticity; u32 hash_max; @@ -574,10 +575,22 @@ static inline bool br_multicast_is_router(struct net_bridge *br) static inline bool __br_multicast_querier_exists(struct net_bridge *br, - struct bridge_mcast_other_query *querier) + struct bridge_mcast_other_query *querier, + const bool is_ipv6) { + bool own_querier_enabled; + + if (br->multicast_querier) { + if (is_ipv6 && !br->has_ipv6_addr) + own_querier_enabled = false; + else + own_querier_enabled = true; + } else { + own_querier_enabled = false; + } + return time_is_before_jiffies(querier->delay_time) && - (br->multicast_querier || timer_pending(&querier->timer)); + (own_querier_enabled || timer_pending(&querier->timer)); } static inline bool br_multicast_querier_exists(struct net_bridge *br, @@ -585,10 +598,12 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br, { switch (eth->h_proto) { case (htons(ETH_P_IP)): - return __br_multicast_querier_exists(br, &br->ip4_other_query); + return __br_multicast_querier_exists(br, + 
&br->ip4_other_query, false); #if IS_ENABLED(CONFIG_IPV6) case (htons(ETH_P_IPV6)): - return __br_multicast_querier_exists(br, &br->ip6_other_query); + return __br_multicast_querier_exists(br, + &br->ip6_other_query, true); #endif default: return false; diff --git a/net/core/filter.c b/net/core/filter.c index f393a22b9d50..75e9b2b2336d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1275,9 +1275,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) */ if (unlikely((u32) offset > 0xffff || len > sizeof(buf))) return -EFAULT; - - if (unlikely(skb_cloned(skb) && - !skb_clone_writable(skb, offset + len))) + if (unlikely(skb_try_make_writable(skb, offset + len))) return -EFAULT; ptr = skb_header_pointer(skb, offset, len, buf); @@ -1321,8 +1319,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) if (unlikely((u32) offset > 0xffff)) return -EFAULT; - if (unlikely(skb_cloned(skb) && - !skb_clone_writable(skb, offset + sizeof(sum)))) + if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum)))) return -EFAULT; ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); @@ -1367,9 +1364,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) if (unlikely((u32) offset > 0xffff)) return -EFAULT; - - if (unlikely(skb_cloned(skb) && - !skb_clone_writable(skb, offset + sizeof(sum)))) + if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum)))) return -EFAULT; ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); @@ -1554,6 +1549,13 @@ bool bpf_helper_changes_skb_data(void *func) return true; if (func == bpf_skb_vlan_pop) return true; + if (func == bpf_skb_store_bytes) + return true; + if (func == bpf_l3_csum_replace) + return true; + if (func == bpf_l4_csum_replace) + return true; + return false; } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 08764a380f9a..4509cba1d0e6 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -697,6 +697,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest, } EXPORT_SYMBOL(make_flow_keys_digest); +static struct flow_dissector flow_keys_dissector_symmetric __read_mostly; + +u32 __skb_get_hash_symmetric(struct sk_buff *skb) +{ + struct flow_keys keys; + + __flow_hash_secret_init(); + + memset(&keys, 0, sizeof(keys)); + __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys, + NULL, 0, 0, 0, + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); + + return __flow_hash_from_keys(&keys, hashrnd); +} +EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric); + /** * __skb_get_hash: calculate a flow hash * @skb: sk_buff to calculate flow hash from @@ -909,6 +926,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = { }, }; +static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = { + { + .key_id = FLOW_DISSECTOR_KEY_CONTROL, + .offset = offsetof(struct flow_keys, control), + }, + { + .key_id = FLOW_DISSECTOR_KEY_BASIC, + .offset = offsetof(struct flow_keys, basic), + }, + { + .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS, + .offset = offsetof(struct flow_keys, addrs.v4addrs), + }, + { + .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS, + .offset = offsetof(struct flow_keys, addrs.v6addrs), + }, + { + .key_id = FLOW_DISSECTOR_KEY_PORTS, + .offset = offsetof(struct flow_keys, ports), + }, +}; + static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = { { .key_id = FLOW_DISSECTOR_KEY_CONTROL, @@ -930,6 +970,9 @@ static int __init init_default_flow_dissectors(void) 
skb_flow_dissector_init(&flow_keys_dissector, flow_keys_dissector_keys, ARRAY_SIZE(flow_keys_dissector_keys)); + skb_flow_dissector_init(&flow_keys_dissector_symmetric, + flow_keys_dissector_symmetric_keys, + ARRAY_SIZE(flow_keys_dissector_symmetric_keys)); skb_flow_dissector_init(&flow_keys_buf_dissector, flow_keys_buf_dissector_keys, ARRAY_SIZE(flow_keys_buf_dissector_keys)); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 436822c109d5..f1e575d7f21a 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2467,13 +2467,17 @@ int neigh_xmit(int index, struct net_device *dev, tbl = neigh_tables[index]; if (!tbl) goto out; + rcu_read_lock_bh(); neigh = __neigh_lookup_noref(tbl, addr, dev); if (!neigh) neigh = __neigh_create(tbl, addr, dev, false); err = PTR_ERR(neigh); - if (IS_ERR(neigh)) + if (IS_ERR(neigh)) { + rcu_read_unlock_bh(); goto out_kfree_skb; + } err = neigh->output(neigh, skb); + rcu_read_unlock_bh(); } else if (index == NEIGH_LINK_TABLE) { err = dev_hard_header(skb, dev, ntohs(skb->protocol), diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 38467f386b14..46e60923221f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2965,24 +2965,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page, EXPORT_SYMBOL_GPL(skb_append_pagefrags); /** - * skb_push_rcsum - push skb and update receive checksum - * @skb: buffer to update - * @len: length of data pulled - * - * This function performs an skb_push on the packet and updates - * the CHECKSUM_COMPLETE checksum. It should be used on - * receive path processing instead of skb_push unless you know - * that the checksum difference is zero (e.g., a valid IP header) - * or you are setting ip_summed to CHECKSUM_NONE. - */ -static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len) -{ - skb_push(skb, len); - skb_postpush_rcsum(skb, skb->data, len); - return skb->data; -} - -/** * skb_pull_rcsum - pull skb and update receive checksum * @skb: buffer to update * @len: length of data pulled diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index c600403137b7..1021e770355b 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -888,7 +888,6 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCSIFFLAGS: - case SIOCKILLADDR: err = devinet_ioctl(net, cmd, (void __user *)arg); break; default: diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 926169c94a0b..0212591b0077 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -59,7 +59,6 @@ #include <net/arp.h> #include <net/ip.h> -#include <net/tcp.h> #include <net/route.h> #include <net/ip_fib.h> #include <net/rtnetlink.h> @@ -969,7 +968,6 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) case SIOCSIFBRDADDR: /* Set the broadcast address */ case SIOCSIFDSTADDR: /* Set the destination address */ case SIOCSIFNETMASK: /* Set the netmask for the interface */ - case SIOCKILLADDR: /* Nuke all sockets on this address */ ret = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto out; @@ -1021,8 +1019,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) } ret = -EADDRNOTAVAIL; - if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS - && cmd != SIOCKILLADDR) + if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS) goto done; switch (cmd) { @@ -1149,9 +1146,6 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) inet_insert_ifa(ifa); } break; - case SIOCKILLADDR: /* Nuke all connections on 
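The __skb_get_hash_symmetric() addition above hashes only addresses and ports through a dedicated dissector so that a flow and its reverse can be treated alike. The userspace toy below illustrates the general idea of a direction-independent hash by canonicalising the 4-tuple before mixing; it is not the kernel's implementation, and every name in it is invented for the example:

#include <stdint.h>
#include <stdio.h>

struct tuple {
        uint32_t saddr, daddr;
        uint16_t sport, dport;
};

static uint32_t mix(uint32_t h, uint32_t v)
{
        h ^= v;
        h *= 0x9e3779b1u;               /* any reasonable mixing step */
        return h;
}

static uint32_t symmetric_hash(struct tuple t)
{
        /* order the endpoints so A->B and B->A become the same tuple */
        if (t.saddr > t.daddr || (t.saddr == t.daddr && t.sport > t.dport)) {
                uint32_t a = t.saddr;
                uint16_t p = t.sport;

                t.saddr = t.daddr;
                t.daddr = a;
                t.sport = t.dport;
                t.dport = p;
        }
        return mix(mix(mix(mix(0x12345678u, t.saddr), t.daddr),
                       t.sport), t.dport);
}

int main(void)
{
        struct tuple fwd = { 0x0a000001u, 0x0a000002u, 12345, 80 };
        struct tuple rev = { 0x0a000002u, 0x0a000001u, 80, 12345 };

        printf("fwd %08x rev %08x\n", symmetric_hash(fwd), symmetric_hash(rev));
        return 0;
}

Both directions of the connection print the same value, which is what lets a consumer such as packet fanout keep the two halves of a flow together.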
this address */ - ret = tcp_nuke_addr(net, (struct sockaddr *) sin); - break; } done: rtnl_unlock(); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 477937465a20..d95631d09248 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -23,6 +23,11 @@ struct esp_skb_cb { void *tmp; }; +struct esp_output_extra { + __be32 seqhi; + u32 esphoff; +}; + #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0])) static u32 esp4_get_mtu(struct xfrm_state *x, int mtu); @@ -35,11 +40,11 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu); * * TODO: Use spare space in skb for this where possible. */ -static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen) +static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen) { unsigned int len; - len = seqhilen; + len = extralen; len += crypto_aead_ivsize(aead); @@ -57,15 +62,16 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen) return kmalloc(len, GFP_ATOMIC); } -static inline __be32 *esp_tmp_seqhi(void *tmp) +static inline void *esp_tmp_extra(void *tmp) { - return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32)); + return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra)); } -static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen) + +static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen) { return crypto_aead_ivsize(aead) ? - PTR_ALIGN((u8 *)tmp + seqhilen, - crypto_aead_alignmask(aead) + 1) : tmp + seqhilen; + PTR_ALIGN((u8 *)tmp + extralen, + crypto_aead_alignmask(aead) + 1) : tmp + extralen; } static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) @@ -99,7 +105,7 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset) { struct ip_esp_hdr *esph = (void *)(skb->data + offset); void *tmp = ESP_SKB_CB(skb)->tmp; - __be32 *seqhi = esp_tmp_seqhi(tmp); + __be32 *seqhi = esp_tmp_extra(tmp); esph->seq_no = esph->spi; esph->spi = *seqhi; @@ -107,7 +113,11 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset) static void esp_output_restore_header(struct sk_buff *skb) { - esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32)); + void *tmp = ESP_SKB_CB(skb)->tmp; + struct esp_output_extra *extra = esp_tmp_extra(tmp); + + esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff - + sizeof(__be32)); } static void esp_output_done_esn(struct crypto_async_request *base, int err) @@ -121,6 +131,7 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err) static int esp_output(struct xfrm_state *x, struct sk_buff *skb) { int err; + struct esp_output_extra *extra; struct ip_esp_hdr *esph; struct crypto_aead *aead; struct aead_request *req; @@ -137,8 +148,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) int tfclen; int nfrags; int assoclen; - int seqhilen; - __be32 *seqhi; + int extralen; __be64 seqno; /* skb is pure payload to encrypt */ @@ -166,21 +176,21 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) nfrags = err; assoclen = sizeof(*esph); - seqhilen = 0; + extralen = 0; if (x->props.flags & XFRM_STATE_ESN) { - seqhilen += sizeof(__be32); - assoclen += seqhilen; + extralen += sizeof(*extra); + assoclen += sizeof(__be32); } - tmp = esp_alloc_tmp(aead, nfrags, seqhilen); + tmp = esp_alloc_tmp(aead, nfrags, extralen); if (!tmp) { err = -ENOMEM; goto error; } - seqhi = esp_tmp_seqhi(tmp); - iv = esp_tmp_iv(aead, tmp, seqhilen); + extra = esp_tmp_extra(tmp); + iv = esp_tmp_iv(aead, tmp, extralen); 
req = esp_tmp_req(aead, iv); sg = esp_req_sg(aead, req); @@ -247,8 +257,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) * encryption. */ if ((x->props.flags & XFRM_STATE_ESN)) { - esph = (void *)(skb_transport_header(skb) - sizeof(__be32)); - *seqhi = esph->spi; + extra->esphoff = (unsigned char *)esph - + skb_transport_header(skb); + esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4); + extra->seqhi = esph->spi; esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); aead_request_set_callback(req, 0, esp_output_done_esn, skb); } @@ -445,7 +457,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) goto out; ESP_SKB_CB(skb)->tmp = tmp; - seqhi = esp_tmp_seqhi(tmp); + seqhi = esp_tmp_extra(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); req = esp_tmp_req(aead, iv); sg = esp_req_sg(aead, req); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 7dc962b89fa1..3e4184088082 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1247,6 +1247,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name, err = ipgre_newlink(net, dev, tb, NULL); if (err < 0) goto out; + + /* openvswitch users expect packet sizes to be unrestricted, + * so set the largest MTU we can. + */ + err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false); + if (err) + goto out; + return dev; out: free_netdev(dev); diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index ce30c8b72457..3310ac75e3f3 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -948,17 +948,31 @@ done: } EXPORT_SYMBOL_GPL(ip_tunnel_ioctl); -int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu) +int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict) { struct ip_tunnel *tunnel = netdev_priv(dev); int t_hlen = tunnel->hlen + sizeof(struct iphdr); + int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; - if (new_mtu < 68 || - new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen) + if (new_mtu < 68) return -EINVAL; + + if (new_mtu > max_mtu) { + if (strict) + return -EINVAL; + + new_mtu = max_mtu; + } + dev->mtu = new_mtu; return 0; } +EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu); + +int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu) +{ + return __ip_tunnel_change_mtu(dev, new_mtu, true); +} EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu); static void ip_tunnel_dev_free(struct net_device *dev) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index c3a38353f5dc..9d1e555496e3 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -882,8 +882,10 @@ static struct mfc_cache *ipmr_cache_alloc(void) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); - if (c) + if (c) { + c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1; c->mfc_un.res.minvif = MAXVIFS; + } return c; } diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 36a30fab8625..6e3e0e8b1ce3 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -367,6 +367,18 @@ static inline bool unconditional(const struct arpt_entry *e) memcmp(&e->arp, &uncond, sizeof(uncond)) == 0; } +static bool find_jump_target(const struct xt_table_info *t, + const struct arpt_entry *target) +{ + struct arpt_entry *iter; + + xt_entry_foreach(iter, t->entries, t->size) { + if (iter == target) + return true; + } + return false; +} + /* Figures out from what hook each rule can be called: returns 0 if * there are loops. Puts hook bitmask in comefrom. 
*/ @@ -439,6 +451,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo, size = e->next_offset; e = (struct arpt_entry *) (entry0 + pos + size); + if (pos + size >= newinfo->size) + return 0; e->counters.pcnt = pos; pos += size; } else { @@ -458,9 +472,15 @@ static int mark_source_chains(const struct xt_table_info *newinfo, /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); + e = (struct arpt_entry *) + (entry0 + newpos); + if (!find_jump_target(newinfo, e)) + return 0; } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; + if (newpos >= newinfo->size) + return 0; } e = (struct arpt_entry *) (entry0 + newpos); @@ -474,23 +494,6 @@ next: return 1; } -static inline int check_entry(const struct arpt_entry *e) -{ - const struct xt_entry_target *t; - - if (!arp_checkentry(&e->arp)) - return -EINVAL; - - if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) - return -EINVAL; - - t = arpt_get_target_c(e); - if (e->target_offset + t->u.target_size > e->next_offset) - return -EINVAL; - - return 0; -} - static inline int check_target(struct arpt_entry *e, const char *name) { struct xt_entry_target *t = arpt_get_target(e); @@ -586,7 +589,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, return -EINVAL; } - err = check_entry(e); + if (!arp_checkentry(&e->arp)) + return -EINVAL; + + err = xt_check_entry_offsets(e, e->elems, e->target_offset, + e->next_offset); if (err) return err; @@ -691,10 +698,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, } } - if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) { - duprintf("Looping hook\n"); + if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) return -ELOOP; - } /* Finally, each sanity check must pass */ i = 0; @@ -1125,55 +1130,17 @@ static int do_add_counters(struct net *net, const void __user *user, unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; - unsigned int num_counters; - const char *name; - int size; - void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct arpt_entry *iter; unsigned int addend; -#ifdef CONFIG_COMPAT - struct compat_xt_counters_info compat_tmp; - if (compat) { - ptmp = &compat_tmp; - size = sizeof(struct compat_xt_counters_info); - } else -#endif - { - ptmp = &tmp; - size = sizeof(struct xt_counters_info); - } + paddc = xt_copy_counters_from_user(user, len, &tmp, compat); + if (IS_ERR(paddc)) + return PTR_ERR(paddc); - if (copy_from_user(ptmp, user, size) != 0) - return -EFAULT; - -#ifdef CONFIG_COMPAT - if (compat) { - num_counters = compat_tmp.num_counters; - name = compat_tmp.name; - } else -#endif - { - num_counters = tmp.num_counters; - name = tmp.name; - } - - if (len != size + num_counters * sizeof(struct xt_counters)) - return -EINVAL; - - paddc = vmalloc(len - size); - if (!paddc) - return -ENOMEM; - - if (copy_from_user(paddc, user + size, len - size) != 0) { - ret = -EFAULT; - goto free; - } - - t = xt_find_table_lock(net, NFPROTO_ARP, name); + t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name); if (IS_ERR_OR_NULL(t)) { ret = t ? 
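The arptables changes above (find_jump_target() plus the bounds checks in mark_source_chains()) make rule traversal reject a user-supplied jump unless its offset stays inside the blob and lands exactly on the start of an existing entry, and reject fall-throughs that run past the end; the ip_tables hunks further down repeat the same checks. A simplified standalone sketch of that validation, with a hypothetical two-field entry layout standing in for the real variable-sized xtables structures:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical fixed-size stand-in for the variable-sized xtables entries */
struct entry {
        unsigned int target_offset;     /* offset of the target record  */
        unsigned int next_offset;       /* total size of this entry     */
};

static bool find_jump_target(const unsigned char *blob, unsigned int size,
                             const struct entry *target)
{
        unsigned int pos = 0;

        while (pos < size) {
                const struct entry *e = (const struct entry *)(blob + pos);

                if (e == target)
                        return true;
                if (e->next_offset < sizeof(*e))
                        return false;   /* malformed entry, stop walking */
                pos += e->next_offset;
        }
        return false;
}

static bool jump_is_valid(const unsigned char *blob, unsigned int size,
                          unsigned int newpos)
{
        if (newpos >= size)
                return false;
        return find_jump_target(blob, size,
                                (const struct entry *)(blob + newpos));
}

int main(void)
{
        struct entry entries[2] = {
                { .target_offset = sizeof(struct entry),
                  .next_offset = sizeof(struct entry) },
                { .target_offset = sizeof(struct entry),
                  .next_offset = sizeof(struct entry) },
        };
        const unsigned char *blob = (const unsigned char *)entries;

        printf("jump to second entry: %d\n",
               jump_is_valid(blob, sizeof(entries), sizeof(struct entry)));
        printf("jump into the middle: %d\n",
               jump_is_valid(blob, sizeof(entries), 4));
        return 0;
}

Checking each jump by walking the whole table is quadratic in the worst case, which is acceptable here because the validation only runs when a ruleset is loaded, not per packet.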
PTR_ERR(t) : -ENOENT; goto free; @@ -1181,7 +1148,7 @@ static int do_add_counters(struct net *net, const void __user *user, local_bh_disable(); private = t->private; - if (private->number != num_counters) { + if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; } @@ -1208,6 +1175,18 @@ static int do_add_counters(struct net *net, const void __user *user, } #ifdef CONFIG_COMPAT +struct compat_arpt_replace { + char name[XT_TABLE_MAXNAMELEN]; + u32 valid_hooks; + u32 num_entries; + u32 size; + u32 hook_entry[NF_ARP_NUMHOOKS]; + u32 underflow[NF_ARP_NUMHOOKS]; + u32 num_counters; + compat_uptr_t counters; + struct compat_arpt_entry entries[0]; +}; + static inline void compat_release_entry(struct compat_arpt_entry *e) { struct xt_entry_target *t; @@ -1216,20 +1195,17 @@ static inline void compat_release_entry(struct compat_arpt_entry *e) module_put(t->u.kernel.target->me); } -static inline int +static int check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, - const unsigned char *limit, - const unsigned int *hook_entries, - const unsigned int *underflows, - const char *name) + const unsigned char *limit) { struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; - int ret, off, h; + int ret, off; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || @@ -1246,8 +1222,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, return -EINVAL; } - /* For purposes of check_entry casting the compat entry is fine */ - ret = check_entry((struct arpt_entry *)e); + if (!arp_checkentry(&e->arp)) + return -EINVAL; + + ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset, + e->next_offset); if (ret) return ret; @@ -1271,17 +1250,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, if (ret) goto release_target; - /* Check hooks & underflows */ - for (h = 0; h < NF_ARP_NUMHOOKS; h++) { - if ((unsigned char *)e - base == hook_entries[h]) - newinfo->hook_entry[h] = hook_entries[h]; - if ((unsigned char *)e - base == underflows[h]) - newinfo->underflow[h] = underflows[h]; - } - - /* Clear counters and comefrom */ - memset(&e->counters, 0, sizeof(e->counters)); - e->comefrom = 0; return 0; release_target: @@ -1290,18 +1258,17 @@ out: return ret; } -static int +static void compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, - unsigned int *size, const char *name, + unsigned int *size, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct xt_target *target; struct arpt_entry *de; unsigned int origsize; - int ret, h; + int h; - ret = 0; origsize = *size; de = (struct arpt_entry *)*dstptr; memcpy(de, e, sizeof(struct arpt_entry)); @@ -1322,148 +1289,82 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } - return ret; } -static int translate_compat_table(const char *name, - unsigned int valid_hooks, - struct xt_table_info **pinfo, +static int translate_compat_table(struct xt_table_info **pinfo, void **pentry0, - unsigned int total_size, - unsigned int number, - unsigned int *hook_entries, - unsigned int *underflows) + const struct compat_arpt_replace *compatr) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_arpt_entry *iter0; - struct arpt_entry 
*iter1; + struct arpt_replace repl; unsigned int size; int ret = 0; info = *pinfo; entry0 = *pentry0; - size = total_size; - info->number = number; - - /* Init all hooks to impossible value. */ - for (i = 0; i < NF_ARP_NUMHOOKS; i++) { - info->hook_entry[i] = 0xFFFFFFFF; - info->underflow[i] = 0xFFFFFFFF; - } + size = compatr->size; + info->number = compatr->num_entries; duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(NFPROTO_ARP); - xt_compat_init_offsets(NFPROTO_ARP, number); + xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries); /* Walk through entries, checking offsets. */ - xt_entry_foreach(iter0, entry0, total_size) { + xt_entry_foreach(iter0, entry0, compatr->size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, - entry0 + total_size, - hook_entries, - underflows, - name); + entry0 + compatr->size); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; - if (j != number) { + if (j != compatr->num_entries) { duprintf("translate_compat_table: %u not %u entries\n", - j, number); + j, compatr->num_entries); goto out_unlock; } - /* Check hooks all assigned */ - for (i = 0; i < NF_ARP_NUMHOOKS; i++) { - /* Only hooks which are valid */ - if (!(valid_hooks & (1 << i))) - continue; - if (info->hook_entry[i] == 0xFFFFFFFF) { - duprintf("Invalid hook entry %u %u\n", - i, hook_entries[i]); - goto out_unlock; - } - if (info->underflow[i] == 0xFFFFFFFF) { - duprintf("Invalid underflow %u %u\n", - i, underflows[i]); - goto out_unlock; - } - } - ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; - newinfo->number = number; + newinfo->number = compatr->num_entries; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; - size = total_size; - xt_entry_foreach(iter0, entry0, total_size) { - ret = compat_copy_entry_from_user(iter0, &pos, &size, - name, newinfo, entry1); - if (ret != 0) - break; - } + size = compatr->size; + xt_entry_foreach(iter0, entry0, compatr->size) + compat_copy_entry_from_user(iter0, &pos, &size, + newinfo, entry1); + + /* all module references in entry0 are now gone */ + xt_compat_flush_offsets(NFPROTO_ARP); xt_compat_unlock(NFPROTO_ARP); - if (ret) - goto free_newinfo; - ret = -ELOOP; - if (!mark_source_chains(newinfo, valid_hooks, entry1)) - goto free_newinfo; + memcpy(&repl, compatr, sizeof(*compatr)); - i = 0; - xt_entry_foreach(iter1, entry1, newinfo->size) { - iter1->counters.pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(iter1->counters.pcnt)) { - ret = -ENOMEM; - break; - } - - ret = check_target(iter1, name); - if (ret != 0) { - xt_percpu_counter_free(iter1->counters.pcnt); - break; - } - ++i; - if (strcmp(arpt_get_target(iter1)->u.user.name, - XT_ERROR_TARGET) == 0) - ++newinfo->stacksize; - } - if (ret) { - /* - * The first i matches need cleanup_entry (calls ->destroy) - * because they had called ->check already. The other j-i - * entries need only release. 
- */ - int skip = i; - j -= i; - xt_entry_foreach(iter0, entry0, newinfo->size) { - if (skip-- > 0) - continue; - if (j-- == 0) - break; - compat_release_entry(iter0); - } - xt_entry_foreach(iter1, entry1, newinfo->size) { - if (i-- == 0) - break; - cleanup_entry(iter1); - } - xt_free_table_info(newinfo); - return ret; + for (i = 0; i < NF_ARP_NUMHOOKS; i++) { + repl.hook_entry[i] = newinfo->hook_entry[i]; + repl.underflow[i] = newinfo->underflow[i]; } + repl.num_counters = 0; + repl.counters = NULL; + repl.size = newinfo->size; + ret = translate_table(newinfo, entry1, &repl); + if (ret) + goto free_newinfo; + *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); @@ -1471,31 +1372,18 @@ static int translate_compat_table(const char *name, free_newinfo: xt_free_table_info(newinfo); -out: - xt_entry_foreach(iter0, entry0, total_size) { + return ret; +out_unlock: + xt_compat_flush_offsets(NFPROTO_ARP); + xt_compat_unlock(NFPROTO_ARP); + xt_entry_foreach(iter0, entry0, compatr->size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; -out_unlock: - xt_compat_flush_offsets(NFPROTO_ARP); - xt_compat_unlock(NFPROTO_ARP); - goto out; } -struct compat_arpt_replace { - char name[XT_TABLE_MAXNAMELEN]; - u32 valid_hooks; - u32 num_entries; - u32 size; - u32 hook_entry[NF_ARP_NUMHOOKS]; - u32 underflow[NF_ARP_NUMHOOKS]; - u32 num_counters; - compat_uptr_t counters; - struct compat_arpt_entry entries[0]; -}; - static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { @@ -1528,10 +1416,7 @@ static int compat_do_replace(struct net *net, void __user *user, goto free_newinfo; } - ret = translate_compat_table(tmp.name, tmp.valid_hooks, - &newinfo, &loc_cpu_entry, tmp.size, - tmp.num_entries, tmp.hook_entry, - tmp.underflow); + ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 99d46b0a4ead..a399c5419622 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -443,6 +443,18 @@ ipt_do_table(struct sk_buff *skb, #endif } +static bool find_jump_target(const struct xt_table_info *t, + const struct ipt_entry *target) +{ + struct ipt_entry *iter; + + xt_entry_foreach(iter, t->entries, t->size) { + if (iter == target) + return true; + } + return false; +} + /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int @@ -520,6 +532,8 @@ mark_source_chains(const struct xt_table_info *newinfo, size = e->next_offset; e = (struct ipt_entry *) (entry0 + pos + size); + if (pos + size >= newinfo->size) + return 0; e->counters.pcnt = pos; pos += size; } else { @@ -538,9 +552,15 @@ mark_source_chains(const struct xt_table_info *newinfo, /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); + e = (struct ipt_entry *) + (entry0 + newpos); + if (!find_jump_target(newinfo, e)) + return 0; } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; + if (newpos >= newinfo->size) + return 0; } e = (struct ipt_entry *) (entry0 + newpos); @@ -568,25 +588,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) } static int -check_entry(const struct ipt_entry *e) -{ - const struct xt_entry_target *t; - - if (!ip_checkentry(&e->ip)) - return -EINVAL; - - if (e->target_offset + sizeof(struct xt_entry_target) > - e->next_offset) - return -EINVAL; - - t = ipt_get_target_c(e); - if (e->target_offset + t->u.target_size > e->next_offset) - return -EINVAL; - - return 0; -} - -static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ipt_ip *ip = par->entryinfo; @@ -750,7 +751,11 @@ check_entry_size_and_hooks(struct ipt_entry *e, return -EINVAL; } - err = check_entry(e); + if (!ip_checkentry(&e->ip)) + return -EINVAL; + + err = xt_check_entry_offsets(e, e->elems, e->target_offset, + e->next_offset); if (err) return err; @@ -1308,55 +1313,17 @@ do_add_counters(struct net *net, const void __user *user, unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; - unsigned int num_counters; - const char *name; - int size; - void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ipt_entry *iter; unsigned int addend; -#ifdef CONFIG_COMPAT - struct compat_xt_counters_info compat_tmp; - if (compat) { - ptmp = &compat_tmp; - size = sizeof(struct compat_xt_counters_info); - } else -#endif - { - ptmp = &tmp; - size = sizeof(struct xt_counters_info); - } - - if (copy_from_user(ptmp, user, size) != 0) - return -EFAULT; - -#ifdef CONFIG_COMPAT - if (compat) { - num_counters = compat_tmp.num_counters; - name = compat_tmp.name; - } else -#endif - { - num_counters = tmp.num_counters; - name = tmp.name; - } + paddc = xt_copy_counters_from_user(user, len, &tmp, compat); + if (IS_ERR(paddc)) + return PTR_ERR(paddc); - if (len != size + num_counters * sizeof(struct xt_counters)) - return -EINVAL; - - paddc = vmalloc(len - size); - if (!paddc) - return -ENOMEM; - - if (copy_from_user(paddc, user + size, len - size) != 0) { - ret = -EFAULT; - goto free; - } - - t = xt_find_table_lock(net, AF_INET, name); + t = xt_find_table_lock(net, AF_INET, tmp.name); if (IS_ERR_OR_NULL(t)) { ret = t ? 
PTR_ERR(t) : -ENOENT; goto free; @@ -1364,7 +1331,7 @@ do_add_counters(struct net *net, const void __user *user, local_bh_disable(); private = t->private; - if (private->number != num_counters) { + if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; } @@ -1443,7 +1410,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, static int compat_find_calc_match(struct xt_entry_match *m, - const char *name, const struct ipt_ip *ip, int *size) { @@ -1478,17 +1444,14 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, - const unsigned char *limit, - const unsigned int *hook_entries, - const unsigned int *underflows, - const char *name) + const unsigned char *limit) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; - int ret, off, h; + int ret, off; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || @@ -1505,8 +1468,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, return -EINVAL; } - /* For purposes of check_entry casting the compat entry is fine */ - ret = check_entry((struct ipt_entry *)e); + if (!ip_checkentry(&e->ip)) + return -EINVAL; + + ret = xt_compat_check_entry_offsets(e, e->elems, + e->target_offset, e->next_offset); if (ret) return ret; @@ -1514,7 +1480,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { - ret = compat_find_calc_match(ematch, name, &e->ip, &off); + ret = compat_find_calc_match(ematch, &e->ip, &off); if (ret != 0) goto release_matches; ++j; @@ -1537,17 +1503,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, if (ret) goto out; - /* Check hooks & underflows */ - for (h = 0; h < NF_INET_NUMHOOKS; h++) { - if ((unsigned char *)e - base == hook_entries[h]) - newinfo->hook_entry[h] = hook_entries[h]; - if ((unsigned char *)e - base == underflows[h]) - newinfo->underflow[h] = underflows[h]; - } - - /* Clear counters and comefrom */ - memset(&e->counters, 0, sizeof(e->counters)); - e->comefrom = 0; return 0; out: @@ -1561,19 +1516,18 @@ release_matches: return ret; } -static int +static void compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, - unsigned int *size, const char *name, + unsigned int *size, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct xt_target *target; struct ipt_entry *de; unsigned int origsize; - int ret, h; + int h; struct xt_entry_match *ematch; - ret = 0; origsize = *size; de = (struct ipt_entry *)*dstptr; memcpy(de, e, sizeof(struct ipt_entry)); @@ -1582,201 +1536,105 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, *dstptr += sizeof(struct ipt_entry); *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); - xt_ematch_foreach(ematch, e) { - ret = xt_compat_match_from_user(ematch, dstptr, size); - if (ret != 0) - return ret; - } + xt_ematch_foreach(ematch, e) + xt_compat_match_from_user(ematch, dstptr, size); + de->target_offset = e->target_offset - (origsize - *size); t = compat_ipt_get_target(e); target = t->u.kernel.target; xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); + for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= 
origsize - *size; if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } - return ret; -} - -static int -compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) -{ - struct xt_entry_match *ematch; - struct xt_mtchk_param mtpar; - unsigned int j; - int ret = 0; - - e->counters.pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(e->counters.pcnt)) - return -ENOMEM; - - j = 0; - mtpar.net = net; - mtpar.table = name; - mtpar.entryinfo = &e->ip; - mtpar.hook_mask = e->comefrom; - mtpar.family = NFPROTO_IPV4; - xt_ematch_foreach(ematch, e) { - ret = check_match(ematch, &mtpar); - if (ret != 0) - goto cleanup_matches; - ++j; - } - - ret = check_target(e, net, name); - if (ret) - goto cleanup_matches; - return 0; - - cleanup_matches: - xt_ematch_foreach(ematch, e) { - if (j-- == 0) - break; - cleanup_match(ematch, net); - } - - xt_percpu_counter_free(e->counters.pcnt); - - return ret; } static int translate_compat_table(struct net *net, - const char *name, - unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, - unsigned int total_size, - unsigned int number, - unsigned int *hook_entries, - unsigned int *underflows) + const struct compat_ipt_replace *compatr) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ipt_entry *iter0; - struct ipt_entry *iter1; + struct ipt_replace repl; unsigned int size; int ret; info = *pinfo; entry0 = *pentry0; - size = total_size; - info->number = number; - - /* Init all hooks to impossible value. */ - for (i = 0; i < NF_INET_NUMHOOKS; i++) { - info->hook_entry[i] = 0xFFFFFFFF; - info->underflow[i] = 0xFFFFFFFF; - } + size = compatr->size; + info->number = compatr->num_entries; duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET); - xt_compat_init_offsets(AF_INET, number); + xt_compat_init_offsets(AF_INET, compatr->num_entries); /* Walk through entries, checking offsets. 
*/ - xt_entry_foreach(iter0, entry0, total_size) { + xt_entry_foreach(iter0, entry0, compatr->size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, - entry0 + total_size, - hook_entries, - underflows, - name); + entry0 + compatr->size); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; - if (j != number) { + if (j != compatr->num_entries) { duprintf("translate_compat_table: %u not %u entries\n", - j, number); + j, compatr->num_entries); goto out_unlock; } - /* Check hooks all assigned */ - for (i = 0; i < NF_INET_NUMHOOKS; i++) { - /* Only hooks which are valid */ - if (!(valid_hooks & (1 << i))) - continue; - if (info->hook_entry[i] == 0xFFFFFFFF) { - duprintf("Invalid hook entry %u %u\n", - i, hook_entries[i]); - goto out_unlock; - } - if (info->underflow[i] == 0xFFFFFFFF) { - duprintf("Invalid underflow %u %u\n", - i, underflows[i]); - goto out_unlock; - } - } - ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; - newinfo->number = number; + newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { - newinfo->hook_entry[i] = info->hook_entry[i]; - newinfo->underflow[i] = info->underflow[i]; + newinfo->hook_entry[i] = compatr->hook_entry[i]; + newinfo->underflow[i] = compatr->underflow[i]; } entry1 = newinfo->entries; pos = entry1; - size = total_size; - xt_entry_foreach(iter0, entry0, total_size) { - ret = compat_copy_entry_from_user(iter0, &pos, &size, - name, newinfo, entry1); - if (ret != 0) - break; - } + size = compatr->size; + xt_entry_foreach(iter0, entry0, compatr->size) + compat_copy_entry_from_user(iter0, &pos, &size, + newinfo, entry1); + + /* all module references in entry0 are now gone. + * entry1/newinfo contains a 64bit ruleset that looks exactly as + * generated by 64bit userspace. + * + * Call standard translate_table() to validate all hook_entrys, + * underflows, check for loops, etc. + */ xt_compat_flush_offsets(AF_INET); xt_compat_unlock(AF_INET); - if (ret) - goto free_newinfo; - ret = -ELOOP; - if (!mark_source_chains(newinfo, valid_hooks, entry1)) - goto free_newinfo; + memcpy(&repl, compatr, sizeof(*compatr)); - i = 0; - xt_entry_foreach(iter1, entry1, newinfo->size) { - ret = compat_check_entry(iter1, net, name); - if (ret != 0) - break; - ++i; - if (strcmp(ipt_get_target(iter1)->u.user.name, - XT_ERROR_TARGET) == 0) - ++newinfo->stacksize; - } - if (ret) { - /* - * The first i matches need cleanup_entry (calls ->destroy) - * because they had called ->check already. The other j-i - * entries need only release. 
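The comment added above is the crux of this rework: once compat_copy_entry_from_user() has converted every entry, entry1/newinfo hold the ruleset in native 64-bit layout, so the compat path no longer needs its own hook/underflow bookkeeping or loop detection and can hand everything to the existing translate_table(). A minimal sketch of that final hand-off, assuming the conversion loop has already run; the helper name finish_compat_translate is invented here for illustration and the error paths are elided:

/* Sketch only: build a native ipt_replace for an already-converted
 * ruleset and let translate_table() do all semantic validation
 * (hook entries, underflows, loop detection, per-entry checks). */
static int finish_compat_translate(struct net *net,
				   struct xt_table_info *newinfo,
				   void *entry1,
				   const struct compat_ipt_replace *compatr)
{
	struct ipt_replace repl;
	unsigned int i;

	/* the leading fields (name, valid_hooks, num_entries, ...) have
	 * identical layout in the compat and native structs */
	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i]  = newinfo->underflow[i];
	}

	/* counters are handled separately by the caller; the size now
	 * refers to the converted (larger) native entries */
	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;

	return translate_table(net, newinfo, entry1, &repl);
}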
- */ - int skip = i; - j -= i; - xt_entry_foreach(iter0, entry0, newinfo->size) { - if (skip-- > 0) - continue; - if (j-- == 0) - break; - compat_release_entry(iter0); - } - xt_entry_foreach(iter1, entry1, newinfo->size) { - if (i-- == 0) - break; - cleanup_entry(iter1, net); - } - xt_free_table_info(newinfo); - return ret; + for (i = 0; i < NF_INET_NUMHOOKS; i++) { + repl.hook_entry[i] = newinfo->hook_entry[i]; + repl.underflow[i] = newinfo->underflow[i]; } + repl.num_counters = 0; + repl.counters = NULL; + repl.size = newinfo->size; + ret = translate_table(net, newinfo, entry1, &repl); + if (ret) + goto free_newinfo; + *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); @@ -1784,17 +1642,16 @@ translate_compat_table(struct net *net, free_newinfo: xt_free_table_info(newinfo); -out: - xt_entry_foreach(iter0, entry0, total_size) { + return ret; +out_unlock: + xt_compat_flush_offsets(AF_INET); + xt_compat_unlock(AF_INET); + xt_entry_foreach(iter0, entry0, compatr->size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; -out_unlock: - xt_compat_flush_offsets(AF_INET); - xt_compat_unlock(AF_INET); - goto out; } static int @@ -1830,10 +1687,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) goto free_newinfo; } - ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, - &newinfo, &loc_cpu_entry, tmp.size, - tmp.num_entries, tmp.hook_entry, - tmp.underflow); + ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2ee7b830a35e..d5bd460a13f1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -275,9 +275,6 @@ #include <net/tcp.h> #include <net/xfrm.h> #include <net/ip.h> -#include <net/ip6_route.h> -#include <net/ipv6.h> -#include <net/transp_v6.h> #include <net/sock.h> #include <asm/uaccess.h> @@ -3257,125 +3254,3 @@ void __init tcp_init(void) BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); tcp_tasklet_init(); } - -static int tcp_is_local(struct net *net, __be32 addr) { - struct rtable *rt; - struct flowi4 fl4 = { .daddr = addr }; - rt = ip_route_output_key(net, &fl4); - if (IS_ERR_OR_NULL(rt)) - return 0; - return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK); -} - -#if defined(CONFIG_IPV6) -static int tcp_is_local6(struct net *net, struct in6_addr *addr) { - struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0); - return rt6 && rt6->dst.dev && (rt6->dst.dev->flags & IFF_LOOPBACK); -} -#endif - -/* - * tcp_nuke_addr - destroy all sockets on the given local address - * if local address is the unspecified address (0.0.0.0 or ::), destroy all - * sockets with local addresses that are not configured. 
- */ -int tcp_nuke_addr(struct net *net, struct sockaddr *addr) -{ - int family = addr->sa_family; - unsigned int bucket; - - struct in_addr *in = NULL; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) - struct in6_addr *in6 = NULL; -#endif - if (family == AF_INET) { - in = &((struct sockaddr_in *)addr)->sin_addr; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) - } else if (family == AF_INET6) { - in6 = &((struct sockaddr_in6 *)addr)->sin6_addr; -#endif - } else { - return -EAFNOSUPPORT; - } - - for (bucket = 0; bucket <= tcp_hashinfo.ehash_mask; bucket++) { - struct hlist_nulls_node *node; - struct sock *sk; - spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket); - -restart: - spin_lock_bh(lock); - sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) { - struct inet_sock *inet = inet_sk(sk); - - if (sk->sk_state == TCP_TIME_WAIT) { - /* - * Sockets that are in TIME_WAIT state are - * instances of lightweight inet_timewait_sock, - * we should simply skip them (or we'll try to - * access non-existing fields and crash). - */ - continue; - } - - if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT) - continue; - - if (sock_flag(sk, SOCK_DEAD)) - continue; - - if (family == AF_INET) { - __be32 s4 = inet->inet_rcv_saddr; - if (s4 == LOOPBACK4_IPV6) - continue; - - if (in && in->s_addr != s4 && - !(in->s_addr == INADDR_ANY && - !tcp_is_local(net, s4))) - continue; - } - -#if defined(CONFIG_IPV6) - if (family == AF_INET6) { - struct in6_addr *s6; - if (!inet->pinet6) - continue; - - s6 = &sk->sk_v6_rcv_saddr; - if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED) - continue; - - if (!ipv6_addr_equal(in6, s6) && - !(ipv6_addr_equal(in6, &in6addr_any) && - !tcp_is_local6(net, s6))) - continue; - } -#endif - - sock_hold(sk); - spin_unlock_bh(lock); - - lock_sock(sk); - local_bh_disable(); - bh_lock_sock(sk); - - if (!sock_flag(sk, SOCK_DEAD)) { - smp_wmb(); /* be consistent with tcp_reset */ - sk->sk_err = ETIMEDOUT; - sk->sk_error_report(sk); - tcp_done(sk); - } - - bh_unlock_sock(sk); - local_bh_enable(); - release_sock(sk); - sock_put(sk); - - goto restart; - } - spin_unlock_bh(lock); - } - - return 0; -} -EXPORT_SYMBOL_GPL(tcp_nuke_addr); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index eeac33e94527..04822b7d7966 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1537,7 +1537,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) /* if we're overly short, let UDP handle it */ encap_rcv = ACCESS_ONCE(up->encap_rcv); - if (skb->len > sizeof(struct udphdr) && encap_rcv) { + if (encap_rcv) { int ret; /* Verify checksum before giving to encap */ diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 99fccad391e0..d9b25bd17bf1 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -495,21 +495,6 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, } EXPORT_SYMBOL(inet6_getname); -int inet6_killaddr_ioctl(struct net *net, void __user *arg) { - struct in6_ifreq ireq; - struct sockaddr_in6 sin6; - - if (!capable(CAP_NET_ADMIN)) - return -EACCES; - - if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) - return -EFAULT; - - sin6.sin6_family = AF_INET6; - sin6.sin6_addr = ireq.ifr6_addr; - return tcp_nuke_addr(net, (struct sockaddr *) &sin6); -} - int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; @@ -533,8 +518,6 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return addrconf_del_ifaddr(net, (void __user *) arg); case SIOCSIFDSTADDR: return 
addrconf_set_dstaddr(net, (void __user *) arg); - case SIOCKILLADDR: - return inet6_killaddr_ioctl(net, (void __user *) arg); default: if (!sk->sk_prot->ioctl) return -ENOIOCTLCMD; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 0c7e276c230e..34cf46d74554 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -179,6 +179,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) } } + free_percpu(non_pcpu_rt->rt6i_pcpu); non_pcpu_rt->rt6i_pcpu = NULL; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index c87ca6987f9c..cd96a01032a2 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1071,17 +1071,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst) { struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); - int err; dst = ip6_sk_dst_check(sk, dst, fl6); + if (!dst) + dst = ip6_dst_lookup_flow(sk, fl6, final_dst); - err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6); - if (err) - return ERR_PTR(err); - if (final_dst) - fl6->daddr = *final_dst; - - return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); + return dst; } EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index a10e77103c88..e207cb2468da 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1074,6 +1074,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void) struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); if (!c) return NULL; + c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1; c->mfc_un.res.minvif = MAXMIFS; return c; } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 6198807e06f4..22f39e00bef3 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -455,6 +455,18 @@ ip6t_do_table(struct sk_buff *skb, #endif } +static bool find_jump_target(const struct xt_table_info *t, + const struct ip6t_entry *target) +{ + struct ip6t_entry *iter; + + xt_entry_foreach(iter, t->entries, t->size) { + if (iter == target) + return true; + } + return false; +} + /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int @@ -532,6 +544,8 @@ mark_source_chains(const struct xt_table_info *newinfo, size = e->next_offset; e = (struct ip6t_entry *) (entry0 + pos + size); + if (pos + size >= newinfo->size) + return 0; e->counters.pcnt = pos; pos += size; } else { @@ -550,9 +564,15 @@ mark_source_chains(const struct xt_table_info *newinfo, /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); + e = (struct ip6t_entry *) + (entry0 + newpos); + if (!find_jump_target(newinfo, e)) + return 0; } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; + if (newpos >= newinfo->size) + return 0; } e = (struct ip6t_entry *) (entry0 + newpos); @@ -579,25 +599,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) module_put(par.match->me); } -static int -check_entry(const struct ip6t_entry *e) -{ - const struct xt_entry_target *t; - - if (!ip6_checkentry(&e->ipv6)) - return -EINVAL; - - if (e->target_offset + sizeof(struct xt_entry_target) > - e->next_offset) - return -EINVAL; - - t = ip6t_get_target_c(e); - if (e->target_offset + t->u.target_size > e->next_offset) - return -EINVAL; - - return 0; -} - static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ip6t_ip6 *ipv6 = par->entryinfo; @@ -762,7 +763,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e, return -EINVAL; } - err = check_entry(e); + if (!ip6_checkentry(&e->ipv6)) + return -EINVAL; + + err = xt_check_entry_offsets(e, e->elems, e->target_offset, + e->next_offset); if (err) return err; @@ -1320,55 +1325,16 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len, unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; - unsigned int num_counters; - char *name; - int size; - void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ip6t_entry *iter; unsigned int addend; -#ifdef CONFIG_COMPAT - struct compat_xt_counters_info compat_tmp; - - if (compat) { - ptmp = &compat_tmp; - size = sizeof(struct compat_xt_counters_info); - } else -#endif - { - ptmp = &tmp; - size = sizeof(struct xt_counters_info); - } - - if (copy_from_user(ptmp, user, size) != 0) - return -EFAULT; - -#ifdef CONFIG_COMPAT - if (compat) { - num_counters = compat_tmp.num_counters; - name = compat_tmp.name; - } else -#endif - { - num_counters = tmp.num_counters; - name = tmp.name; - } - - if (len != size + num_counters * sizeof(struct xt_counters)) - return -EINVAL; - - paddc = vmalloc(len - size); - if (!paddc) - return -ENOMEM; - if (copy_from_user(paddc, user + size, len - size) != 0) { - ret = -EFAULT; - goto free; - } - - t = xt_find_table_lock(net, AF_INET6, name); + paddc = xt_copy_counters_from_user(user, len, &tmp, compat); + if (IS_ERR(paddc)) + return PTR_ERR(paddc); + t = xt_find_table_lock(net, AF_INET6, tmp.name); if (IS_ERR_OR_NULL(t)) { ret = t ? 
PTR_ERR(t) : -ENOENT; goto free; @@ -1376,7 +1342,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len, local_bh_disable(); private = t->private; - if (private->number != num_counters) { + if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; } @@ -1455,7 +1421,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, static int compat_find_calc_match(struct xt_entry_match *m, - const char *name, const struct ip6t_ip6 *ipv6, int *size) { @@ -1490,17 +1455,14 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, - const unsigned char *limit, - const unsigned int *hook_entries, - const unsigned int *underflows, - const char *name) + const unsigned char *limit) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; - int ret, off, h; + int ret, off; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || @@ -1517,8 +1479,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, return -EINVAL; } - /* For purposes of check_entry casting the compat entry is fine */ - ret = check_entry((struct ip6t_entry *)e); + if (!ip6_checkentry(&e->ipv6)) + return -EINVAL; + + ret = xt_compat_check_entry_offsets(e, e->elems, + e->target_offset, e->next_offset); if (ret) return ret; @@ -1526,7 +1491,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { - ret = compat_find_calc_match(ematch, name, &e->ipv6, &off); + ret = compat_find_calc_match(ematch, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; @@ -1549,17 +1514,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, if (ret) goto out; - /* Check hooks & underflows */ - for (h = 0; h < NF_INET_NUMHOOKS; h++) { - if ((unsigned char *)e - base == hook_entries[h]) - newinfo->hook_entry[h] = hook_entries[h]; - if ((unsigned char *)e - base == underflows[h]) - newinfo->underflow[h] = underflows[h]; - } - - /* Clear counters and comefrom */ - memset(&e->counters, 0, sizeof(e->counters)); - e->comefrom = 0; return 0; out: @@ -1573,18 +1527,17 @@ release_matches: return ret; } -static int +static void compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, - unsigned int *size, const char *name, + unsigned int *size, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct ip6t_entry *de; unsigned int origsize; - int ret, h; + int h; struct xt_entry_match *ematch; - ret = 0; origsize = *size; de = (struct ip6t_entry *)*dstptr; memcpy(de, e, sizeof(struct ip6t_entry)); @@ -1593,11 +1546,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, *dstptr += sizeof(struct ip6t_entry); *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); - xt_ematch_foreach(ematch, e) { - ret = xt_compat_match_from_user(ematch, dstptr, size); - if (ret != 0) - return ret; - } + xt_ematch_foreach(ematch, e) + xt_compat_match_from_user(ematch, dstptr, size); + de->target_offset = e->target_offset - (origsize - *size); t = compat_ip6t_get_target(e); xt_compat_target_from_user(t, dstptr, size); @@ -1609,183 +1560,83 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } 
- return ret; -} - -static int compat_check_entry(struct ip6t_entry *e, struct net *net, - const char *name) -{ - unsigned int j; - int ret = 0; - struct xt_mtchk_param mtpar; - struct xt_entry_match *ematch; - - e->counters.pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(e->counters.pcnt)) - return -ENOMEM; - j = 0; - mtpar.net = net; - mtpar.table = name; - mtpar.entryinfo = &e->ipv6; - mtpar.hook_mask = e->comefrom; - mtpar.family = NFPROTO_IPV6; - xt_ematch_foreach(ematch, e) { - ret = check_match(ematch, &mtpar); - if (ret != 0) - goto cleanup_matches; - ++j; - } - - ret = check_target(e, net, name); - if (ret) - goto cleanup_matches; - return 0; - - cleanup_matches: - xt_ematch_foreach(ematch, e) { - if (j-- == 0) - break; - cleanup_match(ematch, net); - } - - xt_percpu_counter_free(e->counters.pcnt); - - return ret; } static int translate_compat_table(struct net *net, - const char *name, - unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, - unsigned int total_size, - unsigned int number, - unsigned int *hook_entries, - unsigned int *underflows) + const struct compat_ip6t_replace *compatr) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ip6t_entry *iter0; - struct ip6t_entry *iter1; + struct ip6t_replace repl; unsigned int size; int ret = 0; info = *pinfo; entry0 = *pentry0; - size = total_size; - info->number = number; - - /* Init all hooks to impossible value. */ - for (i = 0; i < NF_INET_NUMHOOKS; i++) { - info->hook_entry[i] = 0xFFFFFFFF; - info->underflow[i] = 0xFFFFFFFF; - } + size = compatr->size; + info->number = compatr->num_entries; duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET6); - xt_compat_init_offsets(AF_INET6, number); + xt_compat_init_offsets(AF_INET6, compatr->num_entries); /* Walk through entries, checking offsets. 
*/ - xt_entry_foreach(iter0, entry0, total_size) { + xt_entry_foreach(iter0, entry0, compatr->size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, - entry0 + total_size, - hook_entries, - underflows, - name); + entry0 + compatr->size); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; - if (j != number) { + if (j != compatr->num_entries) { duprintf("translate_compat_table: %u not %u entries\n", - j, number); + j, compatr->num_entries); goto out_unlock; } - /* Check hooks all assigned */ - for (i = 0; i < NF_INET_NUMHOOKS; i++) { - /* Only hooks which are valid */ - if (!(valid_hooks & (1 << i))) - continue; - if (info->hook_entry[i] == 0xFFFFFFFF) { - duprintf("Invalid hook entry %u %u\n", - i, hook_entries[i]); - goto out_unlock; - } - if (info->underflow[i] == 0xFFFFFFFF) { - duprintf("Invalid underflow %u %u\n", - i, underflows[i]); - goto out_unlock; - } - } - ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; - newinfo->number = number; + newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { - newinfo->hook_entry[i] = info->hook_entry[i]; - newinfo->underflow[i] = info->underflow[i]; + newinfo->hook_entry[i] = compatr->hook_entry[i]; + newinfo->underflow[i] = compatr->underflow[i]; } entry1 = newinfo->entries; pos = entry1; - size = total_size; - xt_entry_foreach(iter0, entry0, total_size) { - ret = compat_copy_entry_from_user(iter0, &pos, &size, - name, newinfo, entry1); - if (ret != 0) - break; - } + size = compatr->size; + xt_entry_foreach(iter0, entry0, compatr->size) + compat_copy_entry_from_user(iter0, &pos, &size, + newinfo, entry1); + + /* all module references in entry0 are now gone. */ xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); - if (ret) - goto free_newinfo; - ret = -ELOOP; - if (!mark_source_chains(newinfo, valid_hooks, entry1)) - goto free_newinfo; + memcpy(&repl, compatr, sizeof(*compatr)); - i = 0; - xt_entry_foreach(iter1, entry1, newinfo->size) { - ret = compat_check_entry(iter1, net, name); - if (ret != 0) - break; - ++i; - if (strcmp(ip6t_get_target(iter1)->u.user.name, - XT_ERROR_TARGET) == 0) - ++newinfo->stacksize; - } - if (ret) { - /* - * The first i matches need cleanup_entry (calls ->destroy) - * because they had called ->check already. The other j-i - * entries need only release. 
- */ - int skip = i; - j -= i; - xt_entry_foreach(iter0, entry0, newinfo->size) { - if (skip-- > 0) - continue; - if (j-- == 0) - break; - compat_release_entry(iter0); - } - xt_entry_foreach(iter1, entry1, newinfo->size) { - if (i-- == 0) - break; - cleanup_entry(iter1, net); - } - xt_free_table_info(newinfo); - return ret; + for (i = 0; i < NF_INET_NUMHOOKS; i++) { + repl.hook_entry[i] = newinfo->hook_entry[i]; + repl.underflow[i] = newinfo->underflow[i]; } + repl.num_counters = 0; + repl.counters = NULL; + repl.size = newinfo->size; + ret = translate_table(net, newinfo, entry1, &repl); + if (ret) + goto free_newinfo; + *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); @@ -1793,17 +1644,16 @@ translate_compat_table(struct net *net, free_newinfo: xt_free_table_info(newinfo); -out: - xt_entry_foreach(iter0, entry0, total_size) { + return ret; +out_unlock: + xt_compat_flush_offsets(AF_INET6); + xt_compat_unlock(AF_INET6); + xt_entry_foreach(iter0, entry0, compatr->size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; -out_unlock: - xt_compat_flush_offsets(AF_INET6); - xt_compat_unlock(AF_INET6); - goto out; } static int @@ -1839,10 +1689,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) goto free_newinfo; } - ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, - &newinfo, &loc_cpu_entry, tmp.size, - tmp.num_entries, tmp.hook_entry, - tmp.underflow); + ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index dcccae86190f..ba3d2f3d66d2 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -560,13 +560,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info) if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { ipv4_update_pmtu(skb, dev_net(skb->dev), info, - t->parms.link, 0, IPPROTO_IPV6, 0); + t->parms.link, 0, iph->protocol, 0); err = 0; goto out; } if (type == ICMP_REDIRECT) { ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, - IPPROTO_IPV6, 0); + iph->protocol, 0); err = 0; goto out; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index dab6da85a10e..04955a5d2350 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1708,7 +1708,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) destp = ntohs(inet->inet_dport); srcp = ntohs(inet->inet_sport); - if (icsk->icsk_pending == ICSK_TIME_RETRANS) { + if (icsk->icsk_pending == ICSK_TIME_RETRANS || + icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 9cb0ff304336..ed7f4a81a932 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -647,7 +647,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) /* if we're overly short, let UDP handle it */ encap_rcv = ACCESS_ONCE(up->encap_rcv); - if (skb->len > sizeof(struct udphdr) && encap_rcv) { + if (encap_rcv) { int ret; /* Verify checksum before giving to encap */ diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index afca2eb4dfa7..ec17cbe8a02b 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 /* Mark socket as an encapsulation socket. 
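The mesh_sta_cleanup() change above (mesh_path_flush_by_nexthop() followed by synchronize_net() before the station is torn down) is an instance of the usual RCU unpublish-then-wait pattern: remove the object from every lookup structure first, wait out a grace period, and only then free it. A generic sketch of that pattern, not mac80211 code; obj, obj_list and obj_lock are hypothetical names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	struct list_head node;
	int data;
};

static LIST_HEAD(obj_list);		/* hypothetical RCU-protected list */
static DEFINE_SPINLOCK(obj_lock);	/* serializes writers to obj_list */

static void obj_release(struct obj *o)
{
	/* 1. unpublish: new readers can no longer find the object */
	spin_lock(&obj_lock);
	list_del_rcu(&o->node);
	spin_unlock(&obj_lock);

	/* 2. wait until every RCU read-side critical section that might
	 *    still see the object has completed */
	synchronize_rcu();

	/* 3. only now is it safe to free the object */
	kfree(o);
}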
See net/ipv4/udp.c */ tunnel->encap = encap; if (encap == L2TP_ENCAPTYPE_UDP) { - struct udp_tunnel_sock_cfg udp_cfg; + struct udp_tunnel_sock_cfg udp_cfg = { }; udp_cfg.sk_user_data = tunnel; udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 6f85b6ab8e51..f7bb6829b415 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -151,19 +151,26 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) void mesh_sta_cleanup(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - u32 changed; + u32 changed = 0; /* * maybe userspace handles peer allocation and peering, but in either * case the beacon is still generated by the kernel and we might need * an update. */ - changed = mesh_accept_plinks_update(sdata); + if (sdata->u.mesh.user_mpm && + sta->mesh->plink_state == NL80211_PLINK_ESTAB) + changed |= mesh_plink_dec_estab_count(sdata); + changed |= mesh_accept_plinks_update(sdata); if (!sdata->u.mesh.user_mpm) { changed |= mesh_plink_deactivate(sta); del_timer_sync(&sta->mesh->plink_timer); } + /* make sure no readers can access nexthop sta from here on */ + mesh_path_flush_by_nexthop(sta); + synchronize_net(); + if (changed) ieee80211_mbss_info_change_notify(sdata, changed); } diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index c6be0b4f4058..b6dc2d7cd650 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -530,7 +530,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, const u8 *target_addr, *orig_addr; const u8 *da; u8 target_flags, ttl, flags; - u32 orig_sn, target_sn, lifetime, target_metric; + u32 orig_sn, target_sn, lifetime, target_metric = 0; bool reply = false; bool forward = true; bool root_is_gate; diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 2cafb21b422f..15b0150283b6 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -269,7 +269,7 @@ struct ieee80211_fast_tx { u8 sa_offs, da_offs, pn_offs; u8 band; u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV + - sizeof(rfc1042_header)]; + sizeof(rfc1042_header)] __aligned(2); struct rcu_head rcu_head; }; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 33344f5a66a8..9ea2cc098ad1 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3198,10 +3198,11 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *sdata_iter; enum nl80211_iftype iftype = sdata->wdev.iftype; - int num[NUM_NL80211_IFTYPES]; struct ieee80211_chanctx *ctx; - int num_different_channels = 0; int total = 1; + struct iface_combination_params params = { + .radar_detect = radar_detect, + }; lockdep_assert_held(&local->chanctx_mtx); @@ -3212,9 +3213,6 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, !chandef->chan)) return -EINVAL; - if (chandef) - num_different_channels = 1; - if (WARN_ON(iftype >= NUM_NL80211_IFTYPES)) return -EINVAL; @@ -3225,24 +3223,26 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, return 0; } - memset(num, 0, sizeof(num)); + if (chandef) + params.num_different_channels = 1; if (iftype != NL80211_IFTYPE_UNSPECIFIED) - num[iftype] = 1; + params.iftype_num[iftype] = 1; list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) continue; - radar_detect |= ieee80211_chanctx_radar_detect(local, ctx); + params.radar_detect |= + 
ieee80211_chanctx_radar_detect(local, ctx); if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) { - num_different_channels++; + params.num_different_channels++; continue; } if (chandef && chanmode == IEEE80211_CHANCTX_SHARED && cfg80211_chandef_compatible(chandef, &ctx->conf.def)) continue; - num_different_channels++; + params.num_different_channels++; } list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) { @@ -3255,16 +3255,14 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) continue; - num[wdev_iter->iftype]++; + params.iftype_num[wdev_iter->iftype]++; total++; } - if (total == 1 && !radar_detect) + if (total == 1 && !params.radar_detect) return 0; - return cfg80211_check_combinations(local->hw.wiphy, - num_different_channels, - radar_detect, num); + return cfg80211_check_combinations(local->hw.wiphy, ¶ms); } static void @@ -3280,12 +3278,10 @@ ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c, int ieee80211_max_num_channels(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; - int num[NUM_NL80211_IFTYPES] = {}; struct ieee80211_chanctx *ctx; - int num_different_channels = 0; - u8 radar_detect = 0; u32 max_num_different_channels = 1; int err; + struct iface_combination_params params = {0}; lockdep_assert_held(&local->chanctx_mtx); @@ -3293,17 +3289,17 @@ int ieee80211_max_num_channels(struct ieee80211_local *local) if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) continue; - num_different_channels++; + params.num_different_channels++; - radar_detect |= ieee80211_chanctx_radar_detect(local, ctx); + params.radar_detect |= + ieee80211_chanctx_radar_detect(local, ctx); } list_for_each_entry_rcu(sdata, &local->interfaces, list) - num[sdata->wdev.iftype]++; + params.iftype_num[sdata->wdev.iftype]++; - err = cfg80211_iter_combinations(local->hw.wiphy, - num_different_channels, radar_detect, - num, ieee80211_iter_max_chans, + err = cfg80211_iter_combinations(local->hw.wiphy, ¶ms, + ieee80211_iter_max_chans, &max_num_different_channels); if (err < 0) return err; diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 53b592ec040f..f83c255d7da2 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -1368,7 +1368,6 @@ config NETFILTER_XT_MATCH_QUOTA2 config NETFILTER_XT_MATCH_QUOTA2_LOG bool '"quota2" Netfilter LOG support' depends on NETFILTER_XT_MATCH_QUOTA2 - depends on IP_NF_TARGET_ULOG=n # not yes, not module, just no default n help This option allows `quota2' to log ONCE when a quota limit diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index d4aaad747ea9..25391fb25516 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -415,6 +415,47 @@ int xt_check_match(struct xt_mtchk_param *par, } EXPORT_SYMBOL_GPL(xt_check_match); +/** xt_check_entry_match - check that matches end before start of target + * + * @match: beginning of xt_entry_match + * @target: beginning of this rules target (alleged end of matches) + * @alignment: alignment requirement of match structures + * + * Validates that all matches add up to the beginning of the target, + * and that each match covers at least the base structure size. + * + * Return: 0 on success, negative errno on failure. 
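The kernel-doc above (the helper's body continues just below) states the invariant in words; a worked picture may make the offset arithmetic easier to follow. All byte counts here are invented purely for illustration, they are not real structure sizes:

/*
 * Hypothetical rule blob:
 *
 *   offset   0                    struct ipt_entry
 *   offset 112 (= elems)          match #1, u.match_size = 48
 *   offset 160                    match #2, u.match_size = 40
 *   offset 200 (= target_offset)  target,   u.target_size = 40
 *   offset 240 (= next_offset)    next rule starts here
 *
 * xt_check_entry_match(elems, base + target_offset, alignment) walks
 * match #1 and match #2: each must start on an aligned boundary, be at
 * least sizeof(struct xt_entry_match) long, and not run past the
 * target.  Here 48 + 40 consumes exactly target_offset - (elems - base)
 * = 200 - 112 bytes, so the walk ends precisely where the target begins.
 */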
+ */ +static int xt_check_entry_match(const char *match, const char *target, + const size_t alignment) +{ + const struct xt_entry_match *pos; + int length = target - match; + + if (length == 0) /* no matches */ + return 0; + + pos = (struct xt_entry_match *)match; + do { + if ((unsigned long)pos % alignment) + return -EINVAL; + + if (length < (int)sizeof(struct xt_entry_match)) + return -EINVAL; + + if (pos->u.match_size < sizeof(struct xt_entry_match)) + return -EINVAL; + + if (pos->u.match_size > length) + return -EINVAL; + + length -= pos->u.match_size; + pos = ((void *)((char *)(pos) + (pos)->u.match_size)); + } while (length > 0); + + return 0; +} + #ifdef CONFIG_COMPAT int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) { @@ -484,13 +525,14 @@ int xt_compat_match_offset(const struct xt_match *match) } EXPORT_SYMBOL_GPL(xt_compat_match_offset); -int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, - unsigned int *size) +void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, + unsigned int *size) { const struct xt_match *match = m->u.kernel.match; struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; int pad, off = xt_compat_match_offset(match); u_int16_t msize = cm->u.user.match_size; + char name[sizeof(m->u.user.name)]; m = *dstptr; memcpy(m, cm, sizeof(*cm)); @@ -504,10 +546,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, msize += off; m->u.user.match_size = msize; + strlcpy(name, match->name, sizeof(name)); + module_put(match->me); + strncpy(m->u.user.name, name, sizeof(m->u.user.name)); *size += off; *dstptr += msize; - return 0; } EXPORT_SYMBOL_GPL(xt_compat_match_from_user); @@ -538,8 +582,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m, return 0; } EXPORT_SYMBOL_GPL(xt_compat_match_to_user); + +/* non-compat version may have padding after verdict */ +struct compat_xt_standard_target { + struct compat_xt_entry_target t; + compat_uint_t verdict; +}; + +int xt_compat_check_entry_offsets(const void *base, const char *elems, + unsigned int target_offset, + unsigned int next_offset) +{ + long size_of_base_struct = elems - (const char *)base; + const struct compat_xt_entry_target *t; + const char *e = base; + + if (target_offset < size_of_base_struct) + return -EINVAL; + + if (target_offset + sizeof(*t) > next_offset) + return -EINVAL; + + t = (void *)(e + target_offset); + if (t->u.target_size < sizeof(*t)) + return -EINVAL; + + if (target_offset + t->u.target_size > next_offset) + return -EINVAL; + + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && + COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset) + return -EINVAL; + + /* compat_xt_entry match has less strict aligment requirements, + * otherwise they are identical. In case of padding differences + * we need to add compat version of xt_check_entry_match. + */ + BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match)); + + return xt_check_entry_match(elems, base + target_offset, + __alignof__(struct compat_xt_entry_match)); +} +EXPORT_SYMBOL(xt_compat_check_entry_offsets); #endif /* CONFIG_COMPAT */ +/** + * xt_check_entry_offsets - validate arp/ip/ip6t_entry + * + * @base: pointer to arp/ip/ip6t_entry + * @elems: pointer to first xt_entry_match, i.e. 
ip(6)t_entry->elems + * @target_offset: the arp/ip/ip6_t->target_offset + * @next_offset: the arp/ip/ip6_t->next_offset + * + * validates that target_offset and next_offset are sane and that all + * match sizes (if any) align with the target offset. + * + * This function does not validate the targets or matches themselves, it + * only tests that all the offsets and sizes are correct, that all + * match structures are aligned, and that the last structure ends where + * the target structure begins. + * + * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version. + * + * The arp/ip/ip6t_entry structure @base must have passed following tests: + * - it must point to a valid memory location + * - base to base + next_offset must be accessible, i.e. not exceed allocated + * length. + * + * A well-formed entry looks like this: + * + * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry + * e->elems[]-----' | | + * matchsize | | + * matchsize | | + * | | + * target_offset---------------------------------' | + * next_offset---------------------------------------------------' + * + * elems[]: flexible array member at end of ip(6)/arpt_entry struct. + * This is where matches (if any) and the target reside. + * target_offset: beginning of target. + * next_offset: start of the next rule; also: size of this rule. + * Since targets have a minimum size, target_offset + minlen <= next_offset. + * + * Every match stores its size, sum of sizes must not exceed target_offset. + * + * Return: 0 on success, negative errno on failure. + */ +int xt_check_entry_offsets(const void *base, + const char *elems, + unsigned int target_offset, + unsigned int next_offset) +{ + long size_of_base_struct = elems - (const char *)base; + const struct xt_entry_target *t; + const char *e = base; + + /* target start is within the ip/ip6/arpt_entry struct */ + if (target_offset < size_of_base_struct) + return -EINVAL; + + if (target_offset + sizeof(*t) > next_offset) + return -EINVAL; + + t = (void *)(e + target_offset); + if (t->u.target_size < sizeof(*t)) + return -EINVAL; + + if (target_offset + t->u.target_size > next_offset) + return -EINVAL; + + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && + XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset) + return -EINVAL; + + return xt_check_entry_match(elems, base + target_offset, + __alignof__(struct xt_entry_match)); +} +EXPORT_SYMBOL(xt_check_entry_offsets); + int xt_check_target(struct xt_tgchk_param *par, unsigned int size, u_int8_t proto, bool inv_proto) { @@ -590,6 +751,80 @@ int xt_check_target(struct xt_tgchk_param *par, } EXPORT_SYMBOL_GPL(xt_check_target); +/** + * xt_copy_counters_from_user - copy counters and metadata from userspace + * + * @user: src pointer to userspace memory + * @len: alleged size of userspace memory + * @info: where to store the xt_counters_info metadata + * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel + * + * Copies counter meta data from @user and stores it in @info. + * + * vmallocs memory to hold the counters, then copies the counter data + * from @user to the new memory and returns a pointer to it. + * + * If @compat is true, @info gets converted automatically to the 64bit + * representation. + * + * The metadata associated with the counters is stored in @info. + * + * Return: returns pointer that caller has to test via IS_ERR(). + * If IS_ERR is false, caller has to vfree the pointer. 
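The contract spelled out in the kernel-doc that ends here (the helper's body follows below) - test the return value with IS_ERR(), vfree() it when done - is exactly how the reworked do_add_counters() in the ip_tables/ip6_tables hunks earlier in this series consumes it. A condensed caller sketch; add_counters_sketch is an invented name, and locking and the actual counter update are elided:

static int add_counters_sketch(struct net *net, const void __user *user,
			       unsigned int len, bool compat)
{
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	int ret = 0;

	/* copies the metadata into tmp (converting from the compat layout
	 * if @compat) and vmalloc()s a buffer holding the counter array */
	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);

	t = xt_find_table_lock(net, AF_INET, tmp.name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	/* ... check tmp.num_counters against the table and add the
	 * paddc[] values into the per-cpu counters ... */

	xt_table_unlock(t);
	module_put(t->me);
free:
	vfree(paddc);	/* the caller owns the vmalloc'ed buffer */
	return ret;
}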
+ */ +void *xt_copy_counters_from_user(const void __user *user, unsigned int len, + struct xt_counters_info *info, bool compat) +{ + void *mem; + u64 size; + +#ifdef CONFIG_COMPAT + if (compat) { + /* structures only differ in size due to alignment */ + struct compat_xt_counters_info compat_tmp; + + if (len <= sizeof(compat_tmp)) + return ERR_PTR(-EINVAL); + + len -= sizeof(compat_tmp); + if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) + return ERR_PTR(-EFAULT); + + strlcpy(info->name, compat_tmp.name, sizeof(info->name)); + info->num_counters = compat_tmp.num_counters; + user += sizeof(compat_tmp); + } else +#endif + { + if (len <= sizeof(*info)) + return ERR_PTR(-EINVAL); + + len -= sizeof(*info); + if (copy_from_user(info, user, sizeof(*info)) != 0) + return ERR_PTR(-EFAULT); + + info->name[sizeof(info->name) - 1] = '\0'; + user += sizeof(*info); + } + + size = sizeof(struct xt_counters); + size *= info->num_counters; + + if (size != (u64)len) + return ERR_PTR(-EINVAL); + + mem = vmalloc(len); + if (!mem) + return ERR_PTR(-ENOMEM); + + if (copy_from_user(mem, user, len) == 0) + return mem; + + vfree(mem); + return ERR_PTR(-EFAULT); +} +EXPORT_SYMBOL_GPL(xt_copy_counters_from_user); + #ifdef CONFIG_COMPAT int xt_compat_target_offset(const struct xt_target *target) { @@ -605,6 +840,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; int pad, off = xt_compat_target_offset(target); u_int16_t tsize = ct->u.user.target_size; + char name[sizeof(t->u.user.name)]; t = *dstptr; memcpy(t, ct, sizeof(*ct)); @@ -618,6 +854,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, tsize += off; t->u.user.target_size = tsize; + strlcpy(name, target->name, sizeof(name)); + module_put(target->me); + strncpy(t->u.user.name, name, sizeof(t->u.user.name)); *size += off; *dstptr += tsize; diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c index dca5cacc51f0..ececa65868ef 100644 --- a/net/netfilter/xt_qtaguid.c +++ b/net/netfilter/xt_qtaguid.c @@ -2536,8 +2536,7 @@ static int pp_stats_line(struct seq_file *m, struct tag_stat *ts_entry, uid_t stat_uid = get_uid_from_tag(tag); struct proc_print_info *ppi = m->private; /* Detailed tags are not available to everybody */ - if (get_atag_from_tag(tag) && !can_read_other_uid_stats( - make_kuid(&init_user_ns,stat_uid))) { + if (!can_read_other_uid_stats(make_kuid(&init_user_ns,stat_uid))) { CT_DEBUG("qtaguid: stats line: " "%s 0x%llx %u: insufficient priv " "from pid=%u tgid=%u uid=%u stats.gid=%u\n", diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c index 90604d8a8b4f..94663440d160 100644 --- a/net/netfilter/xt_quota2.c +++ b/net/netfilter/xt_quota2.c @@ -26,6 +26,28 @@ #define QUOTA2_SYSFS_WORK_MAX_SIZE 64 #define QUOTA2_SYSFS_NUM_ENVP 3 +#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG +/* For compatibility, these definitions are copied from the + * deprecated header file <linux/netfilter_ipv4/ipt_ULOG.h> */ +#define ULOG_MAC_LEN 80 +#define ULOG_PREFIX_LEN 32 + +/* Format of the ULOG packets passed through netlink */ +typedef struct ulog_packet_msg { + unsigned long mark; + long timestamp_sec; + long timestamp_usec; + unsigned int hook; + char indev_name[IFNAMSIZ]; + char outdev_name[IFNAMSIZ]; + size_t data_len; + char prefix[ULOG_PREFIX_LEN]; + unsigned char mac_len; + unsigned char mac[ULOG_MAC_LEN]; + unsigned char payload[0]; +} ulog_packet_msg_t; +#endif + /** * @lock: lock to protect quota 
writers from each other */ diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 992b35fb8615..7a5fa0c98377 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2784,6 +2784,7 @@ static int netlink_dump(struct sock *sk) struct netlink_callback *cb; struct sk_buff *skb = NULL; struct nlmsghdr *nlh; + struct module *module; int len, err = -ENOBUFS; int alloc_min_size; int alloc_size; @@ -2863,9 +2864,11 @@ static int netlink_dump(struct sock *sk) cb->done(cb); nlk->cb_running = false; + module = cb->module; + skb = cb->skb; mutex_unlock(nlk->cb_mutex); - module_put(cb->module); - consume_skb(cb->skb); + module_put(module); + consume_skb(skb); return 0; errout_skb: diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index d933cb89efac..5eb7694348b5 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -91,6 +91,8 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms) struct vxlan_config conf = { .no_share = true, .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX, + /* Don't restrict the packets that can be sent by MTU */ + .mtu = IP_MAX_MTU, }; if (!options) { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 9cc7b512b472..a86f26d05bc2 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1341,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { - return reciprocal_scale(skb_get_hash(skb), num); + return reciprocal_scale(__skb_get_hash_symmetric(skb), num); } static unsigned int fanout_demux_lb(struct packet_fanout *f, diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index b07c535ba8e7..eeb3eb3ea9eb 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -105,9 +105,7 @@ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb, int hl = ihl + jhl; if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) || - (skb_cloned(skb) && - !skb_clone_writable(skb, hl + ntkoff) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) + skb_try_make_writable(skb, hl + ntkoff)) return NULL; else return (void *)(skb_network_header(skb) + ihl); @@ -365,9 +363,7 @@ static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags) } if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) { - if (skb_cloned(skb) && - !skb_clone_writable(skb, sizeof(*iph) + ntkoff) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff)) goto fail; ip_send_check(ip_hdr(skb)); diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 32fcdecdb9e2..e384d6aefa3a 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -170,7 +170,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, if (!(at & AT_EGRESS)) { if (m->tcfm_ok_push) - skb_push(skb2, skb->mac_len); + skb_push_rcsum(skb2, skb->mac_len); } /* mirror is always swallowed */ diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index b7c4ead8b5a8..27607b863aba 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -126,9 +126,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a, addr = iph->daddr; if (!((old_addr ^ addr) & mask)) { - if (skb_cloned(skb) && - !skb_clone_writable(skb, sizeof(*iph) + noff) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + if (skb_try_make_writable(skb, sizeof(*iph) + noff)) goto drop; new_addr &= mask; @@ -156,9 +154,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a, struct tcphdr *tcph; if (!pskb_may_pull(skb, 
ihl + sizeof(*tcph) + noff) || - (skb_cloned(skb) && - !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) + skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff)) goto drop; tcph = (void *)(skb_network_header(skb) + ihl); @@ -171,9 +167,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a, struct udphdr *udph; if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) || - (skb_cloned(skb) && - !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) + skb_try_make_writable(skb, ihl + sizeof(*udph) + noff)) goto drop; udph = (void *)(skb_network_header(skb) + ihl); @@ -213,10 +207,8 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a, if ((old_addr ^ addr) & mask) break; - if (skb_cloned(skb) && - !skb_clone_writable(skb, ihl + sizeof(*icmph) + - sizeof(*iph) + noff) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + if (skb_try_make_writable(skb, ihl + sizeof(*icmph) + + sizeof(*iph) + noff)) goto drop; icmph = (void *)(skb_network_header(skb) + ihl); diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 2177eac0a61e..2e4bd2c0a50c 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -37,14 +37,18 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) { + unsigned int prev_backlog; + if (likely(skb_queue_len(&sch->q) < sch->limit)) return qdisc_enqueue_tail(skb, sch); + prev_backlog = sch->qstats.backlog; /* queue full, remove one skb to fulfill the limit */ __qdisc_queue_drop_head(sch, &sch->q); qdisc_qstats_drop(sch); qdisc_enqueue_tail(skb, sch); + qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog); return NET_XMIT_CN; } diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 4befe97a9034..b7c29d5b6f04 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -650,14 +650,14 @@ deliver: #endif if (q->qdisc) { + unsigned int pkt_len = qdisc_pkt_len(skb); int err = qdisc_enqueue(skb, q->qdisc); - if (unlikely(err != NET_XMIT_SUCCESS)) { - if (net_xmit_drop_count(err)) { - qdisc_qstats_drop(sch); - qdisc_tree_reduce_backlog(sch, 1, - qdisc_pkt_len(skb)); - } + if (err != NET_XMIT_SUCCESS && + net_xmit_drop_count(err)) { + qdisc_qstats_drop(sch); + qdisc_tree_reduce_backlog(sch, 1, + pkt_len); } goto tfifo_dequeue; } diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 1095be9c80ab..4605dc73def6 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -857,8 +857,8 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g goto out; if (svc_getnl(&buf->head[0]) != seq) goto out; - /* trim off the mic at the end before returning */ - xdr_buf_trim(buf, mic.len + 4); + /* trim off the mic and padding at the end before returning */ + xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4); stat = 0; out: kfree(mic.data); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 23608eb0ded2..7a93922457ff 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -442,7 +442,7 @@ out_no_rpciod: return ERR_PTR(err); } -struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, +static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, struct rpc_xprt *xprt) { struct rpc_clnt *clnt = NULL; @@ -474,7 +474,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, return clnt; } -EXPORT_SYMBOL_GPL(rpc_create_xprt); /** * rpc_create - 
create an RPC client and transport with one call @@ -500,6 +499,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) }; char servername[48]; + if (args->bc_xprt) { + WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP); + xprt = args->bc_xprt->xpt_bc_xprt; + if (xprt) { + xprt_get(xprt); + return rpc_create_xprt(args, xprt); + } + } + if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index d5d7132ac847..1b58866175e6 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -1169,6 +1169,7 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB, .dst = dst, .dst_len = dst_len, + .fi = fi, .tos = tos, .type = type, .nlflags = nlflags, @@ -1177,8 +1178,6 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, struct net_device *dev; int err = 0; - memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi)); - /* Don't offload route if using custom ip rules or if * IPv4 FIB offloading has been disabled completely. */ @@ -1222,6 +1221,7 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB, .dst = dst, .dst_len = dst_len, + .fi = fi, .tos = tos, .type = type, .nlflags = 0, @@ -1230,8 +1230,6 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, struct net_device *dev; int err = 0; - memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi)); - if (!(fi->fib_flags & RTNH_F_OFFLOAD)) return 0; diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 1eadc95e1132..2ed732bfe94b 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -802,7 +802,7 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg, goto out; tipc_tlv_sprintf(msg->rep, "%-10u %s", - nla_get_u32(publ[TIPC_NLA_PUBL_REF]), + nla_get_u32(publ[TIPC_NLA_PUBL_KEY]), scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]); out: tipc_tlv_sprintf(msg->rep, "\n"); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index e53003cf7703..9b713e0ce00d 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2814,6 +2814,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) if (err) return err; + if (!attrs[TIPC_NLA_SOCK]) + return -EINVAL; + err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], tipc_nl_sock_policy); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 898a53a562b8..6579fd6e7459 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i) &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { struct dentry *dentry = unix_sk(s)->path.dentry; - if (dentry && d_backing_inode(dentry) == i) { + if (dentry && d_real_inode(dentry) == i) { sock_hold(s); goto found; } @@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net, err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path); if (err) goto fail; - inode = d_backing_inode(path.dentry); + inode = d_real_inode(path.dentry); err = inode_permission(inode, MAY_WRITE); if (err) goto put_fail; @@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out_up; } addr->hash = UNIX_HASH_SIZE; - hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); + hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); spin_lock(&unix_table_lock); u->path = u_path; 
list = &unix_socket_table[hash]; diff --git a/net/wireless/core.h b/net/wireless/core.h index 05125d092b18..fcd59e76a8e5 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -466,7 +466,7 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, u32 *mask); int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, - u32 beacon_int); + enum nl80211_iftype iftype, u32 beacon_int); void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype, int num); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 4d7281df26b6..40299f19c09b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1003,6 +1003,10 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy, nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS, c->radar_detect_regions))) goto nla_put_failure; + if (c->beacon_int_min_gcd && + nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD, + c->beacon_int_min_gcd)) + goto nla_put_failure; nla_nest_end(msg, nl_combi); } @@ -3656,7 +3660,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) params.dtim_period = nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]); - err = cfg80211_validate_beacon_int(rdev, params.beacon_interval); + err = cfg80211_validate_beacon_int(rdev, dev->ieee80211_ptr->iftype, + params.beacon_interval); if (err) return err; diff --git a/net/wireless/util.c b/net/wireless/util.c index a5b20d75017e..acff02fcc281 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1483,30 +1483,50 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, EXPORT_SYMBOL(ieee80211_chandef_to_operating_class); int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, - u32 beacon_int) + enum nl80211_iftype iftype, u32 beacon_int) { struct wireless_dev *wdev; - int res = 0; + struct iface_combination_params params = { + .beacon_int_gcd = beacon_int, /* GCD(n) = n */ + }; if (!beacon_int) return -EINVAL; + params.iftype_num[iftype] = 1; list_for_each_entry(wdev, &rdev->wdev_list, list) { if (!wdev->beacon_interval) continue; - if (wdev->beacon_interval != beacon_int) { - res = -EINVAL; - break; + + params.iftype_num[wdev->iftype]++; + } + + list_for_each_entry(wdev, &rdev->wdev_list, list) { + u32 bi_prev = wdev->beacon_interval; + + if (!wdev->beacon_interval) + continue; + + /* slight optimisation - skip identical BIs */ + if (wdev->beacon_interval == beacon_int) + continue; + + params.beacon_int_different = true; + + /* Get the GCD */ + while (bi_prev != 0) { + u32 tmp_bi = bi_prev; + + bi_prev = params.beacon_int_gcd % bi_prev; + params.beacon_int_gcd = tmp_bi; } } - return res; + return cfg80211_check_combinations(&rdev->wiphy, ¶ms); } int cfg80211_iter_combinations(struct wiphy *wiphy, - const int num_different_channels, - const u8 radar_detect, - const int iftype_num[NUM_NL80211_IFTYPES], + struct iface_combination_params *params, void (*iter)(const struct ieee80211_iface_combination *c, void *data), void *data) @@ -1517,7 +1537,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, int num_interfaces = 0; u32 used_iftypes = 0; - if (radar_detect) { + if (params->radar_detect) { rcu_read_lock(); regdom = rcu_dereference(cfg80211_regdomain); if (regdom) @@ -1526,8 +1546,8 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, } for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { - num_interfaces += iftype_num[iftype]; - if (iftype_num[iftype] > 0 && + num_interfaces += params->iftype_num[iftype]; + if 
(params->iftype_num[iftype] > 0 && !(wiphy->software_iftypes & BIT(iftype))) used_iftypes |= BIT(iftype); } @@ -1541,7 +1561,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, if (num_interfaces > c->max_interfaces) continue; - if (num_different_channels > c->num_different_channels) + if (params->num_different_channels > c->num_different_channels) continue; limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits, @@ -1556,16 +1576,17 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, all_iftypes |= limits[j].types; if (!(limits[j].types & BIT(iftype))) continue; - if (limits[j].max < iftype_num[iftype]) + if (limits[j].max < params->iftype_num[iftype]) goto cont; - limits[j].max -= iftype_num[iftype]; + limits[j].max -= params->iftype_num[iftype]; } } - if (radar_detect != (c->radar_detect_widths & radar_detect)) + if (params->radar_detect != + (c->radar_detect_widths & params->radar_detect)) goto cont; - if (radar_detect && c->radar_detect_regions && + if (params->radar_detect && c->radar_detect_regions && !(c->radar_detect_regions & BIT(region))) goto cont; @@ -1577,6 +1598,17 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, if ((all_iftypes & used_iftypes) != used_iftypes) goto cont; + if (params->beacon_int_gcd) { + if (c->beacon_int_min_gcd && + params->beacon_int_gcd < c->beacon_int_min_gcd) { + kfree(limits); + return -EINVAL; + } + if (!c->beacon_int_min_gcd && + params->beacon_int_different) + goto cont; + } + /* This combination covered all interface types and * supported the requested numbers, so we're good. */ @@ -1599,14 +1631,11 @@ cfg80211_iter_sum_ifcombs(const struct ieee80211_iface_combination *c, } int cfg80211_check_combinations(struct wiphy *wiphy, - const int num_different_channels, - const u8 radar_detect, - const int iftype_num[NUM_NL80211_IFTYPES]) + struct iface_combination_params *params) { int err, num = 0; - err = cfg80211_iter_combinations(wiphy, num_different_channels, - radar_detect, iftype_num, + err = cfg80211_iter_combinations(wiphy, params, cfg80211_iter_sum_ifcombs, &num); if (err) return err; @@ -1625,14 +1654,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, u8 radar_detect) { struct wireless_dev *wdev_iter; - int num[NUM_NL80211_IFTYPES]; struct ieee80211_channel *used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS]; struct ieee80211_channel *ch; enum cfg80211_chan_mode chmode; - int num_different_channels = 0; int total = 1; int i; + struct iface_combination_params params = { + .radar_detect = radar_detect, + }; ASSERT_RTNL(); @@ -1649,10 +1679,9 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, return 0; } - memset(num, 0, sizeof(num)); memset(used_channels, 0, sizeof(used_channels)); - num[iftype] = 1; + params.iftype_num[iftype] = 1; /* TODO: We'll probably not need this anymore, since this * should only be called with CHAN_MODE_UNDEFINED. 
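The reworked cfg80211_validate_beacon_int() above seeds params.beacon_int_gcd with the new interval (the GCD of a single value is the value itself) and then folds in every existing wdev->beacon_interval with Euclid's algorithm; the beacon_int_min_gcd check added to cfg80211_iter_combinations() just above compares the result against each advertised combination. The accumulation step in isolation, written as a plain helper for clarity; the kernel change open-codes the same loop, and this helper name is illustrative:

/* Euclid's algorithm: fold one more beacon interval into the running GCD. */
static u32 fold_beacon_int_gcd(u32 gcd, u32 beacon_int)
{
	while (beacon_int != 0) {
		u32 tmp = beacon_int;

		beacon_int = gcd % beacon_int;
		gcd = tmp;
	}
	return gcd;
}

/* e.g. intervals 100 and 150 give a GCD of 50; 100 and 100 give 100,
 * which is the "identical BI" case the code skips as an optimisation. */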
There are @@ -1665,10 +1694,10 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, case CHAN_MODE_SHARED: WARN_ON(!chan); used_channels[0] = chan; - num_different_channels++; + params.num_different_channels++; break; case CHAN_MODE_EXCLUSIVE: - num_different_channels++; + params.num_different_channels++; break; } @@ -1696,7 +1725,8 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, */ mutex_lock_nested(&wdev_iter->mtx, 1); __acquire(wdev_iter->mtx); - cfg80211_get_chan_state(wdev_iter, &ch, &chmode, &radar_detect); + cfg80211_get_chan_state(wdev_iter, &ch, &chmode, + &params.radar_detect); wdev_unlock(wdev_iter); switch (chmode) { @@ -1712,23 +1742,22 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, if (used_channels[i] == NULL) { used_channels[i] = ch; - num_different_channels++; + params.num_different_channels++; } break; case CHAN_MODE_EXCLUSIVE: - num_different_channels++; + params.num_different_channels++; break; } - num[wdev_iter->iftype]++; + params.iftype_num[wdev_iter->iftype]++; total++; } - if (total == 1 && !radar_detect) + if (total == 1 && !params.radar_detect) return 0; - return cfg80211_check_combinations(&rdev->wiphy, num_different_channels, - radar_detect, num); + return cfg80211_check_combinations(&rdev->wiphy, &params); } int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index b50ee5d622e1..c753211cb83f 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -955,8 +955,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, return private(dev, iwr, cmd, info, handler); } /* Old driver API : call driver ioctl handler */ - if (dev->netdev_ops->ndo_do_ioctl) - return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); + if (dev->netdev_ops->ndo_do_ioctl) { +#ifdef CONFIG_COMPAT + if (info->flags & IW_REQUEST_FLAG_COMPAT) { + int ret = 0; + struct iwreq iwr_lcl; + struct compat_iw_point *iwp_compat = (void *) &iwr->u.data; + + memcpy(&iwr_lcl, iwr, sizeof(struct iwreq)); + iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer); + iwr_lcl.u.data.length = iwp_compat->length; + iwr_lcl.u.data.flags = iwp_compat->flags; + + ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd); + + iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer); + iwp_compat->length = iwr_lcl.u.data.length; + iwp_compat->flags = iwr_lcl.u.data.flags; + + return ret; + } else +#endif + return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); + } return -EOPNOTSUPP; } diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 4efedcbe4165..da3386a9d244 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -24,6 +24,7 @@ warning-1 += $(call cc-option, -Wmissing-prototypes) warning-1 += -Wold-style-definition warning-1 += $(call cc-option, -Wmissing-include-dirs) warning-1 += $(call cc-option, -Wunused-but-set-variable) +warning-1 += $(call cc-option, -Wunused-const-variable) warning-1 += $(call cc-disable-warning, missing-field-initializers) warning-2 := -Waggregate-return diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 5b96206e9aab..9f5cdd49ff0b 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -695,7 +695,7 @@ static int do_of_entry (const char *filename, void *symval, char *alias) len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", (*type)[0] ?
*type : "*"); - if (compatible[0]) + if ((*compatible)[0]) sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", *compatible); diff --git a/scripts/package/Makefile b/scripts/package/Makefile index 1aca224e8597..493e226356ca 100644 --- a/scripts/package/Makefile +++ b/scripts/package/Makefile @@ -52,7 +52,7 @@ rpm-pkg rpm: FORCE $(call cmd,src_tar,$(KERNELPATH),kernel.spec) $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version mv -f $(objtree)/.tmp_version $(objtree)/.version - rpmbuild --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz + rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz rm $(KERNELPATH).tar.gz kernel.spec # binrpm-pkg @@ -63,7 +63,7 @@ binrpm-pkg: FORCE $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version mv -f $(objtree)/.tmp_version $(objtree)/.version - rpmbuild --define "_builddir $(objtree)" --target \ + rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \ $(UTS_MACHINE) -bb $(objtree)/binkernel.spec rm binkernel.spec diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 89fea87feafb..bbfdfee59b38 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -467,13 +467,6 @@ static int sb_finish_set_opts(struct super_block *sb) if (selinux_is_sblabel_mnt(sb)) sbsec->flags |= SBLABEL_MNT; - /* - * Special handling for rootfs. Is genfs but supports - * setting SELinux context on in-core inodes. - */ - if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0) - sbsec->flags |= SBLABEL_MNT; - /* Initialize the root inode. */ rc = inode_doinit_with_dentry(root_inode, root); @@ -3711,6 +3704,38 @@ static int selinux_kernel_module_request(char *kmod_name) SYSTEM__MODULE_REQUEST, &ad); } +static int selinux_kernel_module_from_file(struct file *file) +{ + struct common_audit_data ad; + struct inode_security_struct *isec; + struct file_security_struct *fsec; + struct inode *inode; + u32 sid = current_sid(); + int rc; + + /* init_module */ + if (file == NULL) + return avc_has_perm(sid, sid, SECCLASS_SYSTEM, + SYSTEM__MODULE_LOAD, NULL); + + /* finit_module */ + ad.type = LSM_AUDIT_DATA_PATH; + ad.u.path = file->f_path; + + inode = file_inode(file); + isec = inode->i_security; + fsec = file->f_security; + + if (sid != fsec->sid) { + rc = avc_has_perm(sid, fsec->sid, SECCLASS_FD, FD__USE, &ad); + if (rc) + return rc; + } + + return avc_has_perm(sid, isec->sid, SECCLASS_SYSTEM, + SYSTEM__MODULE_LOAD, &ad); +} + static int selinux_task_setpgid(struct task_struct *p, pid_t pgid) { return current_has_perm(p, PROCESS__SETPGID); @@ -6003,6 +6028,7 @@ static struct security_hook_list selinux_hooks[] = { LSM_HOOK_INIT(kernel_act_as, selinux_kernel_act_as), LSM_HOOK_INIT(kernel_create_files_as, selinux_kernel_create_files_as), LSM_HOOK_INIT(kernel_module_request, selinux_kernel_module_request), + LSM_HOOK_INIT(kernel_module_from_file, selinux_kernel_module_from_file), LSM_HOOK_INIT(task_setpgid, selinux_task_setpgid), LSM_HOOK_INIT(task_getpgid, selinux_task_getpgid), LSM_HOOK_INIT(task_getsid, selinux_task_getsid), diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 5a4eef59aeff..b393d29ae857 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -32,7 +32,7 @@ struct security_class_mapping secclass_map[] = { "setsockcreate", NULL } }, { "system", { "ipc_info", "syslog_read", "syslog_mod", - "syslog_console", "module_request", NULL } }, + "syslog_console", "module_request", "module_load", NULL } }, { "capability", { 
"chown", "dac_override", "dac_read_search", "fowner", "fsetid", "kill", "setgid", "setuid", "setpcap", diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 2bbb41822d8e..7f947f7c3331 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -80,9 +80,10 @@ static struct nlmsg_perm nlmsg_route_perms[] = static struct nlmsg_perm nlmsg_tcpdiag_perms[] = { - { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, - { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, - { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, + { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, + { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, + { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, + { SOCK_DESTROY_BACKPORT, NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE }, }; static struct nlmsg_perm nlmsg_xfrm_perms[] = diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 9e4743e833be..a2c2f06060df 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -849,7 +849,7 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device, } EXPORT_SYMBOL(snd_pcm_new_internal); -static void free_pcm_kctl(struct snd_pcm_str *pstr) +static void free_chmap(struct snd_pcm_str *pstr) { if (pstr->chmap_kctl) { snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl); @@ -887,7 +887,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr) kfree(setup); } #endif - free_pcm_kctl(pstr); + free_chmap(pstr); if (pstr->substream_count) put_device(&pstr->dev); } @@ -1152,7 +1152,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device) for (cidx = 0; cidx < 2; cidx++) { if (!pcm->internal) snd_unregister_device(&pcm->streams[cidx].dev); - free_pcm_kctl(&pcm->streams[cidx]); + free_chmap(&pcm->streams[cidx]); } mutex_unlock(&pcm->open_mutex); mutex_unlock(®ister_mutex); diff --git a/sound/core/timer.c b/sound/core/timer.c index f420cd8583da..12768f55f8d5 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1968,6 +1968,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, qhead = tu->qhead++; tu->qhead %= tu->queue_size; + tu->qused--; spin_unlock_irq(&tu->qlock); if (tu->tread) { @@ -1981,7 +1982,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, } spin_lock_irq(&tu->qlock); - tu->qused--; if (err < 0) goto _error; result += unit; diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c index a9f7a75702d2..67628616506e 100644 --- a/sound/drivers/dummy.c +++ b/sound/drivers/dummy.c @@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream) static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm) { + hrtimer_cancel(&dpcm->timer); tasklet_kill(&dpcm->tasklet); } diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c index 4667c3232b7f..74177189063c 100644 --- a/sound/pci/au88x0/au88x0_core.c +++ b/sound/pci/au88x0/au88x0_core.c @@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma) int page, p, pp, delta, i; page = - (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) & - WT_SUBBUF_MASK) - >> WT_SUBBUF_SHIFT; + (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) + >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK; if (dma->nr_periods >= 4) delta = (page - dma->period_real) & 3; else { diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index 1cb85aeb0cea..286f5e3686a3 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c @@ -2200,11 +2200,11 
@@ static int snd_echo_resume(struct device *dev) u32 pipe_alloc_mask; int err; - commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL); + commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL); if (commpage_bak == NULL) return -ENOMEM; commpage = chip->comm_page; - memcpy(commpage_bak, commpage, sizeof(struct comm_page)); + memcpy(commpage_bak, commpage, sizeof(*commpage)); err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device); if (err < 0) { diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 367dbf0d285e..dc2fa576d60d 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -3994,6 +3994,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid, for (n = 0; n < spec->paths.used; n++) { path = snd_array_elem(&spec->paths, n); + if (!path->depth) + continue; if (path->path[0] == nid || path->path[path->depth - 1] == nid) { bool pin_old = path->pin_enabled; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 411630e9c034..8218cace8fea 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -359,8 +359,12 @@ enum { #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170) #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) +#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171) +#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) +#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) -#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) +#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ + IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) static char *driver_short_names[] = { [AZX_DRIVER_ICH] = "HDA Intel", @@ -1252,8 +1256,10 @@ static int azx_free(struct azx *chip) if (use_vga_switcheroo(hda)) { if (chip->disabled && hda->probe_continued) snd_hda_unlock_devices(&chip->bus); - if (hda->vga_switcheroo_registered) + if (hda->vga_switcheroo_registered) { vga_switcheroo_unregister_client(chip->pci); + vga_switcheroo_fini_domain_pm_ops(chip->card->dev); + } } if (bus->chip_init) { @@ -2204,6 +2210,15 @@ static const struct pci_device_id azx_ids[] = { /* Sunrise Point-LP */ { PCI_DEVICE(0x8086, 0x9d70), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + /* Kabylake */ + { PCI_DEVICE(0x8086, 0xa171), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + /* Kabylake-LP */ + { PCI_DEVICE(0x8086, 0x9d71), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, + /* Kabylake-H */ + { PCI_DEVICE(0x8086, 0xa2f0), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, /* Broxton-P(Apollolake) */ { PCI_DEVICE(0x8086, 0x5a98), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, @@ -2277,6 +2292,8 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0x157a), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0x15b3), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0x793b), .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, { PCI_DEVICE(0x1002, 0x7919), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 4918ffa5ba68..abcb5a6a1cd9 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -335,6 
+335,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0283: case 0x10ec0286: case 0x10ec0288: + case 0x10ec0295: case 0x10ec0298: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; @@ -342,6 +343,14 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0293: alc_update_coef_idx(codec, 0xa, 1<<13, 0); break; + case 0x10ec0234: + case 0x10ec0274: + case 0x10ec0294: + case 0x10ec0700: + case 0x10ec0701: + case 0x10ec0703: + alc_update_coef_idx(codec, 0x10, 1<<15, 0); + break; case 0x10ec0662: if ((coef & 0x00f0) == 0x0030) alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */ @@ -902,6 +911,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = { { 0x10ec0298, 0x1028, 0, "ALC3266" }, { 0x10ec0256, 0x1028, 0, "ALC3246" }, { 0x10ec0225, 0x1028, 0, "ALC3253" }, + { 0x10ec0295, 0x1028, 0, "ALC3254" }, { 0x10ec0670, 0x1025, 0, "ALC669X" }, { 0x10ec0676, 0x1025, 0, "ALC679X" }, { 0x10ec0282, 0x1043, 0, "ALC3229" }, @@ -2647,6 +2657,8 @@ enum { ALC269_TYPE_ALC255, ALC269_TYPE_ALC256, ALC269_TYPE_ALC225, + ALC269_TYPE_ALC294, + ALC269_TYPE_ALC700, }; /* @@ -2677,6 +2689,8 @@ static int alc269_parse_auto_config(struct hda_codec *codec) case ALC269_TYPE_ALC255: case ALC269_TYPE_ALC256: case ALC269_TYPE_ALC225: + case ALC269_TYPE_ALC294: + case ALC269_TYPE_ALC700: ssids = alc269_ssids; break; default: @@ -3609,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, static void alc_headset_mode_unplugged(struct hda_codec *codec) { static struct coef_fw coef0255[] = { - WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */ WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */ {} }; + static struct coef_fw coef0255_1[] = { + WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ + {} + }; + static struct coef_fw coef0256[] = { + WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */ + {} + }; static struct coef_fw coef0233[] = { WRITE_COEF(0x1b, 0x0c0b), WRITE_COEF(0x45, 0xc429), @@ -3668,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0255: + alc_process_coef_fw(codec, coef0255_1); + alc_process_coef_fw(codec, coef0255); + break; case 0x10ec0256: + alc_process_coef_fw(codec, coef0256); alc_process_coef_fw(codec, coef0255); break; case 0x10ec0233: @@ -3690,6 +3715,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) alc_process_coef_fw(codec, coef0668); break; case 0x10ec0225: + case 0x10ec0295: alc_process_coef_fw(codec, coef0225); break; } @@ -3790,6 +3816,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; case 0x10ec0225: + case 0x10ec0295: alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0225); @@ -3847,6 +3874,7 @@ static void alc_headset_mode_default(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0225: + case 0x10ec0295: alc_process_coef_fw(codec, coef0225); break; case 0x10ec0255: @@ -3884,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) WRITE_COEFEX(0x57, 0x03, 0x8ea6), {} }; + static struct coef_fw coef0256[] = { + WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */ + WRITE_COEF(0x1b, 0x0c6b), + 
WRITE_COEFEX(0x57, 0x03, 0x8ea6), + {} + }; static struct coef_fw coef0233[] = { WRITE_COEF(0x45, 0xd429), WRITE_COEF(0x1b, 0x0c2b), @@ -3924,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0255: - case 0x10ec0256: alc_process_coef_fw(codec, coef0255); break; + case 0x10ec0256: + alc_process_coef_fw(codec, coef0256); + break; case 0x10ec0233: case 0x10ec0283: alc_process_coef_fw(codec, coef0233); @@ -3950,6 +3986,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) alc_process_coef_fw(codec, coef0688); break; case 0x10ec0225: + case 0x10ec0295: alc_process_coef_fw(codec, coef0225); break; } @@ -3965,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) WRITE_COEFEX(0x57, 0x03, 0x8ea6), {} }; + static struct coef_fw coef0256[] = { + WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */ + WRITE_COEF(0x1b, 0x0c6b), + WRITE_COEFEX(0x57, 0x03, 0x8ea6), + {} + }; static struct coef_fw coef0233[] = { WRITE_COEF(0x45, 0xe429), WRITE_COEF(0x1b, 0x0c2b), @@ -4005,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0255: - case 0x10ec0256: alc_process_coef_fw(codec, coef0255); break; + case 0x10ec0256: + alc_process_coef_fw(codec, coef0256); + break; case 0x10ec0233: case 0x10ec0283: alc_process_coef_fw(codec, coef0233); @@ -4031,6 +4076,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) alc_process_coef_fw(codec, coef0688); break; case 0x10ec0225: + case 0x10ec0295: alc_process_coef_fw(codec, coef0225); break; } @@ -4114,6 +4160,7 @@ static void alc_determine_headset_type(struct hda_codec *codec) is_ctia = (val & 0x1c02) == 0x1c02; break; case 0x10ec0225: + case 0x10ec0295: alc_process_coef_fw(codec, coef0225); msleep(800); val = alc_read_coef_idx(codec, 0x46); @@ -4251,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, static void alc255_set_default_jack_type(struct hda_codec *codec) { /* Set to iphone type */ - static struct coef_fw fw[] = { + static struct coef_fw alc255fw[] = { WRITE_COEF(0x1b, 0x880b), WRITE_COEF(0x45, 0xd089), WRITE_COEF(0x1b, 0x080b), @@ -4259,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec) WRITE_COEF(0x1b, 0x0c0b), {} }; - alc_process_coef_fw(codec, fw); + static struct coef_fw alc256fw[] = { + WRITE_COEF(0x1b, 0x884b), + WRITE_COEF(0x45, 0xd089), + WRITE_COEF(0x1b, 0x084b), + WRITE_COEF(0x46, 0x0004), + WRITE_COEF(0x1b, 0x0c4b), + {} + }; + switch (codec->core.vendor_id) { + case 0x10ec0255: + alc_process_coef_fw(codec, alc255fw); + break; + case 0x10ec0256: + alc_process_coef_fw(codec, alc256fw); + break; + } msleep(30); } @@ -5459,8 +5521,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), - SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), + SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), + SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -5571,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), @@ -5586,6 +5650,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), + SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460), + SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460), + SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), @@ -5671,7 +5738,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {} }; #define ALC225_STANDARD_PINS \ - {0x12, 0xb7a60130}, \ {0x21, 0x04211020} #define ALC256_STANDARD_PINS \ @@ -5696,14 +5762,31 @@ static const struct hda_model_fixup alc269_fixup_models[] = { static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC225_STANDARD_PINS, + {0x12, 0xb7a60130}, + {0x14, 0x901701a0}), + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC225_STANDARD_PINS, + {0x12, 0xb7a60130}, + {0x14, 0x901701b0}), + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC225_STANDARD_PINS, + {0x12, 0xb7a60150}, {0x14, 0x901701a0}), SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC225_STANDARD_PINS, + {0x12, 0xb7a60150}, {0x14, 0x901701b0}), + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC225_STANDARD_PINS, + {0x12, 0xb7a60130}, + {0x1b, 0x90170110}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, {0x14, 0x90170110}, {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x14, 0x90170130}, + {0x21, 0x02211040}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60140}, {0x14, 0x90170110}, {0x21, 0x02211020}), @@ -5756,11 +5839,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60180}, {0x14, 0x90170130}, {0x21, 0x02211040}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x90a60180}, + {0x14, 0x90170120}, + {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60160}, {0x14, 0x90170120}, {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x90a60170}, + {0x14, 0x90170120}, + {0x21, 0x02211030}), + SND_HDA_PIN_QUIRK(0x10ec0256, 
0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ALC256_STANDARD_PINS), SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, {0x12, 0x90a60130}, @@ -6026,8 +6117,22 @@ static int patch_alc269(struct hda_codec *codec) alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ break; case 0x10ec0225: + case 0x10ec0295: spec->codec_variant = ALC269_TYPE_ALC225; break; + case 0x10ec0234: + case 0x10ec0274: + case 0x10ec0294: + spec->codec_variant = ALC269_TYPE_ALC294; + break; + case 0x10ec0700: + case 0x10ec0701: + case 0x10ec0703: + spec->codec_variant = ALC269_TYPE_ALC700; + spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ + alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */ + break; + } if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) { @@ -6942,6 +7047,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269), HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269), HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269), HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269), HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269), HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269), @@ -6952,6 +7058,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0269, "ALC269", patch_alc269), HDA_CODEC_ENTRY(0x10ec0270, "ALC270", patch_alc269), HDA_CODEC_ENTRY(0x10ec0272, "ALC272", patch_alc662), + HDA_CODEC_ENTRY(0x10ec0274, "ALC274", patch_alc269), HDA_CODEC_ENTRY(0x10ec0275, "ALC275", patch_alc269), HDA_CODEC_ENTRY(0x10ec0276, "ALC276", patch_alc269), HDA_CODEC_ENTRY(0x10ec0280, "ALC280", patch_alc269), @@ -6964,6 +7071,8 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269), HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269), HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269), HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269), HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861), HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd), @@ -6979,6 +7088,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662), HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662), HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680), + HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269), HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882), HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880), HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882), diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c index cda27c22812a..eb8fe212e163 100644 --- a/sound/soc/codecs/ak4642.c +++ b/sound/soc/codecs/ak4642.c @@ -560,6 +560,7 @@ static const struct regmap_config ak4642_regmap = { .max_register = FIL1_3, .reg_defaults = ak4642_reg, .num_reg_defaults = NUM_AK4642_REG_DEFAULTS, + .cache_type = REGCACHE_RBTREE, }; static const struct regmap_config ak4643_regmap = { @@ -568,6 +569,7 @@ static const struct regmap_config ak4643_regmap = { .max_register = SPK_MS, .reg_defaults = ak4643_reg, .num_reg_defaults = ARRAY_SIZE(ak4643_reg), + .cache_type = REGCACHE_RBTREE, }; static const struct regmap_config 
ak4648_regmap = { @@ -576,6 +578,7 @@ static const struct regmap_config ak4648_regmap = { .max_register = EQ_FBEQE, .reg_defaults = ak4648_reg, .num_reg_defaults = ARRAY_SIZE(ak4648_reg), + .cache_type = REGCACHE_RBTREE, }; static const struct ak4642_drvdata ak4642_drvdata = { diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c index 8f6276f1f3d4..281db1d07f57 100644 --- a/sound/soc/codecs/wcd9335.c +++ b/sound/soc/codecs/wcd9335.c @@ -143,6 +143,7 @@ static int cpe_debug_mode; #define DAPM_MICBIAS3_STANDALONE "MIC BIAS3 Standalone" #define DAPM_MICBIAS4_STANDALONE "MIC BIAS4 Standalone" +#define DAPM_LDO_H_STANDALONE "LDO_H" module_param(cpe_debug_mode, int, S_IRUGO | S_IWUSR | S_IWGRP); MODULE_PARM_DESC(cpe_debug_mode, "boot cpe in debug mode"); @@ -6186,6 +6187,55 @@ static int __tasha_codec_enable_micbias(struct snd_soc_dapm_widget *w, return 0; } +static int tasha_codec_ldo_h_control(struct snd_soc_dapm_widget *w, + int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec); + + if (SND_SOC_DAPM_EVENT_ON(event)) { + tasha->ldo_h_users++; + + if (tasha->ldo_h_users == 1) + snd_soc_update_bits(codec, WCD9335_LDOH_MODE, + 0x80, 0x80); + } + + if (SND_SOC_DAPM_EVENT_OFF(event)) { + tasha->ldo_h_users--; + + if (tasha->ldo_h_users < 0) + tasha->ldo_h_users = 0; + + if (tasha->ldo_h_users == 0) + snd_soc_update_bits(codec, WCD9335_LDOH_MODE, + 0x80, 0x00); + } + + return 0; +} + +static int tasha_codec_force_enable_ldo_h(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + wcd_resmgr_enable_master_bias(tasha->resmgr); + tasha_codec_ldo_h_control(w, event); + break; + case SND_SOC_DAPM_POST_PMD: + tasha_codec_ldo_h_control(w, event); + wcd_resmgr_disable_master_bias(tasha->resmgr); + break; + } + + return 0; +} + static int tasha_codec_force_enable_micbias(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) @@ -6218,6 +6268,29 @@ static int tasha_codec_enable_micbias(struct snd_soc_dapm_widget *w, return __tasha_codec_enable_micbias(w, event); } +static int tasha_codec_enable_standalone_ldo_h(struct snd_soc_codec *codec, + bool enable) +{ + int rc; + + if (enable) + rc = snd_soc_dapm_force_enable_pin( + snd_soc_codec_get_dapm(codec), + DAPM_LDO_H_STANDALONE); + else + rc = snd_soc_dapm_disable_pin( + snd_soc_codec_get_dapm(codec), + DAPM_LDO_H_STANDALONE); + + if (!rc) + snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec)); + else + dev_err(codec->dev, "%s: ldo_h force %s pin failed\n", + __func__, (enable ? 
"enable" : "disable")); + + return rc; +} + /* * tasha_codec_enable_standalone_micbias - enable micbias standalone * @codec: pointer to codec instance @@ -7772,6 +7845,34 @@ static const struct soc_enum tasha_conn_mad_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tasha_conn_mad_text), tasha_conn_mad_text); +static int tasha_enable_ldo_h_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); + u8 val = 0; + + if (codec) + val = snd_soc_read(codec, WCD9335_LDOH_MODE) & 0x80; + + ucontrol->value.integer.value[0] = !!val; + + return 0; +} + +static int tasha_enable_ldo_h_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); + int value = ucontrol->value.integer.value[0]; + bool enable; + + enable = !!value; + if (codec) + tasha_codec_enable_standalone_ldo_h(codec, enable); + + return 0; +} + static int tasha_mad_input_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -8516,6 +8617,8 @@ static const struct snd_kcontrol_new tasha_snd_controls[] = { SOC_ENUM_EXT("MAD Input", tasha_conn_mad_enum, tasha_mad_input_get, tasha_mad_input_put), + SOC_SINGLE_EXT("LDO_H Enable", SND_SOC_NOPM, 0, 1, 0, + tasha_enable_ldo_h_get, tasha_enable_ldo_h_put), SOC_SINGLE_EXT("DMIC1_CLK_PIN_MODE", SND_SOC_NOPM, 17, 1, 0, tasha_pinctl_mode_get, tasha_pinctl_mode_put), @@ -10750,6 +10853,9 @@ static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = { SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS4_STANDALONE, SND_SOC_NOPM, 0, 0, tasha_codec_force_enable_micbias, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY(DAPM_LDO_H_STANDALONE, SND_SOC_NOPM, 0, 0, + tasha_codec_force_enable_ldo_h, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_MUX("ANC0 FB MUX", SND_SOC_NOPM, 0, 0, &anc0_fb_mux), SND_SOC_DAPM_MUX("ANC1 FB MUX", SND_SOC_NOPM, 0, 0, &anc1_fb_mux), diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c index 7e4cd6ce55a7..9898c1fc7471 100644 --- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c +++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c @@ -665,6 +665,12 @@ static int wcd_cntl_do_boot(struct wcd_dsp_cntl *cntl) __func__); ret = -ETIMEDOUT; goto err_boot; + } else { + /* + * Re-initialize the return code to 0, as in success case, + * it will hold the remaining time for completion timeout + */ + ret = 0; } dev_dbg(codec->dev, "%s: WDSP booted in normal mode\n", __func__); @@ -877,6 +883,108 @@ static void wcd_cntl_debugfs_remove(struct wcd_dsp_cntl *cntl) debugfs_remove(cntl->entry); } +static int wcd_miscdev_release(struct inode *inode, struct file *filep) +{ + struct wcd_dsp_cntl *cntl = container_of(filep->private_data, + struct wcd_dsp_cntl, miscdev); + if (!cntl->m_dev || !cntl->m_ops || + !cntl->m_ops->vote_for_dsp) { + dev_err(cntl->codec->dev, + "%s: DSP not ready to boot\n", __func__); + return -EINVAL; + } + + /* Make sure the DSP users goes to zero upon closing dev node */ + while (cntl->boot_reqs > 0) { + cntl->m_ops->vote_for_dsp(cntl->m_dev, false); + cntl->boot_reqs--; + } + + return 0; +} + +static ssize_t wcd_miscdev_write(struct file *filep, const char __user *ubuf, + size_t count, loff_t *pos) +{ + struct wcd_dsp_cntl *cntl = container_of(filep->private_data, + struct wcd_dsp_cntl, miscdev); + char val[count]; + bool vote; + int ret = 0; + + if (count == 0 || count > 2) { + pr_err("%s: Invalid count = %zd\n", __func__, count); 
+ ret = -EINVAL; + goto done; + } + + ret = copy_from_user(val, ubuf, count); + if (IS_ERR_VALUE(ret)) { + dev_err(cntl->codec->dev, + "%s: copy_from_user failed, err = %d\n", + __func__, ret); + ret = -EFAULT; + goto done; + } + + if (val[0] == '1') { + cntl->boot_reqs++; + vote = true; + } else if (val[0] == '0') { + if (cntl->boot_reqs == 0) { + dev_err(cntl->codec->dev, + "%s: WDSP already disabled\n", __func__); + ret = -EINVAL; + goto done; + } + cntl->boot_reqs--; + vote = false; + } else { + dev_err(cntl->codec->dev, "%s: Invalid value %s\n", + __func__, val); + ret = -EINVAL; + goto done; + } + + dev_dbg(cntl->codec->dev, + "%s: booted = %s, ref_cnt = %d, vote = %s\n", + __func__, cntl->is_wdsp_booted ? "true" : "false", + cntl->boot_reqs, vote ? "true" : "false"); + + if (cntl->m_dev && cntl->m_ops && + cntl->m_ops->vote_for_dsp) + ret = cntl->m_ops->vote_for_dsp(cntl->m_dev, vote); + else + ret = -EINVAL; +done: + if (ret) + return ret; + else + return count; +} + +static const struct file_operations wcd_miscdev_fops = { + .write = wcd_miscdev_write, + .release = wcd_miscdev_release, +}; + +static int wcd_cntl_miscdev_create(struct wcd_dsp_cntl *cntl) +{ + snprintf(cntl->miscdev_name, ARRAY_SIZE(cntl->miscdev_name), + "wcd_dsp%u_control", cntl->dsp_instance); + cntl->miscdev.minor = MISC_DYNAMIC_MINOR; + cntl->miscdev.name = cntl->miscdev_name; + cntl->miscdev.fops = &wcd_miscdev_fops; + cntl->miscdev.parent = cntl->codec->dev; + + return misc_register(&cntl->miscdev); +} + +static void wcd_cntl_miscdev_destroy(struct wcd_dsp_cntl *cntl) +{ + misc_deregister(&cntl->miscdev); +} + static int wcd_control_init(struct device *dev, void *priv_data) { struct wcd_dsp_cntl *cntl = priv_data; @@ -1009,13 +1117,20 @@ static int wcd_ctrl_component_bind(struct device *dev, goto done; } + ret = wcd_cntl_miscdev_create(cntl); + if (IS_ERR_VALUE(ret)) { + dev_err(dev, "%s: misc dev register failed, err = %d\n", + __func__, ret); + goto done; + } + snprintf(wcd_cntl_dir_name, WCD_CNTL_DIR_NAME_LEN_MAX, "%s%d", "wdsp", cntl->dsp_instance); ret = wcd_cntl_sysfs_init(wcd_cntl_dir_name, cntl); if (IS_ERR_VALUE(ret)) { dev_err(dev, "%s: sysfs_init failed, err = %d\n", __func__, ret); - goto done; + goto err_sysfs_init; } wcd_cntl_debugfs_init(wcd_cntl_dir_name, cntl); @@ -1029,7 +1144,7 @@ static int wcd_ctrl_component_bind(struct device *dev, /* Do not treat this as Fatal error */ dev_err(dev, "%s: Failed to create procfs entry %s\n", __func__, proc_name); - goto done; + goto err_sysfs_init; } cntl->ssr_entry.entry = entry; @@ -1048,6 +1163,10 @@ static int wcd_ctrl_component_bind(struct device *dev, } done: return ret; + +err_sysfs_init: + wcd_cntl_miscdev_destroy(cntl); + return ret; } static void wcd_ctrl_component_unbind(struct device *dev, @@ -1077,6 +1196,8 @@ static void wcd_ctrl_component_unbind(struct device *dev, /* Remove the debugfs entries */ wcd_cntl_debugfs_remove(cntl); + /* Remove the misc device */ + wcd_cntl_miscdev_destroy(cntl); } static const struct component_ops wcd_ctrl_component_ops = { diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h index 83c59ed7b676..e934638cc487 100644 --- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h +++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h @@ -105,6 +105,10 @@ struct wcd_dsp_cntl { /* SSR related */ struct wdsp_ssr_entry ssr_entry; struct mutex ssr_mutex; + + /* Misc device related */ + char miscdev_name[256]; + struct miscdevice miscdev; }; void wcd_dsp_cntl_init(struct snd_soc_codec 
*codec, diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c index d713edbbb355..5dbdb9a2df00 100644 --- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c +++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c @@ -870,11 +870,41 @@ static int tavil_get_hph_type(struct snd_kcontrol *kcontrol, return 0; } +static int tavil_hph_impedance_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + uint32_t zl, zr; + bool hphr; + struct soc_multi_mixer_control *mc; + struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); + struct wcd934x_mbhc *wcd934x_mbhc = tavil_soc_get_mbhc(codec); + + if (!wcd934x_mbhc) { + dev_err(codec->dev, "%s: mbhc not initialized!\n", __func__); + return -EINVAL; + } + + mc = (struct soc_multi_mixer_control *)(kcontrol->private_value); + hphr = mc->shift; + wcd_mbhc_get_impedance(&wcd934x_mbhc->wcd_mbhc, &zl, &zr); + dev_dbg(codec->dev, "%s: zl=%u(ohms), zr=%u(ohms)\n", __func__, zl, zr); + ucontrol->value.integer.value[0] = hphr ? zr : zl; + + return 0; +} + static const struct snd_kcontrol_new hph_type_detect_controls[] = { SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0, tavil_get_hph_type, NULL), }; +static const struct snd_kcontrol_new impedance_detect_controls[] = { + SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0, + tavil_hph_impedance_get, NULL), + SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0, + tavil_hph_impedance_get, NULL), +}; + /* * tavil_mbhc_hs_detect: starts mbhc insertion/removal functionality * @codec: handle to snd_soc_codec * @@ -985,14 +1015,15 @@ int tavil_mbhc_init(struct wcd934x_mbhc **mbhc, struct snd_soc_codec *codec, 0; } + (*mbhc) = wcd934x_mbhc; + snd_soc_add_codec_controls(codec, impedance_detect_controls, + ARRAY_SIZE(impedance_detect_controls)); snd_soc_add_codec_controls(codec, hph_type_detect_controls, ARRAY_SIZE(hph_type_detect_controls)); snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04); snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01); - (*mbhc) = wcd934x_mbhc; - return 0; err: devm_kfree(codec->dev, wcd934x_mbhc); diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c index 0213d9ba4a59..6745aec41388 100644 --- a/sound/soc/codecs/wcd934x/wcd934x.c +++ b/sound/soc/codecs/wcd934x/wcd934x.c @@ -4394,14 +4394,14 @@ static const struct tavil_reg_mask_val tavil_pa_disable[] = { }; static const struct tavil_reg_mask_val tavil_ocp_en_seq[] = { - { WCD934X_RX_OCP_CTL, 0x0F, 0x01 }, /* OCP number of attempts is 1 */ + { WCD934X_RX_OCP_CTL, 0x0F, 0x02 }, /* OCP number of attempts is 2 */ { WCD934X_HPH_OCP_CTL, 0xFA, 0x3A }, /* OCP current limit */ { WCD934X_HPH_L_TEST, 0x01, 0x01 }, /* Enable HPHL OCP */ { WCD934X_HPH_R_TEST, 0x01, 0x01 }, /* Enable HPHR OCP */ }; static const struct tavil_reg_mask_val tavil_ocp_en_seq_1[] = { - { WCD934X_RX_OCP_CTL, 0x0F, 0x01 }, /* OCP number of attempts is 1 */ + { WCD934X_RX_OCP_CTL, 0x0F, 0x02 }, /* OCP number of attempts is 2 */ { WCD934X_HPH_OCP_CTL, 0xFA, 0x3A }, /* OCP current limit */ }; @@ -5136,7 +5136,7 @@ static int tavil_mad_input_put(struct snd_kcontrol *kcontrol, snd_soc_update_bits(codec, WCD934X_SOC_MAD_INP_SEL, 0x0F, tavil_mad_input); snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP, - 0x03, mic_bias_found); + 0x07, mic_bias_found); return 0; } @@ -8035,10 +8035,11 @@ static const struct tavil_reg_mask_val tavil_codec_reg_defaults[] = { {WCD934X_CDC_TX6_TX_PATH_CFG1, 0x01, 0x00}, {WCD934X_CDC_TX7_TX_PATH_CFG1, 0x01, 0x00}, {WCD934X_CDC_TX8_TX_PATH_CFG1, 0x01, 0x00}, - 
{WCD934X_RX_OCP_CTL, 0x0F, 0x01}, /* OCP number of attempts is 1 */ + {WCD934X_RX_OCP_CTL, 0x0F, 0x02}, /* OCP number of attempts is 2 */ {WCD934X_HPH_OCP_CTL, 0xFF, 0x3A}, /* OCP current limit */ {WCD934X_HPH_L_TEST, 0x01, 0x01}, {WCD934X_HPH_R_TEST, 0x01, 0x01}, + {WCD934X_CPE_FLL_CONFIG_CTL_2, 0xFF, 0x20}, }; static const struct tavil_reg_mask_val tavil_codec_reg_init_1_1_val[] = { @@ -8676,6 +8677,8 @@ static int tavil_post_reset_cb(struct wcd9xxx *wcd9xxx) regcache_sync(codec->component.regmap); __tavil_cdc_mclk_enable(tavil, false); + tavil_update_cpr_defaults(tavil); + pdata = dev_get_platdata(codec->dev->parent); ret = tavil_handle_pdata(tavil, pdata); if (IS_ERR_VALUE(ret)) diff --git a/sound/soc/msm/msmcobalt.c b/sound/soc/msm/msmcobalt.c index 05a0e27cb45a..c295c26a6fd7 100644 --- a/sound/soc/msm/msmcobalt.c +++ b/sound/soc/msm/msmcobalt.c @@ -2994,7 +2994,7 @@ static bool msm_swap_gnd_mic(struct snd_soc_codec *codec) struct snd_soc_card *card = codec->component.card; struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card); - int value; + int value = 0; if (pdata->us_euro_gpio_p) { value = msm_cdc_pinctrl_get_state(pdata->us_euro_gpio_p); @@ -3253,6 +3253,8 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd) snd_soc_dapm_ignore_suspend(dapm, "Analog Mic6"); snd_soc_dapm_ignore_suspend(dapm, "MADINPUT"); snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_INPUT"); + snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_OUT1"); + snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_OUT2"); snd_soc_dapm_ignore_suspend(dapm, "EAR"); snd_soc_dapm_ignore_suspend(dapm, "LINEOUT1"); snd_soc_dapm_ignore_suspend(dapm, "LINEOUT2"); diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c index 564b67c9f76b..8f99a73fd29f 100644 --- a/sound/soc/msm/qdsp6v2/q6adm.c +++ b/sound/soc/msm/qdsp6v2/q6adm.c @@ -2633,7 +2633,8 @@ int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode) if (port_idx < 0) { pr_err("%s: Invalid port_id 0x%x\n", __func__, payload_map.port_id[i]); - return -EINVAL; + ret = -EINVAL; + goto fail_cmd; } copp_idx = payload_map.copp_idx[i]; copps_list[i] = atomic_read(&this_adm.copp.id[port_idx] @@ -2670,6 +2671,12 @@ int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode) for (i = 0; i < payload_map.num_copps; i++) { port_idx = afe_get_port_index(payload_map.port_id[i]); copp_idx = payload_map.copp_idx[i]; + if (port_idx < 0 || copp_idx < 0 || + (copp_idx > MAX_COPPS_PER_PORT - 1)) { + pr_err("%s: Invalid idx port_idx %d copp_idx %d\n", + __func__, port_idx, copp_idx); + continue; + } if (atomic_read( &this_adm.copp.topology[port_idx][copp_idx]) == ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX) diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c index be0a8b2e3abe..78546fef7b3b 100644 --- a/sound/soc/msm/qdsp6v2/q6afe.c +++ b/sound/soc/msm/qdsp6v2/q6afe.c @@ -1301,7 +1301,7 @@ static int afe_send_port_topology_id(u16 port_id) u32 topology_id = 0; index = q6audio_get_port_index(port_id); - if (index < 0 || index > AFE_MAX_PORTS) { + if (index < 0 || index > AFE_MAX_PORTS - 1) { pr_err("%s: AFE port index[%d] invalid!\n", __func__, index); return -EINVAL; @@ -1545,7 +1545,7 @@ static int afe_send_codec_reg_page_config( static int afe_send_codec_reg_config( struct afe_param_cdc_reg_cfg_data *cdc_reg_cfg) { - int i, j, ret; + int i, j, ret = -EINVAL; int pkt_size, payload_size, reg_per_pkt, num_pkts, num_regs; struct afe_svc_cmd_cdc_reg_cfg *config; struct afe_svc_cmd_set_param *param; @@ -5720,6 +5720,13 @@ int 
afe_get_sp_ex_vi_ftm_data(struct afe_sp_ex_vi_get_param *ex_vi) } index = q6audio_get_port_index(port); + if (index < 0) { + pr_err("%s: invalid index %d port 0x%x\n", __func__, + index, port); + ret = -EINVAL; + goto done; + } + ex_vi->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); ex_vi->hdr.pkt_size = sizeof(*ex_vi); diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c index 3a108eba3b01..9f7d4e2cb532 100644 --- a/sound/usb/usb_audio_qmi_svc.c +++ b/sound/usb/usb_audio_qmi_svc.c @@ -147,6 +147,27 @@ enum mem_type { MEM_XFER_BUF, }; +enum usb_qmi_audio_format { + USB_QMI_PCM_FORMAT_S8 = 0, + USB_QMI_PCM_FORMAT_U8, + USB_QMI_PCM_FORMAT_S16_LE, + USB_QMI_PCM_FORMAT_S16_BE, + USB_QMI_PCM_FORMAT_U16_LE, + USB_QMI_PCM_FORMAT_U16_BE, + USB_QMI_PCM_FORMAT_S24_LE, + USB_QMI_PCM_FORMAT_S24_BE, + USB_QMI_PCM_FORMAT_U24_LE, + USB_QMI_PCM_FORMAT_U24_BE, + USB_QMI_PCM_FORMAT_S24_3LE, + USB_QMI_PCM_FORMAT_S24_3BE, + USB_QMI_PCM_FORMAT_U24_3LE, + USB_QMI_PCM_FORMAT_U24_3BE, + USB_QMI_PCM_FORMAT_S32_LE, + USB_QMI_PCM_FORMAT_S32_BE, + USB_QMI_PCM_FORMAT_U32_LE, + USB_QMI_PCM_FORMAT_U32_BE, +}; + static unsigned long uaudio_get_iova(unsigned long *curr_iova, size_t *curr_iova_size, struct list_head *head, size_t size) { @@ -767,6 +788,51 @@ static void uaudio_dev_release(struct kref *kref) wake_up(&dev->disconnect_wq); } +/* maps audio format received over QMI to asound.h based pcm format */ +int map_pcm_format(unsigned int fmt_received) +{ + switch (fmt_received) { + case USB_QMI_PCM_FORMAT_S8: + return SNDRV_PCM_FORMAT_S8; + case USB_QMI_PCM_FORMAT_U8: + return SNDRV_PCM_FORMAT_U8; + case USB_QMI_PCM_FORMAT_S16_LE: + return SNDRV_PCM_FORMAT_S16_LE; + case USB_QMI_PCM_FORMAT_S16_BE: + return SNDRV_PCM_FORMAT_S16_BE; + case USB_QMI_PCM_FORMAT_U16_LE: + return SNDRV_PCM_FORMAT_U16_LE; + case USB_QMI_PCM_FORMAT_U16_BE: + return SNDRV_PCM_FORMAT_U16_BE; + case USB_QMI_PCM_FORMAT_S24_LE: + return SNDRV_PCM_FORMAT_S24_LE; + case USB_QMI_PCM_FORMAT_S24_BE: + return SNDRV_PCM_FORMAT_S24_BE; + case USB_QMI_PCM_FORMAT_U24_LE: + return SNDRV_PCM_FORMAT_U24_LE; + case USB_QMI_PCM_FORMAT_U24_BE: + return SNDRV_PCM_FORMAT_U24_BE; + case USB_QMI_PCM_FORMAT_S24_3LE: + return SNDRV_PCM_FORMAT_S24_3LE; + case USB_QMI_PCM_FORMAT_S24_3BE: + return SNDRV_PCM_FORMAT_S24_3BE; + case USB_QMI_PCM_FORMAT_U24_3LE: + return SNDRV_PCM_FORMAT_U24_3LE; + case USB_QMI_PCM_FORMAT_U24_3BE: + return SNDRV_PCM_FORMAT_U24_3BE; + case USB_QMI_PCM_FORMAT_S32_LE: + return SNDRV_PCM_FORMAT_S32_LE; + case USB_QMI_PCM_FORMAT_S32_BE: + return SNDRV_PCM_FORMAT_S32_BE; + case USB_QMI_PCM_FORMAT_U32_LE: + return SNDRV_PCM_FORMAT_U32_LE; + case USB_QMI_PCM_FORMAT_U32_BE: + return SNDRV_PCM_FORMAT_U32_BE; + default: + return -EINVAL; + } +} + static int handle_uaudio_stream_req(void *req_h, void *req) { struct qmi_uaudio_stream_req_msg_v01 *req_msg; @@ -775,6 +841,7 @@ static int handle_uaudio_stream_req(void *req_h, void *req) struct snd_usb_audio *chip = NULL; struct uaudio_qmi_svc *svc = uaudio_svc; struct intf_info *info; + int pcm_format; u8 pcm_card_num, pcm_dev_num, direction; int intf_num = -1, ret = 0; @@ -802,6 +869,14 @@ static int handle_uaudio_stream_req(void *req_h, void *req) goto response; } + pcm_format = map_pcm_format(req_msg->audio_format); + if (pcm_format == -EINVAL) { + pr_err("%s: unsupported pcm format received %d\n", + __func__, req_msg->audio_format); + ret = -EINVAL; + goto response; + } + subs = find_snd_usb_substream(pcm_card_num, pcm_dev_num, direction, 
&chip, uaudio_disconnect_cb); if (!subs || !chip || atomic_read(&chip->shutdown)) { @@ -820,7 +895,7 @@ static int handle_uaudio_stream_req(void *req_h, void *req) goto response; } - subs->pcm_format = req_msg->audio_format; + subs->pcm_format = pcm_format; subs->channels = req_msg->number_of_ch; subs->cur_rate = req_msg->bit_rate; diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 39c38cb45b00..eeb21eb43898 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -57,6 +57,7 @@ include/asm-generic/bitops/const_hweight.h include/asm-generic/bitops/fls64.h include/asm-generic/bitops/__fls.h include/asm-generic/bitops/fls.h +include/linux/coresight-pmu.h include/linux/perf_event.h include/linux/list.h include/linux/hash.h diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 0d19d5447d6c..34846e71fdbd 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -77,6 +77,9 @@ include config/utilities.mak # Define NO_AUXTRACE if you do not want AUX area tracing support # # Define NO_LIBBPF if you do not want BPF support +# +# Define NO_CSTRACE if you do not want CoreSight trace decoding support +# # As per kernel Makefile, avoid funny character set dependencies unexport LC_ALL diff --git a/tools/perf/arch/arm/util/Build b/tools/perf/arch/arm/util/Build index d22e3d07de3d..71de3fc40502 100644 --- a/tools/perf/arch/arm/util/Build +++ b/tools/perf/arch/arm/util/Build @@ -2,3 +2,5 @@ libperf-$(CONFIG_DWARF) += dwarf-regs.o libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o + +libperf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c new file mode 100644 index 000000000000..95c38b683159 --- /dev/null +++ b/tools/perf/arch/arm/util/auxtrace.c @@ -0,0 +1,54 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <stdbool.h> +#include <linux/coresight-pmu.h> + +#include "../../util/auxtrace.h" +#include "../../util/evlist.h" +#include "../../util/pmu.h" +#include "cs-etm.h" + +struct auxtrace_record +*auxtrace_record__init(struct perf_evlist *evlist, int *err) +{ + struct perf_pmu *cs_etm_pmu; + struct perf_evsel *evsel; + bool found_etm = false; + + cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME); + + if (evlist) { + evlist__for_each(evlist, evsel) { + if (cs_etm_pmu && + evsel->attr.type == cs_etm_pmu->type) + found_etm = true; + } + } + + if (found_etm) + return cs_etm_record_init(err); + + /* + * Clear 'err' even if we haven't found a cs_etm event - that way perf + * record can still be used even if tracers aren't present. The NULL + * return value will take care of telling the infrastructure HW tracing + * isn't available. 
+ */ + *err = 0; + return NULL; +} diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c new file mode 100644 index 000000000000..13a2188dc5dc --- /dev/null +++ b/tools/perf/arch/arm/util/cs-etm.c @@ -0,0 +1,563 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <api/fs/fs.h> +#include <linux/bitops.h> +#include <linux/coresight-pmu.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/types.h> + +#include "cs-etm.h" +#include "../../perf.h" +#include "../../util/auxtrace.h" +#include "../../util/cpumap.h" +#include "../../util/evlist.h" +#include "../../util/pmu.h" +#include "../../util/thread_map.h" +#include "../../util/cs-etm.h" + +#include <stdlib.h> + +struct cs_etm_recording { + struct auxtrace_record itr; + struct perf_pmu *cs_etm_pmu; + struct perf_evlist *evlist; + bool snapshot_mode; + size_t snapshot_size; +}; + +static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu); + +static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr, + struct record_opts *opts, + const char *str) +{ + struct cs_etm_recording *ptr = + container_of(itr, struct cs_etm_recording, itr); + unsigned long long snapshot_size = 0; + char *endptr; + + if (str) { + snapshot_size = strtoull(str, &endptr, 0); + if (*endptr || snapshot_size > SIZE_MAX) + return -1; + } + + opts->auxtrace_snapshot_mode = true; + opts->auxtrace_snapshot_size = snapshot_size; + ptr->snapshot_size = snapshot_size; + + return 0; +} + +static int cs_etm_recording_options(struct auxtrace_record *itr, + struct perf_evlist *evlist, + struct record_opts *opts) +{ + struct cs_etm_recording *ptr = + container_of(itr, struct cs_etm_recording, itr); + struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; + struct perf_evsel *evsel, *cs_etm_evsel = NULL; + const struct cpu_map *cpus = evlist->cpus; + bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0); + + ptr->evlist = evlist; + ptr->snapshot_mode = opts->auxtrace_snapshot_mode; + + evlist__for_each(evlist, evsel) { + if (evsel->attr.type == cs_etm_pmu->type) { + if (cs_etm_evsel) { + pr_err("There may be only one %s event\n", + CORESIGHT_ETM_PMU_NAME); + return -EINVAL; + } + evsel->attr.freq = 0; + evsel->attr.sample_period = 1; + cs_etm_evsel = evsel; + opts->full_auxtrace = true; + } + } + + /* no need to continue if at least one event of interest was found */ + if (!cs_etm_evsel) + return 0; + + if (opts->use_clockid) { + pr_err("Cannot use clockid (-k option) with %s\n", + CORESIGHT_ETM_PMU_NAME); + return -EINVAL; + } + + /* we are in snapshot mode */ + if (opts->auxtrace_snapshot_mode) { + /* + * No size were given to '-S' or '-m,', so go with + * the default + */ + if (!opts->auxtrace_snapshot_size && + !opts->auxtrace_mmap_pages) { + if (privileged) { + opts->auxtrace_mmap_pages = MiB(4) / page_size; + } else { + opts->auxtrace_mmap_pages = + 
KiB(128) / page_size; + if (opts->mmap_pages == UINT_MAX) + opts->mmap_pages = KiB(256) / page_size; + } + } else if (!opts->auxtrace_mmap_pages && !privileged && + opts->mmap_pages == UINT_MAX) { + opts->mmap_pages = KiB(256) / page_size; + } + + /* + * '-m,xyz' was specified but no snapshot size, so make the + * snapshot size as big as the auxtrace mmap area. + */ + if (!opts->auxtrace_snapshot_size) { + opts->auxtrace_snapshot_size = + opts->auxtrace_mmap_pages * (size_t)page_size; + } + + /* + * -Sxyz was specified but no auxtrace mmap area, so make the + * auxtrace mmap area big enough to fit the requested snapshot + * size. + */ + if (!opts->auxtrace_mmap_pages) { + size_t sz = opts->auxtrace_snapshot_size; + + sz = round_up(sz, page_size) / page_size; + opts->auxtrace_mmap_pages = roundup_pow_of_two(sz); + } + + /* Snapshost size can't be bigger than the auxtrace area */ + if (opts->auxtrace_snapshot_size > + opts->auxtrace_mmap_pages * (size_t)page_size) { + pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n", + opts->auxtrace_snapshot_size, + opts->auxtrace_mmap_pages * (size_t)page_size); + return -EINVAL; + } + + /* Something went wrong somewhere - this shouldn't happen */ + if (!opts->auxtrace_snapshot_size || + !opts->auxtrace_mmap_pages) { + pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n"); + return -EINVAL; + } + } + + /* We are in full trace mode but '-m,xyz' wasn't specified */ + if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) { + if (privileged) { + opts->auxtrace_mmap_pages = MiB(4) / page_size; + } else { + opts->auxtrace_mmap_pages = KiB(128) / page_size; + if (opts->mmap_pages == UINT_MAX) + opts->mmap_pages = KiB(256) / page_size; + } + + } + + /* Validate auxtrace_mmap_pages provided by user */ + if (opts->auxtrace_mmap_pages) { + unsigned int max_page = (KiB(128) / page_size); + size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size; + + if (!privileged && + opts->auxtrace_mmap_pages > max_page) { + opts->auxtrace_mmap_pages = max_page; + pr_err("auxtrace too big, truncating to %d\n", + max_page); + } + + if (!is_power_of_2(sz)) { + pr_err("Invalid mmap size for %s: must be a power of 2\n", + CORESIGHT_ETM_PMU_NAME); + return -EINVAL; + } + } + + if (opts->auxtrace_snapshot_mode) + pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME, + opts->auxtrace_snapshot_size); + + if (cs_etm_evsel) { + /* + * To obtain the auxtrace buffer file descriptor, the auxtrace + * event must come first. + */ + perf_evlist__to_front(evlist, cs_etm_evsel); + /* + * In the case of per-cpu mmaps, we need the CPU on the + * AUX event. 
+ */ + if (!cpu_map__empty(cpus)) + perf_evsel__set_sample_bit(cs_etm_evsel, CPU); + } + + /* Add dummy event to keep tracking */ + if (opts->full_auxtrace) { + struct perf_evsel *tracking_evsel; + int err; + + err = parse_events(evlist, "dummy:u", NULL); + if (err) + return err; + + tracking_evsel = perf_evlist__last(evlist); + perf_evlist__set_tracking_event(evlist, tracking_evsel); + + tracking_evsel->attr.freq = 0; + tracking_evsel->attr.sample_period = 1; + + /* In per-cpu case, always need the time of mmap events etc */ + if (!cpu_map__empty(cpus)) + perf_evsel__set_sample_bit(tracking_evsel, TIME); + } + + return 0; +} + +static u64 cs_etm_get_config(struct auxtrace_record *itr) +{ + u64 config = 0; + struct cs_etm_recording *ptr = + container_of(itr, struct cs_etm_recording, itr); + struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; + struct perf_evlist *evlist = ptr->evlist; + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + if (evsel->attr.type == cs_etm_pmu->type) { + /* + * Variable perf_event_attr::config is assigned to + * ETMv3/PTM. The bit fields have been made to match + * the ETMv3.5 ETMCR register specification. See the + * PMU_FORMAT_ATTR() declarations in + * drivers/hwtracing/coresight/coresight-perf.c for + * details. + */ + config = evsel->attr.config; + break; + } + } + + return config; +} + +static size_t +cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + int i; + int etmv3 = 0, etmv4 = 0; + const struct cpu_map *cpus = evlist->cpus; + + /* cpu map is not empty, we have specific CPUs to work with */ + if (!cpu_map__empty(cpus)) { + for (i = 0; i < cpu_map__nr(cpus); i++) { + if (cs_etm_is_etmv4(itr, cpus->map[i])) + etmv4++; + else + etmv3++; + } + } else { + /* get configuration for all CPUs in the system */ + for (i = 0; i < cpu__max_cpu(); i++) { + if (cs_etm_is_etmv4(itr, i)) + etmv4++; + else + etmv3++; + } + } + + return (CS_ETM_HEADER_SIZE + + (etmv4 * CS_ETMV4_PRIV_SIZE) + + (etmv3 * CS_ETMV3_PRIV_SIZE)); +} + +static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = { + [CS_ETM_ETMCCER] = "mgmt/etmccer", + [CS_ETM_ETMIDR] = "mgmt/etmidr", +}; + +static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = { + [CS_ETMV4_TRCIDR0] = "trcidr/trcidr0", + [CS_ETMV4_TRCIDR1] = "trcidr/trcidr1", + [CS_ETMV4_TRCIDR2] = "trcidr/trcidr2", + [CS_ETMV4_TRCIDR8] = "trcidr/trcidr8", + [CS_ETMV4_TRCAUTHSTATUS] = "mgmt/trcauthstatus", +}; + +static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu) +{ + bool ret = false; + char path[PATH_MAX]; + int scan; + unsigned int val; + struct cs_etm_recording *ptr = + container_of(itr, struct cs_etm_recording, itr); + struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; + + /* Take any of the RO files for ETMv4 and see if it is present */ + snprintf(path, PATH_MAX, "cpu%d/%s", + cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]); + scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val); + + /* The file was read successfully, we have a winner */ + if (scan == 1) + ret = true; + + return ret; +} + +static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path) +{ + char pmu_path[PATH_MAX]; + int scan; + unsigned int val = 0; + + /* Get RO metadata from sysfs */ + snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path); + + scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val); + if (scan != 1) + pr_err("%s: error reading: %s\n", __func__, pmu_path); + + return val; +} + +static void cs_etm_get_metadata(int cpu, u32 *offset, + struct auxtrace_record *itr, + 
struct auxtrace_info_event *info) +{ + u32 increment; + u64 magic; + struct cs_etm_recording *ptr = + container_of(itr, struct cs_etm_recording, itr); + struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; + + /* first see what kind of tracer this cpu is affined to */ + if (cs_etm_is_etmv4(itr, cpu)) { + magic = __perf_cs_etmv4_magic; + /* Get trace configuration register */ + info->priv[*offset + CS_ETMV4_TRCCONFIGR] = + cs_etm_get_config(itr); + /* Get traceID from the framework */ + info->priv[*offset + CS_ETMV4_TRCTRACEIDR] = + coresight_get_trace_id(cpu); + /* Get read-only information from sysFS */ + info->priv[*offset + CS_ETMV4_TRCIDR0] = + cs_etm_get_ro(cs_etm_pmu, cpu, + metadata_etmv4_ro[CS_ETMV4_TRCIDR0]); + info->priv[*offset + CS_ETMV4_TRCIDR1] = + cs_etm_get_ro(cs_etm_pmu, cpu, + metadata_etmv4_ro[CS_ETMV4_TRCIDR1]); + info->priv[*offset + CS_ETMV4_TRCIDR2] = + cs_etm_get_ro(cs_etm_pmu, cpu, + metadata_etmv4_ro[CS_ETMV4_TRCIDR2]); + info->priv[*offset + CS_ETMV4_TRCIDR8] = + cs_etm_get_ro(cs_etm_pmu, cpu, + metadata_etmv4_ro[CS_ETMV4_TRCIDR8]); + info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] = + cs_etm_get_ro(cs_etm_pmu, cpu, + metadata_etmv4_ro + [CS_ETMV4_TRCAUTHSTATUS]); + + /* How much space was used */ + increment = CS_ETMV4_PRIV_MAX; + } else { + magic = __perf_cs_etmv3_magic; + /* Get configuration register */ + info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr); + /* Get traceID from the framework */ + info->priv[*offset + CS_ETM_ETMTRACEIDR] = + coresight_get_trace_id(cpu); + /* Get read-only information from sysFS */ + info->priv[*offset + CS_ETM_ETMCCER] = + cs_etm_get_ro(cs_etm_pmu, cpu, + metadata_etmv3_ro[CS_ETM_ETMCCER]); + info->priv[*offset + CS_ETM_ETMIDR] = + cs_etm_get_ro(cs_etm_pmu, cpu, + metadata_etmv3_ro[CS_ETM_ETMIDR]); + + /* How much space was used */ + increment = CS_ETM_PRIV_MAX; + } + + /* Build generic header portion */ + info->priv[*offset + CS_ETM_MAGIC] = magic; + info->priv[*offset + CS_ETM_CPU] = cpu; + /* Where the next CPU entry should start from */ + *offset += increment; +} + +static int cs_etm_info_fill(struct auxtrace_record *itr, + struct perf_session *session, + struct auxtrace_info_event *info, + size_t priv_size) +{ + int i; + u32 offset; + u64 nr_cpu, type; + const struct cpu_map *cpus = session->evlist->cpus; + struct cs_etm_recording *ptr = + container_of(itr, struct cs_etm_recording, itr); + struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; + + if (priv_size != cs_etm_info_priv_size(itr, session->evlist)) + return -EINVAL; + + if (!session->evlist->nr_mmaps) + return -EINVAL; + + /* If the cpu_map is empty all CPUs are involved */ + nr_cpu = cpu_map__empty(cpus) ? 
cpu__max_cpu() : cpu_map__nr(cpus); + /* Get PMU type as dynamically assigned by the core */ + type = cs_etm_pmu->type; + + /* First fill out the session header */ + info->type = PERF_AUXTRACE_CS_ETM; + info->priv[CS_HEADER_VERSION_0] = 0; + info->priv[CS_PMU_TYPE_CPUS] = type << 32; + info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu; + info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode; + + offset = CS_ETM_SNAPSHOT + 1; + + /* cpu map is not empty, we have specific CPUs to work with */ + if (!cpu_map__empty(cpus)) { + for (i = 0; i < cpu_map__nr(cpus) && offset < priv_size; i++) + cs_etm_get_metadata(cpus->map[i], &offset, itr, info); + } else { + /* get configuration for all CPUs in the system */ + for (i = 0; i < cpu__max_cpu(); i++) + cs_etm_get_metadata(i, &offset, itr, info); + } + + return 0; +} + +static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused, + int idx, struct auxtrace_mmap *mm, + unsigned char *data __maybe_unused, + u64 *head, u64 *old) +{ + pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n", + __func__, idx, (size_t)*old, (size_t)*head, mm->len); + + *old = *head; + *head += mm->len; + + return 0; +} + +static int cs_etm_snapshot_start(struct auxtrace_record *itr) +{ + struct cs_etm_recording *ptr = + container_of(itr, struct cs_etm_recording, itr); + struct perf_evsel *evsel; + + evlist__for_each(ptr->evlist, evsel) { + if (evsel->attr.type == ptr->cs_etm_pmu->type) + return perf_evsel__disable(evsel); + } + return -EINVAL; +} + +static int cs_etm_snapshot_finish(struct auxtrace_record *itr) +{ + struct cs_etm_recording *ptr = + container_of(itr, struct cs_etm_recording, itr); + struct perf_evsel *evsel; + + evlist__for_each(ptr->evlist, evsel) { + int nthreads = thread_map__nr(evsel->threads); + int ncpus = cpu_map__nr(evsel->cpus); + + if (evsel->attr.type == ptr->cs_etm_pmu->type) { + return perf_evsel__enable(evsel, ncpus, nthreads); + } + } + return -EINVAL; +} + +static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused) +{ + return (((u64) rand() << 0) & 0x00000000FFFFFFFFull) | + (((u64) rand() << 32) & 0xFFFFFFFF00000000ull); +} + +static void cs_etm_recording_free(struct auxtrace_record *itr) +{ + struct cs_etm_recording *ptr = + container_of(itr, struct cs_etm_recording, itr); + free(ptr); +} + +static int cs_etm_read_finish(struct auxtrace_record *itr, int idx) +{ + struct cs_etm_recording *ptr = + container_of(itr, struct cs_etm_recording, itr); + struct perf_evsel *evsel; + + evlist__for_each(ptr->evlist, evsel) { + if (evsel->attr.type == ptr->cs_etm_pmu->type) + return perf_evlist__enable_event_idx(ptr->evlist, + evsel, idx); + } + + return -EINVAL; +} + +struct auxtrace_record *cs_etm_record_init(int *err) +{ + struct perf_pmu *cs_etm_pmu; + struct cs_etm_recording *ptr; + + cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME); + + if (!cs_etm_pmu) { + *err = -EINVAL; + goto out; + } + + ptr = zalloc(sizeof(struct cs_etm_recording)); + if (!ptr) { + *err = -ENOMEM; + goto out; + } + + ptr->cs_etm_pmu = cs_etm_pmu; + ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options; + ptr->itr.recording_options = cs_etm_recording_options; + ptr->itr.info_priv_size = cs_etm_info_priv_size; + ptr->itr.info_fill = cs_etm_info_fill; + ptr->itr.find_snapshot = cs_etm_find_snapshot; + ptr->itr.snapshot_start = cs_etm_snapshot_start; + ptr->itr.snapshot_finish = cs_etm_snapshot_finish; + ptr->itr.reference = cs_etm_reference; + ptr->itr.free = cs_etm_recording_free; + ptr->itr.read_finish = cs_etm_read_finish; + + *err = 0; + 
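 + /* + * Minimal usage sketch (an assumption based on how other auxtrace_record + * implementations are driven by the perf core, not part of this patch): + * + * int err; + * struct auxtrace_record *itr = cs_etm_record_init(&err); + * + * if (!itr && err) + * return err; + * + * err is -EINVAL when no CoreSight ETM PMU is registered and -ENOMEM + * when allocation fails, as set above. + */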
return &ptr->itr; +out: + return NULL; +} diff --git a/tools/perf/arch/arm/util/cs-etm.h b/tools/perf/arch/arm/util/cs-etm.h new file mode 100644 index 000000000000..909f486d02d1 --- /dev/null +++ b/tools/perf/arch/arm/util/cs-etm.h @@ -0,0 +1,23 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef INCLUDE__PERF_CS_ETM_H__ +#define INCLUDE__PERF_CS_ETM_H__ + +struct auxtrace_record *cs_etm_record_init(int *err); + +#endif diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c new file mode 100644 index 000000000000..af9fb666b44f --- /dev/null +++ b/tools/perf/arch/arm/util/pmu.c @@ -0,0 +1,34 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <string.h> +#include <linux/coresight-pmu.h> +#include <linux/perf_event.h> + +#include "../../util/pmu.h" + +struct perf_event_attr +*perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused) +{ +#ifdef HAVE_AUXTRACE_SUPPORT + if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) { + /* add ETM default config here */ + pmu->selectable = true; + } +#endif + return NULL; +} diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build index e58123a8912b..f92918154fec 100644 --- a/tools/perf/arch/arm64/util/Build +++ b/tools/perf/arch/arm64/util/Build @@ -1,2 +1,6 @@ libperf-$(CONFIG_DWARF) += dwarf-regs.o libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o + +libperf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \ + ../../arm/util/auxtrace.o \ + ../../arm/util/cs-etm.o diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c index 9b94ce520917..4685a40777cc 100644 --- a/tools/perf/arch/x86/util/intel-bts.c +++ b/tools/perf/arch/x86/util/intel-bts.c @@ -60,7 +60,9 @@ struct branch { u64 misc; }; -static size_t intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused) +static size_t +intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { return INTEL_BTS_AUXTRACE_PRIV_SIZE; } diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c index b02af064f0f9..e5c1f2e21f87 100644 --- a/tools/perf/arch/x86/util/intel-pt.c +++ b/tools/perf/arch/x86/util/intel-pt.c @@ -273,7 +273,9 @@ intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu) return attr; } -static size_t intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused) +static size_t +intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { return INTEL_PT_AUXTRACE_PRIV_SIZE; } diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 99d127fe9c35..ac369c494036 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -626,12 +626,16 @@ static int __cmd_inject(struct perf_inject *inject) ret = perf_session__process_events(session); if (!file_out->is_pipe) { - if (inject->build_ids) { + if (inject->build_ids) perf_header__set_feat(&session->header, HEADER_BUILD_ID); - if (inject->have_auxtrace) - dsos__hit_all(session); - } + /* + * Keep all buildids when there is unprocessed AUX data because + * it is not known which ones the AUX trace hits. 
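 + * If the AUX data is decoded instead (itrace_synth_opts.set), the hits + * are resolved from the synthesized samples, so this pass is skipped.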
+ */ + if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) && + inject->have_auxtrace && !inject->itrace_synth_opts.set) + dsos__hit_all(session); /* * The AUX areas have been removed and replaced with * synthesized hardware events, so clear the feature flag and diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 199fc31e3919..1b9decd5fbf1 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -276,6 +276,7 @@ static int record__open(struct record *rec) struct perf_evlist *evlist = rec->evlist; struct perf_session *session = rec->session; struct record_opts *opts = &rec->opts; + struct perf_evsel_config_term *err_term; int rc = 0; perf_evlist__config(evlist, opts); @@ -305,6 +306,14 @@ try_again: goto out; } + if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) { + error("failed to set config \"%s\" on event %s with %d (%s)\n", + err_term->val.drv_cfg, perf_evsel__name(pos), errno, + strerror_r(errno, msg, sizeof(msg))); + rc = -1; + goto out; + } + if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false, opts->auxtrace_mmap_pages, opts->auxtrace_snapshot_mode) < 0) { diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 72b5deb4bd79..368d1e1561f7 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -92,7 +92,8 @@ static struct { .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | - PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | + PERF_OUTPUT_EVNAME | PERF_OUTPUT_ADDR | + PERF_OUTPUT_IP | PERF_OUTPUT_SYM | PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD, diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index de89ec574361..405c1c1e2975 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -433,6 +433,24 @@ endif grep-libs = $(filter -l%,$(1)) strip-libs = $(filter-out -l%,$(1)) +ifdef CSTRACE_PATH + ifeq (${IS_64_BIT}, 1) + CSTRACE_LNX = linux64 + else + CSTRACE_LNX = linux + endif + ifeq (${DEBUG}, 1) + LIBCSTRACE = -lcstraced_c_api -lcstraced + CSTRACE_LIB_PATH = $(CSTRACE_PATH)/lib/$(CSTRACE_LNX)/dbg + else + LIBCSTRACE = -lcstraced_c_api -lcstraced + CSTRACE_LIB_PATH = $(CSTRACE_PATH)/lib/$(CSTRACE_LNX)/rel + endif + $(call detected,CSTRACE) + $(call detected_var,CSTRACE_PATH) + EXTLIBS += -L$(CSTRACE_LIB_PATH) $(LIBCSTRACE) -lstdc++ +endif + ifdef NO_LIBPERL CFLAGS += -DNO_LIBPERL else @@ -647,9 +665,14 @@ ifdef LIBBABELTRACE endif ifndef NO_AUXTRACE - ifeq ($(feature-get_cpuid), 0) - msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); - NO_AUXTRACE := 1 + ifeq ($(ARCH),x86) + ifeq ($(feature-get_cpuid), 0) + msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); + NO_AUXTRACE := 1 + else + $(call detected,CONFIG_AUXTRACE) + CFLAGS += -DHAVE_AUXTRACE_SUPPORT + endif else $(call detected,CONFIG_AUXTRACE) CFLAGS += -DHAVE_AUXTRACE_SUPPORT diff --git a/tools/perf/scripts/python/cs-trace-disasm.py b/tools/perf/scripts/python/cs-trace-disasm.py new file mode 100644 index 000000000000..429d0d2d7a23 --- /dev/null +++ b/tools/perf/scripts/python/cs-trace-disasm.py @@ -0,0 +1,124 @@ +# perf script event handlers, generated by perf script -g python +# Licensed under the terms of the GNU GPL License version 2 + +# The common_* event handler fields are the most useful fields common to +# all events. They don't necessarily correspond to the 'common_*' fields +# in the format files. 
Those fields not available as handler params can +# be retrieved using Python functions of the form common_*(context). +# See the perf-trace-python Documentation for the list of available functions. + +import os +import sys + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from subprocess import * +from Core import * +import re; + +from optparse import OptionParser + +# +# Add options to specify vmlinux file and the objdump executable +# +parser = OptionParser() +parser.add_option("-k", "--vmlinux", dest="vmlinux_name", + help="path to vmlinux file") +parser.add_option("-d", "--objdump", dest="objdump_name", + help="name of objdump executable (in path)") +(options, args) = parser.parse_args() + +if (options.objdump_name == None): + sys.exit("No objdump executable specified - use -d or --objdump option") + +# initialize global dicts and regular expression + +build_ids = dict(); +mmaps = dict(); +disasm_cache = dict(); +disasm_re = re.compile("^\s*([0-9a-fA-F]+):") + +cache_size = 16*1024 + +def trace_begin(): + cmd_output = check_output(["perf", "buildid-list"]).split('\n'); + bid_re = re.compile("([a-fA-f0-9]+)[ \t]([^ \n]+)") + for line in cmd_output: + m = bid_re.search(line) + if (m != None) : + build_ids[m.group(2)] = \ + os.environ['PERF_BUILDID_DIR'] + \ + m.group(2) + "/" + m.group(1); + + if ((options.vmlinux_name != None) and ("[kernel.kallsyms]" in build_ids)): + build_ids['[kernel.kallsyms]'] = options.vmlinux_name; + else: + del build_ids['[kernel.kallsyms]'] + + mmap_re = re.compile("PERF_RECORD_MMAP2 -?[0-9]+/[0-9]+: \[(0x[0-9a-fA-F]+).*:\s.*\s(.*.so)") + cmd_output= check_output("perf script --show-mmap-events | fgrep PERF_RECORD_MMAP2",shell=True).split('\n') + for line in cmd_output: + m = mmap_re.search(line) + if (m != None) : + mmaps[m.group(2)] = int(m.group(1),0) + + + +def trace_end(): + pass + +def process_event(t): + global cache_size + global options + + sample = t['sample'] + dso = t['dso'] + + # don't let the cache get too big, but don't bother with a fancy replacement policy + # just clear it when it hits max size + + if (len(disasm_cache) > cache_size): + disasm_cache.clear(); + + cpu = format(sample['cpu'], "d"); + addr_range = format(sample['ip'],"x") + ":" + format(sample['addr'],"x"); + + try: + disasm_output = disasm_cache[addr_range]; + except: + try: + fname = build_ids[dso]; + except KeyError: + if (dso == '[kernel.kallsyms]'): + return; + fname = dso; + + if (dso in mmaps): + offset = mmaps[dso]; + disasm = [options.objdump_name,"-d","-z", "--adjust-vma="+format(offset,"#x"),"--start-address="+format(sample['ip'],"#x"),"--stop-address="+format(sample['addr'],"#x"), fname] + else: + offset = 0 + disasm = [options.objdump_name,"-d","-z", "--start-address="+format(sample['ip'],"#x"),"--stop-address="+format(sample['addr'],"#x"),fname] + disasm_output = check_output(disasm).split('\n') + disasm_cache[addr_range] = disasm_output; + + print "FILE: %s\tCPU: %s" % (dso, cpu); + for line in disasm_output: + m = disasm_re.search(line) + if (m != None) : + try: + print "\t",line + except: + exit(1); + else: + continue; + +def trace_unhandled(event_name, context, event_fields_dict): + print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]) + +def print_header(event_name, cpu, secs, nsecs, pid, comm): + print "print_header" + print "%-20s %5u %05u.%09u %8u %-20s " % \ + (event_name, cpu, secs, nsecs, pid, comm), diff --git 
a/tools/perf/scripts/python/cs-trace-ranges.py b/tools/perf/scripts/python/cs-trace-ranges.py new file mode 100644 index 000000000000..c8edacba0f83 --- /dev/null +++ b/tools/perf/scripts/python/cs-trace-ranges.py @@ -0,0 +1,44 @@ +# +# Copyright(C) 2016 Linaro Limited. All rights reserved. +# Author: Tor Jeremiassen <tor.jeremiassen@linaro.org> +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 as published by +# the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program. If not, see <http://www.gnu.org/licenses/>. +# + +import os +import sys + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * + +def trace_begin(): + pass; + +def trace_end(): + pass + +def process_event(t): + + sample = t['sample'] + + print "range:",format(sample['ip'],"x"),"-",format(sample['addr'],"x") + +def trace_unhandled(event_name, context, event_fields_dict): + print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]) + +def print_header(event_name, cpu, secs, nsecs, pid, comm): + print "print_header" + print "%-20s %5u %05u.%09u %8u %-20s " % \ + (event_name, cpu, secs, nsecs, pid, comm), diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c index ec16f7812c8b..6ebfdee3e2c6 100644 --- a/tools/perf/tests/bpf.c +++ b/tools/perf/tests/bpf.c @@ -146,7 +146,7 @@ prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name) return obj; } -static int __test__bpf(int index) +static int __test__bpf(int idx) { int ret; void *obj_buf; @@ -154,27 +154,27 @@ static int __test__bpf(int index) struct bpf_object *obj; ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz, - bpf_testcase_table[index].prog_id, + bpf_testcase_table[idx].prog_id, true); if (ret != TEST_OK || !obj_buf || !obj_buf_sz) { pr_debug("Unable to get BPF object, %s\n", - bpf_testcase_table[index].msg_compile_fail); - if (index == 0) + bpf_testcase_table[idx].msg_compile_fail); + if (idx == 0) return TEST_SKIP; else return TEST_FAIL; } obj = prepare_bpf(obj_buf, obj_buf_sz, - bpf_testcase_table[index].name); + bpf_testcase_table[idx].name); if (!obj) { ret = TEST_FAIL; goto out; } ret = do_test(obj, - bpf_testcase_table[index].target_func, - bpf_testcase_table[index].expect_result); + bpf_testcase_table[idx].target_func, + bpf_testcase_table[idx].expect_result); out: bpf__clear(); return ret; diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c index bc4cf507cde5..366e38ba8b49 100644 --- a/tools/perf/tests/llvm.c +++ b/tools/perf/tests/llvm.c @@ -50,7 +50,7 @@ static struct { int test_llvm__fetch_bpf_obj(void **p_obj_buf, size_t *p_obj_buf_sz, - enum test_llvm__testcase index, + enum test_llvm__testcase idx, bool force) { const char *source; @@ -59,11 +59,11 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf, char *tmpl_new = NULL, *clang_opt_new = NULL; int err, old_verbose, ret = TEST_FAIL; - if (index >= __LLVM_TESTCASE_MAX) + if (idx >= __LLVM_TESTCASE_MAX) return TEST_FAIL; - source = bpf_source_table[index].source; - desc = bpf_source_table[index].desc; + source = bpf_source_table[idx].source; + desc = bpf_source_table[idx].desc; 
perf_config(perf_config_cb, NULL); diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 591b3fe3ed49..a8d806503a45 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -84,6 +84,8 @@ libperf-$(CONFIG_AUXTRACE) += auxtrace.o libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/ libperf-$(CONFIG_AUXTRACE) += intel-pt.o libperf-$(CONFIG_AUXTRACE) += intel-bts.o +libperf-$(CONFIG_AUXTRACE) += cs-etm.o +libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder/ libperf-y += parse-branch-options.o libperf-y += parse-regs-options.o diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index 7f10430af39c..67551225764e 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -49,6 +49,7 @@ #include "intel-pt.h" #include "intel-bts.h" +#include "cs-etm.h" int auxtrace_mmap__mmap(struct auxtrace_mmap *mm, struct auxtrace_mmap_params *mp, @@ -478,10 +479,11 @@ void auxtrace_heap__pop(struct auxtrace_heap *heap) heap_array[last].ordinal); } -size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr) +size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr, + struct perf_evlist *evlist) { if (itr) - return itr->info_priv_size(itr); + return itr->info_priv_size(itr, evlist); return 0; } @@ -852,7 +854,7 @@ int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, int err; pr_debug2("Synthesizing auxtrace information\n"); - priv_size = auxtrace_record__info_priv_size(itr); + priv_size = auxtrace_record__info_priv_size(itr, session->evlist); ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size); if (!ev) return -ENOMEM; @@ -891,6 +893,8 @@ int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused, return intel_pt_process_auxtrace_info(event, session); case PERF_AUXTRACE_INTEL_BTS: return intel_bts_process_auxtrace_info(event, session); + case PERF_AUXTRACE_CS_ETM: + return cs_etm__process_auxtrace_info(event, session); case PERF_AUXTRACE_UNKNOWN: default: return -EINVAL; diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h index b86f90db1352..adb53e7bcabf 100644 --- a/tools/perf/util/auxtrace.h +++ b/tools/perf/util/auxtrace.h @@ -41,6 +41,7 @@ enum auxtrace_type { PERF_AUXTRACE_UNKNOWN, PERF_AUXTRACE_INTEL_PT, PERF_AUXTRACE_INTEL_BTS, + PERF_AUXTRACE_CS_ETM, }; enum itrace_period_type { @@ -293,7 +294,8 @@ struct auxtrace_record { int (*recording_options)(struct auxtrace_record *itr, struct perf_evlist *evlist, struct record_opts *opts); - size_t (*info_priv_size)(struct auxtrace_record *itr); + size_t (*info_priv_size)(struct auxtrace_record *itr, + struct perf_evlist *evlist); int (*info_fill)(struct auxtrace_record *itr, struct perf_session *session, struct auxtrace_info_event *auxtrace_info, @@ -429,7 +431,8 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr, int auxtrace_record__options(struct auxtrace_record *itr, struct perf_evlist *evlist, struct record_opts *opts); -size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr); +size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr, + struct perf_evlist *evlist); int auxtrace_record__info_fill(struct auxtrace_record *itr, struct perf_session *session, struct auxtrace_info_event *auxtrace_info, diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index 6a7e273a514a..52d320e922e3 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c @@ -145,7 +145,7 @@ static int asnprintf(char **strp, size_t size, const char *fmt, ...) 
return ret; } -static char *build_id__filename(const char *sbuild_id, char *bf, size_t size) +char *build_id__filename(const char *sbuild_id, char *bf, size_t size) { char *tmp = bf; int ret = asnprintf(&bf, size, "%s/.build-id/%.2s/%s", buildid_dir, diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h index 27a14a8a945b..eb2c2b6e1dab 100644 --- a/tools/perf/util/build-id.h +++ b/tools/perf/util/build-id.h @@ -11,6 +11,7 @@ extern struct perf_tool build_id__mark_dso_hit_ops; struct dso; +char *build_id__filename(const char *sbuild_id, char *bf, size_t size); int build_id__sprintf(const u8 *build_id, int len, char *bf); int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id); int filename__sprintf_build_id(const char *pathname, char *sbuild_id); diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 10af1e7524fb..6523e1a8eea5 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -7,6 +7,10 @@ #include <stdlib.h> #include "asm/bug.h" +static int max_cpu_num; +static int max_node_num; +static int *cpunode_map; + static struct cpu_map *cpu_map__default_new(void) { struct cpu_map *cpus; @@ -435,6 +439,32 @@ out: pr_err("Failed to read max nodes, using default of %d\n", max_node_num); } +int cpu__max_node(void) +{ + if (unlikely(!max_node_num)) + set_max_node_num(); + + return max_node_num; +} + +int cpu__max_cpu(void) +{ + if (unlikely(!max_cpu_num)) + set_max_cpu_num(); + + return max_cpu_num; +} + +int cpu__get_node(int cpu) +{ + if (unlikely(cpunode_map == NULL)) { + pr_debug("cpu_map not initialized\n"); + return -1; + } + + return cpunode_map[cpu]; +} + static int init_cpunode_map(void) { int i; diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 85f7772457fa..d6184ba929b6 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -56,37 +56,11 @@ static inline bool cpu_map__empty(const struct cpu_map *map) return map ? map->map[0] == -1 : true; } -int max_cpu_num; -int max_node_num; -int *cpunode_map; - int cpu__setup_cpunode_map(void); -static inline int cpu__max_node(void) -{ - if (unlikely(!max_node_num)) - pr_debug("cpu_map not initialized\n"); - - return max_node_num; -} - -static inline int cpu__max_cpu(void) -{ - if (unlikely(!max_cpu_num)) - pr_debug("cpu_map not initialized\n"); - - return max_cpu_num; -} - -static inline int cpu__get_node(int cpu) -{ - if (unlikely(cpunode_map == NULL)) { - pr_debug("cpu_map not initialized\n"); - return -1; - } - - return cpunode_map[cpu]; -} +int cpu__max_node(void); +int cpu__max_cpu(void); +int cpu__get_node(int cpu); int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, int (*f)(struct cpu_map *map, int cpu, void *data), diff --git a/tools/perf/util/cs-etm-decoder/Build b/tools/perf/util/cs-etm-decoder/Build new file mode 100644 index 000000000000..d4896fec940c --- /dev/null +++ b/tools/perf/util/cs-etm-decoder/Build @@ -0,0 +1,7 @@ +ifeq ($(CSTRACE_PATH),) +libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder-stub.o +else +CFLAGS_cs-etm-decoder.o += -I$(CSTRACE_PATH)/include +libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o +endif + diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder-stub.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder-stub.c new file mode 100644 index 000000000000..38f2b756fe10 --- /dev/null +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder-stub.c @@ -0,0 +1,91 @@ +/* + * + * Copyright(C) 2015 Linaro Limited. All rights reserved. 
+ * Author: Tor Jeremiassen <tor.jeremiassen@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU GEneral Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <stdlib.h> + +#include "cs-etm-decoder.h" +#include "../util.h" + + +struct cs_etm_decoder +{ + void *state; + int dummy; +}; + +int cs_etm_decoder__flush(struct cs_etm_decoder *decoder) +{ + (void) decoder; + return -1; +} + +int cs_etm_decoder__add_bin_file(struct cs_etm_decoder *decoder, uint64_t offset, uint64_t address, uint64_t len, const char *fname) +{ + (void) decoder; + (void) offset; + (void) address; + (void) len; + (void) fname; + return -1; +} + +const struct cs_etm_state *cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder, + uint64_t indx, + const uint8_t *buf, + size_t len, + size_t *consumed) +{ + (void) decoder; + (void) indx; + (void) buf; + (void) len; + (void) consumed; + return NULL; +} + +int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder, uint64_t address, uint64_t len, cs_etm_mem_cb_type cb_func) +{ + (void) decoder; + (void) address; + (void) len; + (void) cb_func; + return -1; +} + +int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder, + struct cs_etm_packet *packet) +{ + (void) decoder; + (void) packet; + return -1; +} + +struct cs_etm_decoder *cs_etm_decoder__new(uint32_t num_cpu, struct cs_etm_decoder_params *d_params, struct cs_etm_trace_params t_params[]) +{ + (void) num_cpu; + (void) d_params; + (void) t_params; + return NULL; +} + + +void cs_etm_decoder__free(struct cs_etm_decoder *decoder) +{ + (void) decoder; + return; +} diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c new file mode 100644 index 000000000000..c6f23d63590d --- /dev/null +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c @@ -0,0 +1,503 @@ +/* + * + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Tor Jeremiassen <tor.jeremiassen@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU GEneral Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/err.h> +#include <stdlib.h> + +#include "../cs-etm.h" +#include "cs-etm-decoder.h" +#include "../util.h" +#include "../util/intlist.h" + +#include "c_api/opencsd_c_api.h" +#include "ocsd_if_types.h" +#include "etmv4/trc_pkt_types_etmv4.h" + +#define MAX_BUFFER 1024 + + + +struct cs_etm_decoder +{ + struct cs_etm_state state; + dcd_tree_handle_t dcd_tree; + void (*packet_printer)(const char *); + cs_etm_mem_cb_type mem_access; + ocsd_datapath_resp_t prev_return; + size_t prev_processed; + bool trace_on; + bool discontinuity; + struct cs_etm_packet packet_buffer[MAX_BUFFER]; + uint32_t packet_count; + uint32_t head; + uint32_t tail; + uint32_t end_tail; +}; + +static uint32_t cs_etm_decoder__mem_access(const void *context, + const ocsd_vaddr_t address, + const ocsd_mem_space_acc_t mem_space, + const uint32_t req_size, + uint8_t *buffer) +{ + struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context; + (void) mem_space; + + return decoder->mem_access(decoder->state.data,address,req_size,buffer); +} + +static int cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params, + ocsd_etmv4_cfg *config) +{ + config->reg_configr = params->reg_configr; + config->reg_traceidr = params->reg_traceidr; + config->reg_idr0 = params->reg_idr0; + config->reg_idr1 = params->reg_idr1; + config->reg_idr2 = params->reg_idr2; + config->reg_idr8 = params->reg_idr8; + + config->reg_idr9 = 0; + config->reg_idr10 = 0; + config->reg_idr11 = 0; + config->reg_idr12 = 0; + config->reg_idr13 = 0; + config->arch_ver = ARCH_V8; + config->core_prof = profile_CortexA; + + return 0; +} + +static int cs_etm_decoder__flush_packet(struct cs_etm_decoder *decoder) +{ + int err = 0; + + if (decoder == NULL) return -1; + + if (decoder->packet_count >= 31) return -1; + + if (decoder->tail != decoder->end_tail) { + decoder->tail = (decoder->tail + 1) & (MAX_BUFFER - 1); + decoder->packet_count++; + } + + return err; +} + +int cs_etm_decoder__flush(struct cs_etm_decoder *decoder) +{ + return cs_etm_decoder__flush_packet(decoder); +} + +static int cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder, + const ocsd_generic_trace_elem *elem, + const uint8_t trace_chan_id, + enum cs_etm_sample_type sample_type) +{ + int err = 0; + uint32_t et = 0; + struct int_node *inode = NULL; + + if (decoder == NULL) return -1; + + if (decoder->packet_count >= 31) return -1; + + err = cs_etm_decoder__flush_packet(decoder); + + if (err) return err; + + et = decoder->end_tail; + /* Search the RB tree for the cpu associated with this traceID */ + inode = intlist__find(traceid_list, trace_chan_id); + if (!inode) + return PTR_ERR(inode); + + decoder->packet_buffer[et].sample_type = sample_type; + decoder->packet_buffer[et].start_addr = elem->st_addr; + decoder->packet_buffer[et].end_addr = elem->en_addr; + decoder->packet_buffer[et].exc = false; + decoder->packet_buffer[et].exc_ret = false; + decoder->packet_buffer[et].cpu = *((int*)inode->priv); + + et = (et + 1) & (MAX_BUFFER - 1); + + decoder->end_tail = et; + + return err; +} + +static int cs_etm_decoder__mark_exception(struct cs_etm_decoder *decoder) +{ + int err = 0; + + if (decoder == NULL) return -1; + + decoder->packet_buffer[decoder->end_tail].exc = true; + + return err; +} + +static int cs_etm_decoder__mark_exception_return(struct cs_etm_decoder *decoder) +{ + int err = 0; + + if (decoder == NULL) return -1; + + decoder->packet_buffer[decoder->end_tail].exc_ret = true; + + return err; +} + +static ocsd_datapath_resp_t 
cs_etm_decoder__gen_trace_elem_printer( + const void *context, + const ocsd_trc_index_t indx, + const uint8_t trace_chan_id, + const ocsd_generic_trace_elem *elem) +{ + ocsd_datapath_resp_t resp = OCSD_RESP_CONT; + struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context; + + (void) indx; + (void) trace_chan_id; + + switch (elem->elem_type) { + case OCSD_GEN_TRC_ELEM_UNKNOWN: + break; + case OCSD_GEN_TRC_ELEM_NO_SYNC: + decoder->trace_on = false; + break; + case OCSD_GEN_TRC_ELEM_TRACE_ON: + decoder->trace_on = true; + break; + //case OCSD_GEN_TRC_ELEM_TRACE_OVERFLOW: + //decoder->trace_on = false; + //decoder->discontinuity = true; + //break; + case OCSD_GEN_TRC_ELEM_INSTR_RANGE: + cs_etm_decoder__buffer_packet(decoder,elem, + trace_chan_id, CS_ETM_RANGE); + resp = OCSD_RESP_WAIT; + break; + case OCSD_GEN_TRC_ELEM_EXCEPTION: + cs_etm_decoder__mark_exception(decoder); + break; + case OCSD_GEN_TRC_ELEM_EXCEPTION_RET: + cs_etm_decoder__mark_exception_return(decoder); + break; + case OCSD_GEN_TRC_ELEM_PE_CONTEXT: + case OCSD_GEN_TRC_ELEM_EO_TRACE: + case OCSD_GEN_TRC_ELEM_ADDR_NACC: + case OCSD_GEN_TRC_ELEM_TIMESTAMP: + case OCSD_GEN_TRC_ELEM_CYCLE_COUNT: + //case OCSD_GEN_TRC_ELEM_TS_WITH_CC: + case OCSD_GEN_TRC_ELEM_EVENT: + default: + break; + } + + decoder->state.err = 0; + + return resp; +} + +static ocsd_datapath_resp_t cs_etm_decoder__etmv4i_packet_printer( + const void *context, + const ocsd_datapath_op_t op, + const ocsd_trc_index_t indx, + const ocsd_etmv4_i_pkt *pkt) +{ + const size_t PACKET_STR_LEN = 1024; + ocsd_datapath_resp_t ret = OCSD_RESP_CONT; + char packet_str[PACKET_STR_LEN]; + size_t offset; + struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context; + + sprintf(packet_str,"%ld: ", (long int) indx); + offset = strlen(packet_str); + + switch(op) { + case OCSD_OP_DATA: + if (ocsd_pkt_str(OCSD_PROTOCOL_ETMV4I, + (void *)pkt, + packet_str+offset, + PACKET_STR_LEN-offset) != OCSD_OK) + ret = OCSD_RESP_FATAL_INVALID_PARAM; + break; + case OCSD_OP_EOT: + sprintf(packet_str,"**** END OF TRACE ****\n"); + break; + case OCSD_OP_FLUSH: + case OCSD_OP_RESET: + default: + break; + } + + decoder->packet_printer(packet_str); + + return ret; +} + +static int cs_etm_decoder__create_etmv4i_packet_printer(struct cs_etm_decoder_params *d_params, struct cs_etm_trace_params *t_params, + + struct cs_etm_decoder *decoder) +{ + ocsd_etmv4_cfg trace_config; + int ret = 0; + + if (d_params->packet_printer == NULL) + return -1; + + ret = cs_etm_decoder__gen_etmv4_config(t_params,&trace_config); + + if (ret != 0) + return -1; + + decoder->packet_printer = d_params->packet_printer; + + ret = ocsd_dt_create_etmv4i_pkt_proc(decoder->dcd_tree, + &trace_config, + cs_etm_decoder__etmv4i_packet_printer, + decoder); + + return ret; +} + +static int cs_etm_decoder__create_etmv4i_packet_decoder(struct cs_etm_decoder_params *d_params, struct cs_etm_trace_params *t_params, + struct cs_etm_decoder *decoder) +{ + ocsd_etmv4_cfg trace_config; + int ret = 0; + decoder->packet_printer = d_params->packet_printer; + + ret = cs_etm_decoder__gen_etmv4_config(t_params,&trace_config); + + if (ret != 0) + return -1; + + ret = ocsd_dt_create_etmv4i_decoder(decoder->dcd_tree,&trace_config); + + if (ret != OCSD_OK) + return -1; + + ret = ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree, + cs_etm_decoder__gen_trace_elem_printer, decoder); + return ret; +} + +int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder, uint64_t address, uint64_t len, cs_etm_mem_cb_type cb_func) +{ + int err; + + 
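 + /* + * Remember the caller's callback, then have the decode tree route + * memory accesses in [address, address + len - 1] through + * cs_etm_decoder__mem_access(), which forwards to it. + */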
decoder->mem_access = cb_func; + err = ocsd_dt_add_callback_mem_acc(decoder->dcd_tree, + address, + address+len-1, + OCSD_MEM_SPACE_ANY, + cs_etm_decoder__mem_access, + decoder); + return err; +} + + +int cs_etm_decoder__add_bin_file(struct cs_etm_decoder *decoder, uint64_t offset, uint64_t address, uint64_t len, const char *fname) +{ + int err = 0; + file_mem_region_t region; + + (void) len; + if (NULL == decoder) + return -1; + + if (NULL == decoder->dcd_tree) + return -1; + + region.file_offset = offset; + region.start_address = address; + region.region_size = len; + err = ocsd_dt_add_binfile_region_mem_acc(decoder->dcd_tree, + ®ion, + 1, + OCSD_MEM_SPACE_ANY, + fname); + + return err; +} + +const struct cs_etm_state *cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder, + uint64_t indx, + const uint8_t *buf, + size_t len, + size_t *consumed) +{ + int ret = 0; + ocsd_datapath_resp_t dp_ret = decoder->prev_return; + size_t processed = 0; + + if (decoder->packet_count > 0) { + decoder->state.err = ret; + *consumed = processed; + return &(decoder->state); + } + + while ((processed < len) && (0 == ret)) { + + if (OCSD_DATA_RESP_IS_CONT(dp_ret)) { + uint32_t count; + dp_ret = ocsd_dt_process_data(decoder->dcd_tree, + OCSD_OP_DATA, + indx+processed, + len - processed, + &buf[processed], + &count); + processed += count; + + } else if (OCSD_DATA_RESP_IS_WAIT(dp_ret)) { + dp_ret = ocsd_dt_process_data(decoder->dcd_tree, + OCSD_OP_FLUSH, + 0, + 0, + NULL, + NULL); + break; + } else { + ret = -1; + } + } + if (OCSD_DATA_RESP_IS_WAIT(dp_ret)) { + if (OCSD_DATA_RESP_IS_CONT(decoder->prev_return)) { + decoder->prev_processed = processed; + } + processed = 0; + } else if (OCSD_DATA_RESP_IS_WAIT(decoder->prev_return)) { + processed = decoder->prev_processed; + decoder->prev_processed = 0; + } + *consumed = processed; + decoder->prev_return = dp_ret; + decoder->state.err = ret; + return &(decoder->state); +} + +int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder, + struct cs_etm_packet *packet) +{ + if (decoder->packet_count == 0) return -1; + + if (packet == NULL) return -1; + + *packet = decoder->packet_buffer[decoder->head]; + + decoder->head = (decoder->head + 1) & (MAX_BUFFER - 1); + + decoder->packet_count--; + + return 0; +} + +static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder) +{ + unsigned i; + + decoder->head = 0; + decoder->tail = 0; + decoder->end_tail = 0; + decoder->packet_count = 0; + for (i = 0; i < MAX_BUFFER; i++) { + decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL; + decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL; + decoder->packet_buffer[i].exc = false; + decoder->packet_buffer[i].exc_ret = false; + decoder->packet_buffer[i].cpu = INT_MIN; + } +} + +struct cs_etm_decoder *cs_etm_decoder__new(uint32_t num_cpu, struct cs_etm_decoder_params *d_params, struct cs_etm_trace_params t_params[]) +{ + struct cs_etm_decoder *decoder; + ocsd_dcd_tree_src_t format; + uint32_t flags; + int ret; + size_t i; + + if ((t_params == NULL) || (d_params == 0)) { + return NULL; + } + + decoder = zalloc(sizeof(struct cs_etm_decoder)); + + if (decoder == NULL) { + return NULL; + } + + decoder->state.data = d_params->data; + decoder->prev_return = OCSD_RESP_CONT; + cs_etm_decoder__clear_buffer(decoder); + format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED : + OCSD_TRC_SRC_SINGLE); + flags = 0; + flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0); + flags |= (d_params->hsyncs ? 
OCSD_DFRMTR_HAS_HSYNCS : 0); + flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0); + + /* Create decode tree for the data source */ + decoder->dcd_tree = ocsd_create_dcd_tree(format,flags); + + if (decoder->dcd_tree == 0) { + goto err_free_decoder; + } + + for (i = 0; i < num_cpu; ++i) { + switch (t_params[i].protocol) + { + case CS_ETM_PROTO_ETMV4i: + if (d_params->operation == CS_ETM_OPERATION_PRINT) { + ret = cs_etm_decoder__create_etmv4i_packet_printer(d_params,&t_params[i],decoder); + } else if (d_params->operation == CS_ETM_OPERATION_DECODE) { + ret = cs_etm_decoder__create_etmv4i_packet_decoder(d_params,&t_params[i],decoder); + } else { + ret = -CS_ETM_ERR_PARAM; + } + if (ret != 0) { + goto err_free_decoder_tree; + } + break; + default: + goto err_free_decoder_tree; + break; + } + } + + + return decoder; + +err_free_decoder_tree: + ocsd_destroy_dcd_tree(decoder->dcd_tree); +err_free_decoder: + free(decoder); + return NULL; +} + + +void cs_etm_decoder__free(struct cs_etm_decoder *decoder) +{ + if (decoder == NULL) return; + + ocsd_destroy_dcd_tree(decoder->dcd_tree); + decoder->dcd_tree = NULL; + + free(decoder); +} diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h new file mode 100644 index 000000000000..38c5ae84ed07 --- /dev/null +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h @@ -0,0 +1,118 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Tor Jeremiassen <tor.jeremiassen@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU GEneral Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef INCLUDE__CS_ETM_DECODER_H__ +#define INCLUDE__CS_ETM_DECODER_H__ + +#include <linux/types.h> +#include <stdio.h> + +struct cs_etm_decoder; + +struct cs_etm_buffer { + const unsigned char *buf; + size_t len; + uint64_t offset; + //bool consecutive; + uint64_t ref_timestamp; + //uint64_t trace_nr; +}; + +enum cs_etm_sample_type { + CS_ETM_RANGE = 1 << 0, +}; + +struct cs_etm_state { + int err; + void *data; + unsigned isa; + uint64_t start; + uint64_t end; + uint64_t timestamp; +}; + +struct cs_etm_packet { + enum cs_etm_sample_type sample_type; + uint64_t start_addr; + uint64_t end_addr; + bool exc; + bool exc_ret; + int cpu; +}; + + +struct cs_etm_queue; +typedef uint32_t (*cs_etm_mem_cb_type)(struct cs_etm_queue *, uint64_t, size_t, uint8_t *); + +struct cs_etm_trace_params { + void *etmv4i_packet_handler; + uint32_t reg_idr0; + uint32_t reg_idr1; + uint32_t reg_idr2; + uint32_t reg_idr8; + uint32_t reg_configr; + uint32_t reg_traceidr; + int protocol; +}; + +struct cs_etm_decoder_params { + int operation; + void (*packet_printer)(const char *); + cs_etm_mem_cb_type mem_acc_cb; + bool formatted; + bool fsyncs; + bool hsyncs; + bool frame_aligned; + void *data; +}; + +enum { + CS_ETM_PROTO_ETMV3 = 1, + CS_ETM_PROTO_ETMV4i, + CS_ETM_PROTO_ETMV4d, +}; + +enum { + CS_ETM_OPERATION_PRINT = 1, + CS_ETM_OPERATION_DECODE, +}; + +enum { + CS_ETM_ERR_NOMEM = 1, + CS_ETM_ERR_NODATA, + CS_ETM_ERR_PARAM, +}; + + +struct cs_etm_decoder *cs_etm_decoder__new(uint32_t num_cpu, struct cs_etm_decoder_params *,struct cs_etm_trace_params []); + +int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *, uint64_t, uint64_t, cs_etm_mem_cb_type); + +int cs_etm_decoder__flush(struct cs_etm_decoder *); +void cs_etm_decoder__free(struct cs_etm_decoder *); +int cs_etm_decoder__get_packet(struct cs_etm_decoder *, struct cs_etm_packet *); + +int cs_etm_decoder__add_bin_file(struct cs_etm_decoder *, uint64_t, uint64_t, uint64_t, const char *); + +const struct cs_etm_state *cs_etm_decoder__process_data_block(struct cs_etm_decoder *, + uint64_t, + const uint8_t *, + size_t, + size_t *); + +#endif /* INCLUDE__CS_ETM_DECODER_H__ */ + diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c new file mode 100644 index 000000000000..ca93257a6cb7 --- /dev/null +++ b/tools/perf/util/cs-etm.c @@ -0,0 +1,1533 @@ +/* + * Copyright(C) 2016 Linaro Limited. All rights reserved. + * Author: Tor Jeremiassen <tor.jeremiassen@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/log2.h> + +#include "perf.h" +#include "thread_map.h" +#include "thread.h" +#include "thread-stack.h" +#include "callchain.h" +#include "auxtrace.h" +#include "evlist.h" +#include "machine.h" +#include "util.h" +#include "util/intlist.h" +#include "color.h" +#include "cs-etm.h" +#include "cs-etm-decoder/cs-etm-decoder.h" +#include "debug.h" + +#include <stdlib.h> + +#define KiB(x) ((x) * 1024) +#define MiB(x) ((x) * 1024 * 1024) +#define MAX_TIMESTAMP (~0ULL) + +struct cs_etm_auxtrace { + struct auxtrace auxtrace; + struct auxtrace_queues queues; + struct auxtrace_heap heap; + u64 **metadata; + u32 auxtrace_type; + struct perf_session *session; + struct machine *machine; + struct perf_evsel *switch_evsel; + struct thread *unknown_thread; + uint32_t num_cpu; + bool timeless_decoding; + bool sampling_mode; + bool snapshot_mode; + bool data_queued; + bool sync_switch; + bool synth_needs_swap; + int have_sched_switch; + + bool sample_instructions; + u64 instructions_sample_type; + u64 instructions_sample_period; + u64 instructions_id; + struct itrace_synth_opts synth_opts; + unsigned pmu_type; +}; + +struct cs_etm_queue { + struct cs_etm_auxtrace *etm; + unsigned queue_nr; + struct auxtrace_buffer *buffer; + const struct cs_etm_state *state; + struct ip_callchain *chain; + union perf_event *event_buf; + bool on_heap; + bool step_through_buffers; + bool use_buffer_pid_tid; + pid_t pid, tid; + int cpu; + struct thread *thread; + u64 time; + u64 timestamp; + bool stop; + struct cs_etm_decoder *decoder; + u64 offset; + bool eot; + bool kernel_mapped; +}; + +static int cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq); +static int cs_etm__update_queues(struct cs_etm_auxtrace *); +static int cs_etm__process_queues(struct cs_etm_auxtrace *, u64); +static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *, pid_t, u64); +static uint32_t cs_etm__mem_access(struct cs_etm_queue *, uint64_t , size_t , uint8_t *); + +static void cs_etm__packet_dump(const char *pkt_string) +{ + const char *color = PERF_COLOR_BLUE; + + color_fprintf(stdout,color, " %s\n", pkt_string); + fflush(stdout); +} + +static void cs_etm__dump_event(struct cs_etm_auxtrace *etm, + struct auxtrace_buffer *buffer) +{ + const char *color = PERF_COLOR_BLUE; + struct cs_etm_decoder_params d_params; + struct cs_etm_trace_params *t_params; + struct cs_etm_decoder *decoder; + size_t buffer_used = 0; + size_t i; + + fprintf(stdout,"\n"); + color_fprintf(stdout, color, + ". ... 
CoreSight ETM Trace data: size %zu bytes\n", + buffer->size); + + t_params = zalloc(sizeof(struct cs_etm_trace_params) * etm->num_cpu); + for (i = 0; i < etm->num_cpu; ++i) { + t_params[i].protocol = CS_ETM_PROTO_ETMV4i; + t_params[i].reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0]; + t_params[i].reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1]; + t_params[i].reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2]; + t_params[i].reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8]; + t_params[i].reg_configr = etm->metadata[i][CS_ETMV4_TRCCONFIGR]; + t_params[i].reg_traceidr = etm->metadata[i][CS_ETMV4_TRCTRACEIDR]; + //[CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %"PRIx64"\n", + } + d_params.packet_printer = cs_etm__packet_dump; + d_params.operation = CS_ETM_OPERATION_PRINT; + d_params.formatted = true; + d_params.fsyncs = false; + d_params.hsyncs = false; + d_params.frame_aligned = true; + + decoder = cs_etm_decoder__new(etm->num_cpu,&d_params, t_params); + + zfree(&t_params); + + if (decoder == NULL) { + return; + } + do { + size_t consumed; + cs_etm_decoder__process_data_block(decoder,buffer->offset,&(((uint8_t *)buffer->data)[buffer_used]),buffer->size - buffer_used, &consumed); + buffer_used += consumed; + } while(buffer_used < buffer->size); + cs_etm_decoder__free(decoder); +} + +static int cs_etm__flush_events(struct perf_session *session, struct perf_tool *tool){ + struct cs_etm_auxtrace *etm = container_of(session->auxtrace, + struct cs_etm_auxtrace, + auxtrace); + + int ret; + + if (dump_trace) + return 0; + + if (!tool->ordered_events) + return -EINVAL; + + ret = cs_etm__update_queues(etm); + + if (ret < 0) + return ret; + + if (etm->timeless_decoding) + return cs_etm__process_timeless_queues(etm,-1,MAX_TIMESTAMP - 1); + + return cs_etm__process_queues(etm, MAX_TIMESTAMP); +} + +static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm, + struct auxtrace_queue *queue) +{ + struct cs_etm_queue *etmq = queue->priv; + + if ((queue->tid == -1) || (etm->have_sched_switch)) { + etmq->tid = machine__get_current_tid(etm->machine, etmq->cpu); + thread__zput(etmq->thread); + } + + if ((!etmq->thread) && (etmq->tid != -1)) { + etmq->thread = machine__find_thread(etm->machine,-1,etmq->tid); + } + + if (etmq->thread) { + etmq->pid = etmq->thread->pid_; + if (queue->cpu == -1) { + etmq->cpu = etmq->thread->cpu; + } + } +} + +static void cs_etm__free_queue(void *priv) +{ + struct cs_etm_queue *etmq = priv; + + if (!etmq) + return; + + thread__zput(etmq->thread); + cs_etm_decoder__free(etmq->decoder); + zfree(&etmq->event_buf); + zfree(&etmq->chain); + free(etmq); +} + +static void cs_etm__free_events(struct perf_session *session) +{ + struct cs_etm_auxtrace *aux = container_of(session->auxtrace, + struct cs_etm_auxtrace, + auxtrace); + + struct auxtrace_queues *queues = &(aux->queues); + + unsigned i; + + for (i = 0; i < queues->nr_queues; ++i) { + cs_etm__free_queue(queues->queue_array[i].priv); + queues->queue_array[i].priv = 0; + } + + auxtrace_queues__free(queues); + +} + +static void cs_etm__free(struct perf_session *session) +{ + + size_t i; + struct int_node *inode, *tmp; + struct cs_etm_auxtrace *aux = container_of(session->auxtrace, + struct cs_etm_auxtrace, + auxtrace); + auxtrace_heap__free(&aux->heap); + cs_etm__free_events(session); + session->auxtrace = NULL; + + /* First remove all traceID/CPU# nodes from the RB tree */ + intlist__for_each_safe(inode, tmp, traceid_list) + intlist__remove(traceid_list, inode); + /* Then the RB tree itself */ + intlist__delete(traceid_list); + + 
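 + /* Finally drop the per-CPU metadata and the auxtrace instance itself */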
//thread__delete(aux->unknown_thread); + for (i = 0; i < aux->num_cpu; ++i) { + zfree(&aux->metadata[i]); + } + zfree(&aux->metadata); + free(aux); +} + +static void cs_etm__use_buffer_pid_tid(struct cs_etm_queue *etmq, + struct auxtrace_queue *queue, + struct auxtrace_buffer *buffer) +{ + if ((queue->cpu == -1) && (buffer->cpu != -1)) { + etmq->cpu = buffer->cpu; + } + + etmq->pid = buffer->pid; + etmq->tid = buffer->tid; + + thread__zput(etmq->thread); + + if (etmq->tid != -1) { + if (etmq->pid != -1) { + etmq->thread = machine__findnew_thread(etmq->etm->machine, + etmq->pid, + etmq->tid); + } else { + etmq->thread = machine__findnew_thread(etmq->etm->machine, + -1, + etmq->tid); + } + } +} + + +static int cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq) +{ + struct auxtrace_buffer *aux_buffer = etmq->buffer; + struct auxtrace_buffer *old_buffer = aux_buffer; + struct auxtrace_queue *queue; + + if (etmq->stop) { + buff->len = 0; + return 0; + } + + queue = &etmq->etm->queues.queue_array[etmq->queue_nr]; + + aux_buffer = auxtrace_buffer__next(queue,aux_buffer); + + if (!aux_buffer) { + if (old_buffer) { + auxtrace_buffer__drop_data(old_buffer); + } + buff->len = 0; + return 0; + } + + etmq->buffer = aux_buffer; + + if (!aux_buffer->data) { + int fd = perf_data_file__fd(etmq->etm->session->file); + + aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd); + if (!aux_buffer->data) + return -ENOMEM; + } + + if (old_buffer) + auxtrace_buffer__drop_data(old_buffer); + + if (aux_buffer->use_data) { + buff->offset = aux_buffer->offset; + buff->len = aux_buffer->use_size; + buff->buf = aux_buffer->use_data; + } else { + buff->offset = aux_buffer->offset; + buff->len = aux_buffer->size; + buff->buf = aux_buffer->data; + } + /* + buff->offset = 0; + buff->len = sizeof(cstrace); + buff->buf = cstrace; + */ + + buff->ref_timestamp = aux_buffer->reference; + + if (etmq->use_buffer_pid_tid && + ((etmq->pid != aux_buffer->pid) || + (etmq->tid != aux_buffer->tid))) { + cs_etm__use_buffer_pid_tid(etmq,queue,aux_buffer); + } + + if (etmq->step_through_buffers) + etmq->stop = true; + + return buff->len; +} + +static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm, + unsigned int queue_nr) +{ + struct cs_etm_decoder_params d_params; + struct cs_etm_trace_params *t_params; + struct cs_etm_queue *etmq; + size_t i; + + etmq = zalloc(sizeof(struct cs_etm_queue)); + if (!etmq) + return NULL; + + if (etm->synth_opts.callchain) { + size_t sz = sizeof(struct ip_callchain); + + sz += etm->synth_opts.callchain_sz * sizeof(u64); + etmq->chain = zalloc(sz); + if (!etmq->chain) + goto out_free; + } else { + etmq->chain = NULL; + } + + etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE); + if (!etmq->event_buf) + goto out_free; + + etmq->etm = etm; + etmq->queue_nr = queue_nr; + etmq->pid = -1; + etmq->tid = -1; + etmq->cpu = -1; + etmq->stop = false; + etmq->kernel_mapped = false; + + t_params = zalloc(sizeof(struct cs_etm_trace_params)*etm->num_cpu); + + for (i = 0; i < etm->num_cpu; ++i) { + t_params[i].reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0]; + t_params[i].reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1]; + t_params[i].reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2]; + t_params[i].reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8]; + t_params[i].reg_configr = etm->metadata[i][CS_ETMV4_TRCCONFIGR]; + t_params[i].reg_traceidr = etm->metadata[i][CS_ETMV4_TRCTRACEIDR]; + t_params[i].protocol = CS_ETM_PROTO_ETMV4i; + } + d_params.packet_printer = cs_etm__packet_dump; + 
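+	/*
+	 * Unlike the PRINT-only decoder set up in cs_etm__dump_event(), the
+	 * per-queue decoder below is configured for full trace decode.
+	 */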
d_params.operation = CS_ETM_OPERATION_DECODE; + d_params.formatted = true; + d_params.fsyncs = false; + d_params.hsyncs = false; + d_params.frame_aligned = true; + d_params.data = etmq; + + etmq->decoder = cs_etm_decoder__new(etm->num_cpu,&d_params,t_params); + + + zfree(&t_params); + + if (!etmq->decoder) + goto out_free; + + etmq->offset = 0; + etmq->eot = false; + + return etmq; + +out_free: + zfree(&etmq->event_buf); + zfree(&etmq->chain); + free(etmq); + return NULL; +} + +static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm, + struct auxtrace_queue *queue, + unsigned int queue_nr) +{ + struct cs_etm_queue *etmq = queue->priv; + + if (list_empty(&(queue->head))) + return 0; + + if (etmq == NULL) { + etmq = cs_etm__alloc_queue(etm,queue_nr); + + if (etmq == NULL) { + return -ENOMEM; + } + + queue->priv = etmq; + + if (queue->cpu != -1) { + etmq->cpu = queue->cpu; + } + + etmq->tid = queue->tid; + + if (etm->sampling_mode) { + if (etm->timeless_decoding) + etmq->step_through_buffers = true; + if (etm->timeless_decoding || !etm->have_sched_switch) + etmq->use_buffer_pid_tid = true; + } + } + + if (!etmq->on_heap && + (!etm->sync_switch)) { + const struct cs_etm_state *state; + int ret = 0; + + if (etm->timeless_decoding) + return ret; + + //cs_etm__log("queue %u getting timestamp\n",queue_nr); + //cs_etm__log("queue %u decoding cpu %d pid %d tid %d\n", + //queue_nr, etmq->cpu, etmq->pid, etmq->tid); + (void) state; + return ret; + /* + while (1) { + state = cs_etm_decoder__decode(etmq->decoder); + if (state->err) { + if (state->err == CS_ETM_ERR_NODATA) { + //cs_etm__log("queue %u has no timestamp\n", + //queue_nr); + return 0; + } + continue; + } + if (state->timestamp) + break; + } + + etmq->timestamp = state->timestamp; + //cs_etm__log("queue %u timestamp 0x%"PRIx64 "\n", + //queue_nr, etmq->timestamp); + etmq->state = state; + etmq->have_sample = true; + //cs_etm__sample_flags(etmq); + ret = auxtrace_heap__add(&etm->heap, queue_nr, etmq->timestamp); + if (ret) + return ret; + etmq->on_heap = true; + */ + } + + return 0; +} + + +static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm) +{ + unsigned int i; + int ret; + + for (i = 0; i < etm->queues.nr_queues; i++) { + ret = cs_etm__setup_queue(etm, &(etm->queues.queue_array[i]),i); + if (ret) + return ret; + } + return 0; +} + +#if 0 +struct cs_etm_cache_entry { + struct auxtrace_cache_entry entry; + uint64_t icount; + uint64_t bcount; +}; + +static size_t cs_etm__cache_divisor(void) +{ + static size_t d = 64; + + return d; +} + +static size_t cs_etm__cache_size(struct dso *dso, + struct machine *machine) +{ + off_t size; + + size = dso__data_size(dso,machine); + size /= cs_etm__cache_divisor(); + + if (size < 1000) + return 10; + + if (size > (1 << 21)) + return 21; + + return 32 - __builtin_clz(size); +} + +static struct auxtrace_cache *cs_etm__cache(struct dso *dso, + struct machine *machine) +{ + struct auxtrace_cache *c; + size_t bits; + + if (dso->auxtrace_cache) + return dso->auxtrace_cache; + + bits = cs_etm__cache_size(dso,machine); + + c = auxtrace_cache__new(bits, sizeof(struct cs_etm_cache_entry), 200); + + dso->auxtrace_cache = c; + + return c; +} + +static int cs_etm__cache_add(struct dso *dso, struct machine *machine, + uint64_t offset, uint64_t icount, uint64_t bcount) +{ + struct auxtrace_cache *c = cs_etm__cache(dso, machine); + struct cs_etm_cache_entry *e; + int err; + + if (!c) + return -ENOMEM; + + e = auxtrace_cache__alloc_entry(c); + if (!e) + return -ENOMEM; + + e->icount = icount; + e->bcount = 
bcount; + + err = auxtrace_cache__add(c, offset, &e->entry); + + if (err) + auxtrace_cache__free_entry(c, e); + + return err; +} + +static struct cs_etm_cache_entry *cs_etm__cache_lookup(struct dso *dso, + struct machine *machine, + uint64_t offset) +{ + struct auxtrace_cache *c = cs_etm__cache(dso, machine); + + if (!c) + return NULL; + + return auxtrace_cache__lookup(dso->auxtrace_cache, offset); +} +#endif + +static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq, + struct cs_etm_packet *packet) +{ + int ret = 0; + struct cs_etm_auxtrace *etm = etmq->etm; + union perf_event *event = etmq->event_buf; + struct perf_sample sample = {.ip = 0,}; + uint64_t start_addr = packet->start_addr; + uint64_t end_addr = packet->end_addr; + + event->sample.header.type = PERF_RECORD_SAMPLE; + event->sample.header.misc = PERF_RECORD_MISC_USER; + event->sample.header.size = sizeof(struct perf_event_header); + + + sample.ip = start_addr; + sample.pid = etmq->pid; + sample.tid = etmq->tid; + sample.addr = end_addr; + sample.id = etmq->etm->instructions_id; + sample.stream_id = etmq->etm->instructions_id; + sample.period = (end_addr - start_addr) >> 2; + sample.cpu = packet->cpu; + sample.flags = 0; // etmq->flags; + sample.insn_len = 1; // etmq->insn_len; + + //etmq->last_insn_cnt = etmq->state->tot_insn_cnt; + +#if 0 + { + struct addr_location al; + uint64_t offset; + struct thread *thread; + struct machine *machine = etmq->etm->machine; + uint8_t cpumode; + struct cs_etm_cache_entry *e; + uint8_t buf[256]; + size_t bufsz; + + thread = etmq->thread; + + if (!thread) { + thread = etmq->etm->unknown_thread; + } + + if (start_addr > 0xffffffc000000000UL) { + cpumode = PERF_RECORD_MISC_KERNEL; + } else { + cpumode = PERF_RECORD_MISC_USER; + } + + thread__find_addr_map(thread, cpumode, MAP__FUNCTION, start_addr,&al); + if (!al.map || !al.map->dso) { + goto endTest; + } + if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && + dso__data_status_seen(al.map->dso,DSO_DATA_STATUS_SEEN_ITRACE)) { + goto endTest; + } + + offset = al.map->map_ip(al.map,start_addr); + + + e = cs_etm__cache_lookup(al.map->dso, machine, offset); + + if (e) { + (void) e; + } else { + int len; + map__load(al.map, machine->symbol_filter); + + bufsz = sizeof(buf); + len = dso__data_read_offset(al.map->dso, machine, + offset, buf, bufsz); + + if (len <= 0) { + goto endTest; + } + + cs_etm__cache_add(al.map->dso, machine, offset, (end_addr - start_addr) >> 2, end_addr - start_addr); + + } +endTest: + (void) offset; + } +#endif + + ret = perf_session__deliver_synth_event(etm->session,event, &sample); + + if (ret) { + pr_err("CS ETM Trace: failed to deliver instruction event, error %d\n", ret); + + } + return ret; +} + +struct cs_etm_synth { + struct perf_tool dummy_tool; + struct perf_session *session; +}; + + +static int cs_etm__event_synth(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + struct cs_etm_synth *cs_etm_synth = + container_of(tool, struct cs_etm_synth, dummy_tool); + + (void) sample; + (void) machine; + + return perf_session__deliver_synth_event(cs_etm_synth->session, event, NULL); + +} + + +static int cs_etm__synth_event(struct perf_session *session, + struct perf_event_attr *attr, u64 id) +{ + struct cs_etm_synth cs_etm_synth; + + memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth)); + cs_etm_synth.session = session; + + return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1, + &id, cs_etm__event_synth); +} + +static int 
cs_etm__synth_events(struct cs_etm_auxtrace *etm, + struct perf_session *session) +{ + struct perf_evlist *evlist = session->evlist; + struct perf_evsel *evsel; + struct perf_event_attr attr; + bool found = false; + u64 id; + int err; + + evlist__for_each(evlist, evsel) { + + if (evsel->attr.type == etm->pmu_type) { + found = true; + break; + } + } + + if (!found) { + pr_debug("There are no selected events with Core Sight Trace data\n"); + return 0; + } + + memset(&attr, 0, sizeof(struct perf_event_attr)); + attr.size = sizeof(struct perf_event_attr); + attr.type = PERF_TYPE_HARDWARE; + attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK; + attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | + PERF_SAMPLE_PERIOD; + if (etm->timeless_decoding) + attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; + else + attr.sample_type |= PERF_SAMPLE_TIME; + + attr.exclude_user = evsel->attr.exclude_user; + attr.exclude_kernel = evsel->attr.exclude_kernel; + attr.exclude_hv = evsel->attr.exclude_hv; + attr.exclude_host = evsel->attr.exclude_host; + attr.exclude_guest = evsel->attr.exclude_guest; + attr.sample_id_all = evsel->attr.sample_id_all; + attr.read_format = evsel->attr.read_format; + + id = evsel->id[0] + 1000000000; + + if (!id) + id = 1; + + if (etm->synth_opts.instructions) { + attr.config = PERF_COUNT_HW_INSTRUCTIONS; + attr.sample_period = etm->synth_opts.period; + etm->instructions_sample_period = attr.sample_period; + err = cs_etm__synth_event(session, &attr, id); + + if (err) { + pr_err("%s: failed to synthesize 'instructions' event type\n", + __func__); + return err; + } + etm->sample_instructions = true; + etm->instructions_sample_type = attr.sample_type; + etm->instructions_id = id; + id += 1; + } + + etm->synth_needs_swap = evsel->needs_swap; + return 0; +} + +static int cs_etm__sample(struct cs_etm_queue *etmq) +{ + //const struct cs_etm_state *state = etmq->state; + struct cs_etm_packet packet; + //struct cs_etm_auxtrace *etm = etmq->etm; + int err; + + err = cs_etm_decoder__get_packet(etmq->decoder,&packet); + // if there is no sample, it returns err = -1, no real error + + if (!err && packet.sample_type & CS_ETM_RANGE) { + err = cs_etm__synth_instruction_sample(etmq,&packet); + if (err) + return err; + } + return 0; +} + +static int cs_etm__run_decoder(struct cs_etm_queue *etmq, u64 *timestamp) +{ + struct cs_etm_buffer buffer; + size_t buffer_used; + int err = 0; + + /* Go through each buffer in the queue and decode them one by one */ +more: + buffer_used = 0; + memset(&buffer, 0, sizeof(buffer)); + err = cs_etm__get_trace(&buffer,etmq); + if (err <= 0) + return err; + + do { + size_t processed = 0; + etmq->state = cs_etm_decoder__process_data_block(etmq->decoder, + etmq->offset, + &buffer.buf[buffer_used], + buffer.len-buffer_used, + &processed); + err = etmq->state->err; + etmq->offset += processed; + buffer_used += processed; + if (!err) + cs_etm__sample(etmq); + } while (!etmq->eot && (buffer.len > buffer_used)); +goto more; + + (void) timestamp; + + return err; +} + +static int cs_etm__update_queues(struct cs_etm_auxtrace *etm) +{ + if (etm->queues.new_data) { + etm->queues.new_data = false; + return cs_etm__setup_queues(etm); + } + return 0; +} + +static int cs_etm__process_queues(struct cs_etm_auxtrace *etm, u64 timestamp) +{ + unsigned int queue_nr; + u64 ts; + int ret; + + while (1) { + struct auxtrace_queue *queue; + struct cs_etm_queue *etmq; + + if (!etm->heap.heap_cnt) + return 0; + + if (etm->heap.heap_array[0].ordinal >= timestamp) + return 0; + + queue_nr = 
etm->heap.heap_array[0].queue_nr; + queue = &etm->queues.queue_array[queue_nr]; + etmq = queue->priv; + + //cs_etm__log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n", + //queue_nr, etm->heap.heap_array[0].ordinal, + //timestamp); + + auxtrace_heap__pop(&etm->heap); + + if (etm->heap.heap_cnt) { + ts = etm->heap.heap_array[0].ordinal + 1; + if (ts > timestamp) + ts = timestamp; + } else { + ts = timestamp; + } + + cs_etm__set_pid_tid_cpu(etm, queue); + + ret = cs_etm__run_decoder(etmq, &ts); + + if (ret < 0) { + auxtrace_heap__add(&etm->heap, queue_nr, ts); + return ret; + } + + if (!ret) { + ret = auxtrace_heap__add(&etm->heap, queue_nr, ts); + if (ret < 0) + return ret; + } else { + etmq->on_heap = false; + } + } + return 0; +} + +static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm, + pid_t tid, + u64 time_) +{ + struct auxtrace_queues *queues = &etm->queues; + unsigned int i; + u64 ts = 0; + + for (i = 0; i < queues->nr_queues; ++i) { + struct auxtrace_queue *queue = &(etm->queues.queue_array[i]); + struct cs_etm_queue *etmq = queue->priv; + + if (etmq && ((tid == -1) || (etmq->tid == tid))) { + etmq->time = time_; + cs_etm__set_pid_tid_cpu(etm, queue); + cs_etm__run_decoder(etmq,&ts); + + } + } + return 0; +} + +static struct cs_etm_queue *cs_etm__cpu_to_etmq(struct cs_etm_auxtrace *etm, + int cpu) +{ + unsigned q,j; + + if (etm->queues.nr_queues == 0) + return NULL; + + if (cpu < 0) + q = 0; + else if ((unsigned) cpu >= etm->queues.nr_queues) + q = etm->queues.nr_queues - 1; + else + q = cpu; + + if (etm->queues.queue_array[q].cpu == cpu) + return etm->queues.queue_array[q].priv; + + for (j = 0; q > 0; j++) { + if (etm->queues.queue_array[--q].cpu == cpu) + return etm->queues.queue_array[q].priv; + } + + for (; j < etm->queues.nr_queues; j++) { + if (etm->queues.queue_array[j].cpu == cpu) + return etm->queues.queue_array[j].priv; + + } + + return NULL; +} + +static uint32_t cs_etm__mem_access(struct cs_etm_queue *etmq, uint64_t address, size_t size, uint8_t *buffer) +{ + struct addr_location al; + uint64_t offset; + struct thread *thread; + struct machine *machine; + uint8_t cpumode; + int len; + + if (etmq == NULL) + return -1; + + machine = etmq->etm->machine; + thread = etmq->thread; + if (address > 0xffffffc000000000UL) { + cpumode = PERF_RECORD_MISC_KERNEL; + } else { + cpumode = PERF_RECORD_MISC_USER; + } + + thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address,&al); + + if (!al.map || !al.map->dso) { + return 0; + } + + if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && + dso__data_status_seen(al.map->dso,DSO_DATA_STATUS_SEEN_ITRACE)) { + return 0; + } + + offset = al.map->map_ip(al.map,address); + + map__load(al.map, machine->symbol_filter); + + len = dso__data_read_offset(al.map->dso, machine, + offset, buffer, size); + + if (len <= 0) { + return 0; + } + + return len; +} + +static bool check_need_swap(int file_endian) +{ + const int data = 1; + u8 *check = (u8 *)&data; + int host_endian; + + if (check[0] == 1) + host_endian = ELFDATA2LSB; + else + host_endian = ELFDATA2MSB; + + return host_endian != file_endian; +} + +static int cs_etm__read_elf_info(const char *fname, uint64_t *foffset, uint64_t *fstart, uint64_t *fsize) +{ + FILE *fp; + u8 e_ident[EI_NIDENT]; + int ret = -1; + bool need_swap = false; + size_t buf_size; + void *buf; + int i; + + fp = fopen(fname, "r"); + if (fp == NULL) + return -1; + + if (fread(e_ident, sizeof(e_ident), 1, fp) != 1) + goto out; + + if (memcmp(e_ident, ELFMAG, SELFMAG) || + e_ident[EI_VERSION] != 
EV_CURRENT) + goto out; + + need_swap = check_need_swap(e_ident[EI_DATA]); + + /* for simplicity */ + fseek(fp, 0, SEEK_SET); + + if (e_ident[EI_CLASS] == ELFCLASS32) { + Elf32_Ehdr ehdr; + Elf32_Phdr *phdr; + + if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) + goto out; + + if (need_swap) { + ehdr.e_phoff = bswap_32(ehdr.e_phoff); + ehdr.e_phentsize = bswap_16(ehdr.e_phentsize); + ehdr.e_phnum = bswap_16(ehdr.e_phnum); + } + + buf_size = ehdr.e_phentsize * ehdr.e_phnum; + buf = malloc(buf_size); + if (buf == NULL) + goto out; + + fseek(fp, ehdr.e_phoff, SEEK_SET); + if (fread(buf, buf_size, 1, fp) != 1) + goto out_free; + + for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) { + + if (need_swap) { + phdr->p_type = bswap_32(phdr->p_type); + phdr->p_offset = bswap_32(phdr->p_offset); + phdr->p_filesz = bswap_32(phdr->p_filesz); + } + + if (phdr->p_type != PT_LOAD) + continue; + + *foffset = phdr->p_offset; + *fstart = phdr->p_vaddr; + *fsize = phdr->p_filesz; + ret = 0; + break; + } + } else { + Elf64_Ehdr ehdr; + Elf64_Phdr *phdr; + + if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) + goto out; + + if (need_swap) { + ehdr.e_phoff = bswap_64(ehdr.e_phoff); + ehdr.e_phentsize = bswap_16(ehdr.e_phentsize); + ehdr.e_phnum = bswap_16(ehdr.e_phnum); + } + + buf_size = ehdr.e_phentsize * ehdr.e_phnum; + buf = malloc(buf_size); + if (buf == NULL) + goto out; + + fseek(fp, ehdr.e_phoff, SEEK_SET); + if (fread(buf, buf_size, 1, fp) != 1) + goto out_free; + + for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) { + + if (need_swap) { + phdr->p_type = bswap_32(phdr->p_type); + phdr->p_offset = bswap_64(phdr->p_offset); + phdr->p_filesz = bswap_64(phdr->p_filesz); + } + + if (phdr->p_type != PT_LOAD) + continue; + + *foffset = phdr->p_offset; + *fstart = phdr->p_vaddr; + *fsize = phdr->p_filesz; + ret = 0; + break; + } + } +out_free: + free(buf); +out: + fclose(fp); + return ret; +} + +static int cs_etm__process_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool) +{ + struct cs_etm_auxtrace *etm = container_of(session->auxtrace, + struct cs_etm_auxtrace, + auxtrace); + + u64 timestamp; + int err = 0; + + if (dump_trace) + return 0; + + if (!tool->ordered_events) { + pr_err("CoreSight ETM Trace requires ordered events\n"); + return -EINVAL; + } + + if (sample->time && (sample->time != (u64)-1)) + timestamp = sample->time; + else + timestamp = 0; + + if (timestamp || etm->timeless_decoding) { + err = cs_etm__update_queues(etm); + if (err) + return err; + + } + + if (event->header.type == PERF_RECORD_MMAP2) { + struct dso *dso; + int cpu; + struct cs_etm_queue *etmq; + + cpu = sample->cpu; + + etmq = cs_etm__cpu_to_etmq(etm,cpu); + + if (!etmq) { + return -1; + } + + dso = dsos__find(&(etm->machine->dsos),event->mmap2.filename,false); + if (NULL != dso) { + err = cs_etm_decoder__add_mem_access_cb( + etmq->decoder, + event->mmap2.start, + event->mmap2.len, + cs_etm__mem_access); + } + + if ((symbol_conf.vmlinux_name != NULL) && (!etmq->kernel_mapped)) { + uint64_t foffset; + uint64_t fstart; + uint64_t fsize; + + err = cs_etm__read_elf_info(symbol_conf.vmlinux_name, + &foffset,&fstart,&fsize); + + if (!err) { + cs_etm_decoder__add_bin_file( + etmq->decoder, + foffset, + fstart, + fsize & ~0x1ULL, + symbol_conf.vmlinux_name); + + etmq->kernel_mapped = true; + } + } + + } + + if (etm->timeless_decoding) { + if (event->header.type == PERF_RECORD_EXIT) { + err = cs_etm__process_timeless_queues(etm, + event->fork.tid, + sample->time); + } + } else 
if (timestamp) { + err = cs_etm__process_queues(etm, timestamp); + } + + //cs_etm__log("event %s (%u): cpu %d time%"PRIu64" tsc %#"PRIx64"\n", + //perf_event__name(event->header.type), event->header.type, + //sample->cpu, sample->time, timestamp); + return err; +} + +static int cs_etm__process_auxtrace_event(struct perf_session *session, + union perf_event *event, + struct perf_tool *tool) +{ + struct cs_etm_auxtrace *etm = container_of(session->auxtrace, + struct cs_etm_auxtrace, + auxtrace); + + (void) tool; + + if (!etm->data_queued) { + struct auxtrace_buffer *buffer; + off_t data_offset; + int fd = perf_data_file__fd(session->file); + bool is_pipe = perf_data_file__is_pipe(session->file); + int err; + + if (is_pipe) { + data_offset = 0; + } else { + data_offset = lseek(fd, 0, SEEK_CUR); + if (data_offset == -1) { + return -errno; + } + } + + err = auxtrace_queues__add_event(&etm->queues, + session, + event, + data_offset, + &buffer); + if (err) + return err; + + if (dump_trace) + { + if (auxtrace_buffer__get_data(buffer,fd)) { + cs_etm__dump_event(etm,buffer); + auxtrace_buffer__put_data(buffer); + } + } + } + + return 0; + +} + +static const char * const cs_etm_global_header_fmts[] = { + [CS_HEADER_VERSION_0] = " Header version %"PRIx64"\n", + [CS_PMU_TYPE_CPUS] = " PMU type/num cpus %"PRIx64"\n", + [CS_ETM_SNAPSHOT] = " Snapshot %"PRIx64"\n", +}; + +static const char * const cs_etm_priv_fmts[] = { + [CS_ETM_MAGIC] = " Magic number %"PRIx64"\n", + [CS_ETM_CPU] = " CPU %"PRIx64"\n", + [CS_ETM_ETMCR] = " ETMCR %"PRIx64"\n", + [CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %"PRIx64"\n", + [CS_ETM_ETMCCER] = " ETMCCER %"PRIx64"\n", + [CS_ETM_ETMIDR] = " ETMIDR %"PRIx64"\n", +}; + +static const char * const cs_etmv4_priv_fmts[] = { + [CS_ETM_MAGIC] = " Magic number %"PRIx64"\n", + [CS_ETM_CPU] = " CPU %"PRIx64"\n", + [CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %"PRIx64"\n", + [CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %"PRIx64"\n", + [CS_ETMV4_TRCIDR0] = " TRCIDR0 %"PRIx64"\n", + [CS_ETMV4_TRCIDR1] = " TRCIDR1 %"PRIx64"\n", + [CS_ETMV4_TRCIDR2] = " TRCIDR2 %"PRIx64"\n", + [CS_ETMV4_TRCIDR8] = " TRCIDR8 %"PRIx64"\n", + [CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %"PRIx64"\n", +}; + +static void cs_etm__print_auxtrace_info(u64 *val, size_t num) +{ + unsigned i,j,cpu; + + for (i = 0, cpu = 0; cpu < num; ++cpu) { + + if (val[i] == __perf_cs_etmv3_magic) { + for (j = 0; j < CS_ETM_PRIV_MAX; ++j, ++i) { + fprintf(stdout,cs_etm_priv_fmts[j],val[i]); + } + } else if (val[i] == __perf_cs_etmv4_magic) { + for (j = 0; j < CS_ETMV4_PRIV_MAX; ++j, ++i) { + fprintf(stdout,cs_etmv4_priv_fmts[j],val[i]); + } + } else { + // failure.. 
return + return; + } + } +} + +int cs_etm__process_auxtrace_info(union perf_event *event, + struct perf_session *session) +{ + struct auxtrace_info_event *auxtrace_info = &(event->auxtrace_info); + size_t event_header_size = sizeof(struct perf_event_header); + size_t info_header_size = 8; + size_t total_size = auxtrace_info->header.size; + size_t priv_size = 0; + size_t num_cpu; + struct cs_etm_auxtrace *etm = 0; + int err = 0, idx = -1; + u64 *ptr; + u64 *hdr = NULL; + u64 **metadata = NULL; + size_t i,j,k; + unsigned pmu_type; + struct int_node *inode; + + /* + * sizeof(auxtrace_info_event::type) + + * sizeof(auxtrace_info_event::reserved) == 8 + */ + info_header_size = 8; + + if (total_size < (event_header_size + info_header_size)) + return -EINVAL; + + priv_size = total_size - event_header_size - info_header_size; + + // First the global part + + ptr = (u64 *) auxtrace_info->priv; + if (ptr[0] == 0) { + hdr = zalloc(sizeof(u64 *) * CS_HEADER_VERSION_0_MAX); + if (hdr == NULL) { + return -EINVAL; + } + for (i = 0; i < CS_HEADER_VERSION_0_MAX; ++i) { + hdr[i] = ptr[i]; + } + num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff; + pmu_type = (unsigned) ((hdr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff); + } else { + return -EINVAL; + } + + /* + * Create an RB tree for traceID-CPU# tuple. Since the conversion has + * to be made for each packet that gets decoded optimizing access in + * anything other than a sequential array is worth doing. + */ + traceid_list = intlist__new(NULL); + if (!traceid_list) + return -ENOMEM; + + metadata = zalloc(sizeof(u64 *) * num_cpu); + if (!metadata) { + err = -ENOMEM; + goto err_free_traceid_list; + } + + if (metadata == NULL) { + return -EINVAL; + } + + for (j = 0; j < num_cpu; ++j) { + if (ptr[i] == __perf_cs_etmv3_magic) { + metadata[j] = zalloc(sizeof(u64)*CS_ETM_PRIV_MAX); + if (metadata == NULL) + return -EINVAL; + for (k = 0; k < CS_ETM_PRIV_MAX; k++) { + metadata[j][k] = ptr[i+k]; + } + + /* The traceID is our handle */ + idx = metadata[j][CS_ETM_ETMIDR]; + i += CS_ETM_PRIV_MAX; + } else if (ptr[i] == __perf_cs_etmv4_magic) { + metadata[j] = zalloc(sizeof(u64)*CS_ETMV4_PRIV_MAX); + if (metadata == NULL) + return -EINVAL; + for (k = 0; k < CS_ETMV4_PRIV_MAX; k++) { + metadata[j][k] = ptr[i+k]; + } + + /* The traceID is our handle */ + idx = metadata[j][CS_ETMV4_TRCTRACEIDR]; + i += CS_ETMV4_PRIV_MAX; + } + + /* Get an RB node for this CPU */ + inode = intlist__findnew(traceid_list, idx); + + /* Something went wrong, no need to continue */ + if (!inode) { + err = PTR_ERR(inode); + goto err_free_metadata; + } + + /* + * The node for that CPU should not have been taken already. + * Backout if that's the case. 
+ */ + if (inode->priv) { + err = -EINVAL; + goto err_free_metadata; + } + + /* All good, associate the traceID with the CPU# */ + inode->priv = &metadata[j][CS_ETM_CPU]; + + } + + if (i*8 != priv_size) + return -EINVAL; + + if (dump_trace) + cs_etm__print_auxtrace_info(auxtrace_info->priv,num_cpu); + + etm = zalloc(sizeof(struct cs_etm_auxtrace)); + + etm->num_cpu = num_cpu; + etm->pmu_type = pmu_type; + etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0); + + if (!etm) + return -ENOMEM; + + + err = auxtrace_queues__init(&etm->queues); + if (err) + goto err_free; + + etm->unknown_thread = thread__new(999999999,999999999); + if (etm->unknown_thread == NULL) { + err = -ENOMEM; + goto err_free_queues; + } + err = thread__set_comm(etm->unknown_thread, "unknown", 0); + if (err) { + goto err_delete_thread; + } + + if (thread__init_map_groups(etm->unknown_thread, + etm->machine)) { + err = -ENOMEM; + goto err_delete_thread; + } + + etm->timeless_decoding = true; + etm->sampling_mode = false; + etm->metadata = metadata; + etm->session = session; + etm->machine = &session->machines.host; + etm->auxtrace_type = auxtrace_info->type; + + etm->auxtrace.process_event = cs_etm__process_event; + etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event; + etm->auxtrace.flush_events = cs_etm__flush_events; + etm->auxtrace.free_events = cs_etm__free_events; + etm->auxtrace.free = cs_etm__free; + session->auxtrace = &(etm->auxtrace); + + if (dump_trace) + return 0; + + if (session->itrace_synth_opts && session->itrace_synth_opts->set) { + etm->synth_opts = *session->itrace_synth_opts; + } else { + itrace_synth_opts__set_default(&etm->synth_opts); + } + etm->synth_opts.branches = false; + etm->synth_opts.callchain = false; + etm->synth_opts.calls = false; + etm->synth_opts.returns = false; + + err = cs_etm__synth_events(etm, session); + if (err) + goto err_delete_thread; + + err = auxtrace_queues__process_index(&etm->queues, session); + if (err) + goto err_delete_thread; + + etm->data_queued = etm->queues.populated; + + return 0; + +err_delete_thread: + thread__delete(etm->unknown_thread); +err_free_queues: + auxtrace_queues__free(&etm->queues); + session->auxtrace = NULL; +err_free: + free(etm); +err_free_metadata: + /* No need to check @metadata[j], free(NULL) is supported */ + for (j = 0; j < num_cpu; ++j) + free(metadata[j]); + free(metadata); +err_free_traceid_list: + intlist__delete(traceid_list); + + return err; +} diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h new file mode 100644 index 000000000000..ec6ff78f1905 --- /dev/null +++ b/tools/perf/util/cs-etm.h @@ -0,0 +1,84 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef INCLUDE__UTIL_PERF_CS_ETM_H__ +#define INCLUDE__UTIL_PERF_CS_ETM_H__ + +#include "util/event.h" +#include "util/intlist.h" +#include "util/session.h" + +/* Versionning header in case things need tro change in the future. That way + * decoding of old snapshot is still possible. + */ +enum { + /* Starting with 0x0 */ + CS_HEADER_VERSION_0, + /* PMU->type (32 bit), total # of CPUs (32 bit) */ + CS_PMU_TYPE_CPUS, + CS_ETM_SNAPSHOT, + CS_HEADER_VERSION_0_MAX, +}; + +/* Beginning of header common to both ETMv3 and V4 */ +enum { + CS_ETM_MAGIC, + CS_ETM_CPU, +}; + +/* ETMv3/PTM metadata */ +enum { + /* Dynamic, configurable parameters */ + CS_ETM_ETMCR = CS_ETM_CPU + 1, + CS_ETM_ETMTRACEIDR, + /* RO, taken from sysFS */ + CS_ETM_ETMCCER, + CS_ETM_ETMIDR, + CS_ETM_PRIV_MAX, +}; + +/* ETMv4 metadata */ +enum { + /* Dynamic, configurable parameters */ + CS_ETMV4_TRCCONFIGR = CS_ETM_CPU + 1, + CS_ETMV4_TRCTRACEIDR, + /* RO, taken from sysFS */ + CS_ETMV4_TRCIDR0, + CS_ETMV4_TRCIDR1, + CS_ETMV4_TRCIDR2, + CS_ETMV4_TRCIDR8, + CS_ETMV4_TRCAUTHSTATUS, + CS_ETMV4_PRIV_MAX, +}; + +/* RB tree for quick conversion between traceID and CPUs */ +struct intlist *traceid_list; + +#define KiB(x) ((x) * 1024) +#define MiB(x) ((x) * 1024 * 1024) + +#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_0_MAX * sizeof(u64)) + +static const u64 __perf_cs_etmv3_magic = 0x3030303030303030ULL; +static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL; +#define CS_ETMV3_PRIV_SIZE (CS_ETM_PRIV_MAX * sizeof(u64)) +#define CS_ETMV4_PRIV_SIZE (CS_ETMV4_PRIV_MAX * sizeof(u64)) + +int cs_etm__process_auxtrace_info(union perf_event *event, + struct perf_session *session); + +#endif diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index b4b96120fc3b..b856cf0393ea 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1247,6 +1247,30 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **e return err; } +int perf_evlist__apply_drv_configs(struct perf_evlist *evlist, + struct perf_evsel **err_evsel, + struct perf_evsel_config_term **err_term) +{ + struct perf_evsel *evsel; + int err = 0; + const int ncpus = cpu_map__nr(evlist->cpus), + nthreads = thread_map__nr(evlist->threads); + + evlist__for_each(evlist, evsel) { + if (list_empty(&evsel->drv_config_terms)) + continue; + + err = perf_evsel__apply_drv_configs(evsel, ncpus, + nthreads, err_term); + if (err) { + *err_evsel = evsel; + break; + } + } + + return err; +} + int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) { struct perf_evsel *evsel; @@ -1486,7 +1510,7 @@ int perf_evlist__open(struct perf_evlist *evlist) perf_evlist__update_id_pos(evlist); evlist__for_each(evlist, evsel) { - err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); + err = perf_evsel__open(evsel, evsel->cpus, evsel->threads); if (err < 0) goto out_err; } diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index a459fe71b452..ae5c1eb1d08c 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -163,6 +163,9 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, struct thread_map *threads); int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel); +int perf_evlist__apply_drv_configs(struct perf_evlist *evlist, + struct perf_evsel **err_evsel, + struct perf_evsel_config_term **term); void __perf_evlist__set_leader(struct list_head *list); 
void perf_evlist__set_leader(struct perf_evlist *evlist); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index d4913a46ee1c..6e0a16c7176a 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -211,6 +211,7 @@ void perf_evsel__init(struct perf_evsel *evsel, evsel->bpf_fd = -1; INIT_LIST_HEAD(&evsel->node); INIT_LIST_HEAD(&evsel->config_terms); + INIT_LIST_HEAD(&evsel->drv_config_terms); perf_evsel__object.init(evsel); evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); perf_evsel__calc_id_pos(evsel); @@ -981,6 +982,27 @@ int perf_evsel__append_filter(struct perf_evsel *evsel, return -1; } +int perf_evsel__apply_drv_configs(struct perf_evsel *evsel, + int ncpus, int nthreads, + struct perf_evsel_config_term **err_term) +{ + int err = 0; + struct perf_evsel_config_term *term; + + list_for_each_entry(term, &evsel->drv_config_terms, list) { + err = perf_evsel__run_ioctl(evsel, ncpus, nthreads, + PERF_EVENT_IOC_SET_DRV_CONFIGS, + (void *)term->val.drv_cfg); + + if (err) { + *err_term = term; + break; + } + } + + return err; +} + int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) { return perf_evsel__run_ioctl(evsel, ncpus, nthreads, @@ -988,6 +1010,16 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) 0); } +int perf_evsel__disable(struct perf_evsel *evsel) +{ + int nthreads = thread_map__nr(evsel->threads); + int ncpus = cpu_map__nr(evsel->cpus); + + return perf_evsel__run_ioctl(evsel, ncpus, nthreads, + PERF_EVENT_IOC_DISABLE, + 0); +} + int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) { if (ncpus == 0 || nthreads == 0) @@ -1033,6 +1065,16 @@ static void perf_evsel__free_config_terms(struct perf_evsel *evsel) } } +static void perf_evsel__free_drv_config_terms(struct perf_evsel *evsel) +{ + struct perf_evsel_config_term *term, *h; + + list_for_each_entry_safe(term, h, &evsel->drv_config_terms, list) { + list_del(&term->list); + free(term); + } +} + void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) { int cpu, thread; @@ -1054,6 +1096,7 @@ void perf_evsel__exit(struct perf_evsel *evsel) perf_evsel__free_fd(evsel); perf_evsel__free_id(evsel); perf_evsel__free_config_terms(evsel); + perf_evsel__free_drv_config_terms(evsel); close_cgroup(evsel->cgrp); cpu_map__put(evsel->cpus); cpu_map__put(evsel->own_cpus); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 0e49bd742c63..b649143ac16b 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -44,6 +44,7 @@ enum { PERF_EVSEL__CONFIG_TERM_CALLGRAPH, PERF_EVSEL__CONFIG_TERM_STACK_USER, PERF_EVSEL__CONFIG_TERM_INHERIT, + PERF_EVSEL__CONFIG_TERM_DRV_CFG, PERF_EVSEL__CONFIG_TERM_MAX, }; @@ -55,6 +56,7 @@ struct perf_evsel_config_term { u64 freq; bool time; char *callgraph; + char *drv_cfg; u64 stack_user; bool inherit; } val; @@ -75,6 +77,7 @@ struct perf_evsel_config_term { * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. 
if sample_id_all * is used there is an id sample appended to non-sample events * @priv: And what is in its containing unnamed union are tool specific + * @drv_config_terms: List of configurables sent directly to the PMU driver */ struct perf_evsel { struct list_head node; @@ -123,6 +126,7 @@ struct perf_evsel { char *group_name; bool cmdline_group_boundary; struct list_head config_terms; + struct list_head drv_config_terms; int bpf_fd; }; @@ -227,7 +231,11 @@ int perf_evsel__append_filter(struct perf_evsel *evsel, const char *op, const char *filter); int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads, const char *filter); +int perf_evsel__apply_drv_configs(struct perf_evsel *evsel, + int ncpus, int nthreads, + struct perf_evsel_config_term **err_term); int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads); +int perf_evsel__disable(struct perf_evsel *evsel); int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus); diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 8b303ff20289..888640ffada5 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1,3 +1,4 @@ +#include "build-id.h" #include "callchain.h" #include "debug.h" #include "event.h" @@ -685,8 +686,16 @@ static struct dso *machine__get_kernel(struct machine *machine) DSO_TYPE_GUEST_KERNEL); } - if (kernel != NULL && (!kernel->has_build_id)) - dso__read_running_kernel_build_id(kernel, machine); + if (kernel != NULL && (!kernel->has_build_id)) { + if (symbol_conf.vmlinux_name != NULL) { + filename__read_build_id(symbol_conf.vmlinux_name, + kernel->build_id, + sizeof(kernel->build_id)); + kernel->has_build_id = 1; + } else { + dso__read_running_kernel_build_id(kernel, machine); + } + } return kernel; } @@ -700,8 +709,19 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf, { if (machine__is_default_guest(machine)) scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); - else - scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); + else { + if (symbol_conf.vmlinux_name != 0) { + unsigned char build_id[BUILD_ID_SIZE]; + char build_id_hex[SBUILD_ID_SIZE]; + filename__read_build_id(symbol_conf.vmlinux_name, + build_id, + sizeof(build_id)); + build_id__sprintf(build_id,sizeof(build_id), build_id_hex); + build_id__filename((char *)build_id_hex,buf,bufsz); + } else { + scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); + } + } } const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL}; @@ -710,7 +730,7 @@ const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL}; * Returns the name of the start symbol in *symbol_name. Pass in NULL as * symbol_name if it's not that important. 
*/ -static u64 machine__get_running_kernel_start(struct machine *machine, +static u64 machine__get_kallsyms_kernel_start(struct machine *machine, const char **symbol_name) { char filename[PATH_MAX]; @@ -738,7 +758,7 @@ static u64 machine__get_running_kernel_start(struct machine *machine, int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) { enum map_type type; - u64 start = machine__get_running_kernel_start(machine, NULL); + u64 start = machine__get_kallsyms_kernel_start(machine, NULL); for (type = 0; type < MAP__NR_TYPES; ++type) { struct kmap *kmap; @@ -1083,7 +1103,8 @@ int machine__create_kernel_maps(struct machine *machine) { struct dso *kernel = machine__get_kernel(machine); const char *name; - u64 addr = machine__get_running_kernel_start(machine, &name); + u64 addr = machine__get_kallsyms_kernel_start(machine, &name); + if (!addr) return -1; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index a35db828bd0d..854dd2105bd5 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -285,7 +285,8 @@ static struct perf_evsel * __add_event(struct list_head *list, int *idx, struct perf_event_attr *attr, char *name, struct cpu_map *cpus, - struct list_head *config_terms) + struct list_head *config_terms, + struct list_head *drv_config_terms) { struct perf_evsel *evsel; @@ -304,6 +305,9 @@ __add_event(struct list_head *list, int *idx, if (config_terms) list_splice(config_terms, &evsel->config_terms); + if (drv_config_terms) + list_splice(drv_config_terms, &evsel->drv_config_terms); + list_add_tail(&evsel->node, list); return evsel; } @@ -312,7 +316,8 @@ static int add_event(struct list_head *list, int *idx, struct perf_event_attr *attr, char *name, struct list_head *config_terms) { - return __add_event(list, idx, attr, name, NULL, config_terms) ? 0 : -ENOMEM; + return __add_event(list, idx, attr, name, + NULL, config_terms, NULL) ? 0 : -ENOMEM; } static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) @@ -823,7 +828,8 @@ static int config_term_pmu(struct perf_event_attr *attr, struct parse_events_term *term, struct parse_events_error *err) { - if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER) + if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER || + term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) /* * Always succeed for sysfs terms, as we dont know * at this point what type they need to have. 
@@ -869,10 +875,7 @@ static int config_attr(struct perf_event_attr *attr, return 0; } -static int get_config_terms(struct list_head *head_config, - struct list_head *head_terms __maybe_unused) -{ -#define ADD_CONFIG_TERM(__type, __name, __val) \ +#define ADD_CONFIG_TERM(__type, __name, __val, __head_terms) \ do { \ struct perf_evsel_config_term *__t; \ \ @@ -883,33 +886,43 @@ do { \ INIT_LIST_HEAD(&__t->list); \ __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \ __t->val.__name = __val; \ - list_add_tail(&__t->list, head_terms); \ + list_add_tail(&__t->list, __head_terms); \ } while (0) +static int get_config_terms(struct list_head *head_config, + struct list_head *head_terms __maybe_unused) +{ struct parse_events_term *term; list_for_each_entry(term, head_config, list) { switch (term->type_term) { case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: - ADD_CONFIG_TERM(PERIOD, period, term->val.num); + ADD_CONFIG_TERM(PERIOD, period, + term->val.num, head_terms); break; case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: - ADD_CONFIG_TERM(FREQ, freq, term->val.num); + ADD_CONFIG_TERM(FREQ, freq, + term->val.num, head_terms); break; case PARSE_EVENTS__TERM_TYPE_TIME: - ADD_CONFIG_TERM(TIME, time, term->val.num); + ADD_CONFIG_TERM(TIME, time, + term->val.num, head_terms); break; case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: - ADD_CONFIG_TERM(CALLGRAPH, callgraph, term->val.str); + ADD_CONFIG_TERM(CALLGRAPH, callgraph, + term->val.str, head_terms); break; case PARSE_EVENTS__TERM_TYPE_STACKSIZE: - ADD_CONFIG_TERM(STACK_USER, stack_user, term->val.num); + ADD_CONFIG_TERM(STACK_USER, stack_user, + term->val.num, head_terms); break; case PARSE_EVENTS__TERM_TYPE_INHERIT: - ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 1 : 0); + ADD_CONFIG_TERM(INHERIT, inherit, + term->val.num ? 1 : 0, head_terms); break; case PARSE_EVENTS__TERM_TYPE_NOINHERIT: - ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 0 : 1); + ADD_CONFIG_TERM(INHERIT, inherit, + term->val.num ? 0 : 1, head_terms); break; default: break; @@ -919,6 +932,21 @@ do { \ return 0; } +static int get_drv_config_terms(struct list_head *head_config, + struct list_head *head_terms) +{ + struct parse_events_term *term; + + list_for_each_entry(term, head_config, list) { + if (term->type_term != PARSE_EVENTS__TERM_TYPE_DRV_CFG) + continue; + + ADD_CONFIG_TERM(DRV_CFG, drv_cfg, term->val.str, head_terms); + } + + return 0; +} + int parse_events_add_tracepoint(struct list_head *list, int *idx, char *sys, char *event, struct parse_events_error *err, @@ -989,6 +1017,7 @@ int parse_events_add_pmu(struct parse_events_evlist *data, struct perf_pmu *pmu; struct perf_evsel *evsel; LIST_HEAD(config_terms); + LIST_HEAD(drv_config_terms); pmu = perf_pmu__find(name); if (!pmu) @@ -1003,7 +1032,8 @@ int parse_events_add_pmu(struct parse_events_evlist *data, if (!head_config) { attr.type = pmu->type; - evsel = __add_event(list, &data->idx, &attr, NULL, pmu->cpus, NULL); + evsel = __add_event(list, &data->idx, &attr, + NULL, pmu->cpus, NULL, NULL); return evsel ? 
0 : -ENOMEM; } @@ -1020,12 +1050,15 @@ int parse_events_add_pmu(struct parse_events_evlist *data, if (get_config_terms(head_config, &config_terms)) return -ENOMEM; + if (get_drv_config_terms(head_config, &drv_config_terms)) + return -ENOMEM; + if (perf_pmu__config(pmu, &attr, head_config, data->error)) return -EINVAL; evsel = __add_event(list, &data->idx, &attr, pmu_event_name(head_config), pmu->cpus, - &config_terms); + &config_terms, &drv_config_terms); if (evsel) { evsel->unit = info.unit; evsel->scale = info.scale; diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index f1a6db107241..09c3ee2df45c 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -68,7 +68,8 @@ enum { PARSE_EVENTS__TERM_TYPE_CALLGRAPH, PARSE_EVENTS__TERM_TYPE_STACKSIZE, PARSE_EVENTS__TERM_TYPE_NOINHERIT, - PARSE_EVENTS__TERM_TYPE_INHERIT + PARSE_EVENTS__TERM_TYPE_INHERIT, + PARSE_EVENTS__TERM_TYPE_DRV_CFG, }; struct parse_events_term { diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index 58c5831ffd5c..de260ed0dd54 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l @@ -53,6 +53,16 @@ static int str(yyscan_t scanner, int token) return token; } +static int drv_str(yyscan_t scanner, int token) +{ + YYSTYPE *yylval = parse_events_get_lval(scanner); + char *text = parse_events_get_text(scanner); + + /* Strip off the '@' */ + yylval->str = strdup(text + 1); + return token; +} + #define REWIND(__alloc) \ do { \ YYSTYPE *__yylval = parse_events_get_lval(yyscanner); \ @@ -123,6 +133,7 @@ num_hex 0x[a-fA-F0-9]+ num_raw_hex [a-fA-F0-9]+ name [a-zA-Z_*?][a-zA-Z0-9_*?.]* name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.]* +drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)? /* If you add a modifier you need to update check_modifier() */ modifier_event [ukhpPGHSDI]+ modifier_bp [rwx]{1,3} @@ -196,6 +207,7 @@ no-inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); } , { return ','; } "/" { BEGIN(INITIAL); return '/'; } {name_minus} { return str(yyscanner, PE_NAME); } +@{drv_cfg_term} { return drv_str(yyscanner, PE_DRV_CFG_TERM); } } <mem>{ diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index ad379968d4c1..d35c10275ba4 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -48,6 +48,7 @@ static inc_group_count(struct list_head *list, %token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP %token PE_ERROR %token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT +%token PE_DRV_CFG_TERM %type <num> PE_VALUE %type <num> PE_VALUE_SYM_HW %type <num> PE_VALUE_SYM_SW @@ -62,6 +63,7 @@ static inc_group_count(struct list_head *list, %type <str> PE_MODIFIER_BP %type <str> PE_EVENT_NAME %type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT +%type <str> PE_DRV_CFG_TERM %type <num> value_sym %type <head> event_config %type <term> event_term @@ -573,6 +575,15 @@ PE_TERM ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1, &@1, NULL)); $$ = term; } +| +PE_DRV_CFG_TERM +{ + struct parse_events_term *term; + + ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_DRV_CFG, + $1, $1, &@1, NULL)); + $$ = term; +} sep_dc: ':' | diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index a8e825fca42a..df49c0035170 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -806,6 +806,8 @@ static void 
python_process_general_event(struct perf_sample *sample, PyInt_FromLong(sample->cpu)); pydict_set_item_string_decref(dict_sample, "ip", PyLong_FromUnsignedLongLong(sample->ip)); + pydict_set_item_string_decref(dict_sample, "addr", + PyLong_FromUnsignedLongLong(sample->addr)); pydict_set_item_string_decref(dict_sample, "time", PyLong_FromUnsignedLongLong(sample->time)); pydict_set_item_string_decref(dict_sample, "period", diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 468de95bc8bb..010ff659b82f 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -224,14 +224,6 @@ static int process_event_stub(struct perf_tool *tool __maybe_unused, return 0; } -static int process_build_id_stub(struct perf_tool *tool __maybe_unused, - union perf_event *event __maybe_unused, - struct perf_session *session __maybe_unused) -{ - dump_printf(": unhandled!\n"); - return 0; -} - static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, union perf_event *event __maybe_unused, struct ordered_events *oe __maybe_unused) @@ -244,23 +236,6 @@ static int process_finished_round(struct perf_tool *tool, union perf_event *event, struct ordered_events *oe); -static int process_id_index_stub(struct perf_tool *tool __maybe_unused, - union perf_event *event __maybe_unused, - struct perf_session *perf_session - __maybe_unused) -{ - dump_printf(": unhandled!\n"); - return 0; -} - -static int process_event_auxtrace_info_stub(struct perf_tool *tool __maybe_unused, - union perf_event *event __maybe_unused, - struct perf_session *session __maybe_unused) -{ - dump_printf(": unhandled!\n"); - return 0; -} - static int skipn(int fd, off_t n) { char buf[4096]; @@ -287,10 +262,9 @@ static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused, return event->auxtrace.size; } -static -int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused, - union perf_event *event __maybe_unused, - struct perf_session *session __maybe_unused) +static int process_event_op2_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_session *session __maybe_unused) { dump_printf(": unhandled!\n"); return 0; @@ -331,7 +305,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool) if (tool->tracing_data == NULL) tool->tracing_data = process_event_synth_tracing_data_stub; if (tool->build_id == NULL) - tool->build_id = process_build_id_stub; + tool->build_id = process_event_op2_stub; if (tool->finished_round == NULL) { if (tool->ordered_events) tool->finished_round = process_finished_round; @@ -339,13 +313,13 @@ void perf_tool__fill_defaults(struct perf_tool *tool) tool->finished_round = process_finished_round_stub; } if (tool->id_index == NULL) - tool->id_index = process_id_index_stub; + tool->id_index = process_event_op2_stub; if (tool->auxtrace_info == NULL) - tool->auxtrace_info = process_event_auxtrace_info_stub; + tool->auxtrace_info = process_event_op2_stub; if (tool->auxtrace == NULL) tool->auxtrace = process_event_auxtrace_stub; if (tool->auxtrace_error == NULL) - tool->auxtrace_error = process_event_auxtrace_error_stub; + tool->auxtrace_error = process_event_op2_stub; } static void swap_sample_id_all(union perf_event *event, void *data) diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c index 48906333a858..9be16712ce74 100644 --- a/tools/perf/util/symbol-minimal.c +++ b/tools/perf/util/symbol-minimal.c @@ -344,7 +344,7 @@ int dso__load_sym(struct dso *dso, struct map *map 
__maybe_unused, if (ret >= 0) dso->is_64_bit = ret; - if (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0) { + if ((!dso->has_build_id) && (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0)) { dso__set_build_id(dso, build_id); } return 0; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index cd08027a6d2c..1d0d8bff4a5b 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1465,7 +1465,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) * Read the build id if possible. This is required for * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work */ - if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0) + if ((!dso->has_build_id) && + (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0)) dso__set_build_id(dso, build_id); /* diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c index f0b08a2a48ba..7d31d8c5b9ea 100644 --- a/virt/kvm/irqchip.c +++ b/virt/kvm/irqchip.c @@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm, irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, lockdep_is_held(&kvm->irq_lock)); - if (gsi < irq_rt->nr_rt_entries) { + if (irq_rt && gsi < irq_rt->nr_rt_entries) { hlist_for_each_entry(e, &irq_rt->map[gsi], link) { entries[n] = *e; ++n; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index fefbf2d148ef..510df220d1b5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2861,7 +2861,7 @@ static long kvm_vm_ioctl(struct file *filp, if (copy_from_user(&routing, argp, sizeof(routing))) goto out; r = -EINVAL; - if (routing.nr >= KVM_MAX_IRQ_ROUTES) + if (routing.nr > KVM_MAX_IRQ_ROUTES) goto out; if (routing.flags) goto out; |
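Usage sketch for the driver-config terms introduced above (the PMU name and the
term itself are illustrative only, not taken from this patch): a term prefixed
with '@' inside an event's config section is collected by get_drv_config_terms(),
carried on evsel->drv_config_terms, and the string after the '@' is later passed
to the PMU driver through the PERF_EVENT_IOC_SET_DRV_CONFIGS ioctl, e.g.:

	perf record -e some_pmu/@myterm=value/ -- <workload>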
