diff options
484 files changed, 16260 insertions, 9353 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index e39561d41f8b..44ad7a310c7d 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX @@ -437,6 +437,8 @@ sysrq.txt - info on the magic SysRq key. target/ - directory with info on generating TCM v4 fabric .ko modules +tee.txt + - info on the TEE subsystem and drivers this_cpu_ops.txt - List rationale behind and the way to use this_cpu operations. thermal/ diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 500c60403653..2baed1151eac 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -51,6 +51,18 @@ Description: Controls the dirty page count condition for the in-place-update policies. +What: /sys/fs/f2fs/<disk>/min_hot_blocks +Date: March 2017 +Contact: "Jaegeuk Kim" <jaegeuk@kernel.org> +Description: + Controls the dirty page count condition for redefining hot data. + +What: /sys/fs/f2fs/<disk>/min_ssr_sections +Date: October 2017 +Contact: "Chao Yu" <yuchao0@huawei.com> +Description: + Controls the fee section threshold to trigger SSR allocation. + What: /sys/fs/f2fs/<disk>/max_small_discards Date: November 2013 Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com> @@ -96,6 +108,18 @@ Contact: "Jaegeuk Kim" <jaegeuk@kernel.org> Description: Controls the checkpoint timing. +What: /sys/fs/f2fs/<disk>/idle_interval +Date: January 2016 +Contact: "Jaegeuk Kim" <jaegeuk@kernel.org> +Description: + Controls the idle timing. + +What: /sys/fs/f2fs/<disk>/iostat_enable +Date: August 2017 +Contact: "Chao Yu" <yuchao0@huawei.com> +Description: + Controls to enable/disable IO stat. + What: /sys/fs/f2fs/<disk>/ra_nid_pages Date: October 2015 Contact: "Chao Yu" <chao2.yu@samsung.com> @@ -116,6 +140,12 @@ Contact: "Shuoran Liu" <liushuoran@huawei.com> Description: Shows total written kbytes issued to disk. 
+What: /sys/fs/f2fs/<disk>/feature +Date: July 2017 +Contact: "Jaegeuk Kim" <jaegeuk@kernel.org> +Description: + Shows all enabled features in current device. + What: /sys/fs/f2fs/<disk>/inject_rate Date: May 2016 Contact: "Sheng Yong" <shengyong1@huawei.com> @@ -132,7 +162,18 @@ What: /sys/fs/f2fs/<disk>/reserved_blocks Date: June 2017 Contact: "Chao Yu" <yuchao0@huawei.com> Description: - Controls current reserved blocks in system. + Controls target reserved blocks in system, the threshold + is soft, it could exceed current available user space. + +What: /sys/fs/f2fs/<disk>/current_reserved_blocks +Date: October 2017 +Contact: "Yunlong Song" <yunlong.song@huawei.com> +Contact: "Chao Yu" <yuchao0@huawei.com> +Description: + Shows current reserved blocks in system, it may be temporarily + smaller than target_reserved_blocks, but will gradually + increase to target_reserved_blocks when more free blocks are + freed by user later. What: /sys/fs/f2fs/<disk>/gc_urgent Date: August 2017 diff --git a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt new file mode 100644 index 000000000000..d38834c67dff --- /dev/null +++ b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt @@ -0,0 +1,31 @@ +OP-TEE Device Tree Bindings + +OP-TEE is a piece of software using hardware features to provide a Trusted +Execution Environment. The security can be provided with ARM TrustZone, but +also by virtualization or a separate chip. + +We're using "linaro" as the first part of the compatible property for +the reference implementation maintained by Linaro. + +* OP-TEE based on ARM TrustZone required properties: + +- compatible : should contain "linaro,optee-tz" + +- method : The method of calling the OP-TEE Trusted OS. 
Permitted + values are: + + "smc" : SMC #0, with the register assignments specified + in drivers/tee/optee/optee_smc.h + + "hvc" : HVC #0, with the register assignments specified + in drivers/tee/optee/optee_smc.h + + + +Example: + firmware { + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + }; diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt index 42e97f765bee..c7268ef07f59 100644 --- a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt +++ b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt @@ -80,6 +80,13 @@ Optional properties for WLED: or disabled. - qcom,auto-calibration-enable : A boolean property which enables auto-calibration of the WLED sink configuration. +- qcom,wled-brightness-map : Array of brightness map codes of size 256. + These codes will be mapped to the brightness + level requested in the scale of 0-4095. Code + entry is of 16 bit size. +- qcom,wled-stepper-en : A boolean property to specify if stepper algorithm + needs to be enabled. This needs the brightness map + table to be specified. Optional properties if 'qcom,disp-type-amoled' is mentioned in DT: - qcom,loop-comp-res-kohm : control to select the compensation resistor in kohm. default is 320. @@ -123,4 +130,5 @@ Example: qcom,en-phase-stag; qcom,led-strings-list = [00 01 02 03]; qcom,en-ext-pfet-sc-pro; + qcom,wled-brightness-map = /bits/ 16 <0 . . 4095>; }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 6cca6f49c194..f9097941c192 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -133,6 +133,7 @@ lacie LaCie lantiq Lantiq Semiconductor lenovo Lenovo Group Ltd. lg LG Corporation +linaro Linaro Limited linux Linux-specific binding lsi LSI Corp. 
(LSI Logic) lltc Linear Technology Corporation diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 91261a32a573..b5ce7b6c3576 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -307,6 +307,7 @@ Code Seq#(hex) Include File Comments 0xA3 80-8F Port ACL in development: <mailto:tlewis@mindspring.com> 0xA3 90-9F linux/dtlk.h +0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem 0xAA 00-3F linux/uapi/linux/userfaultfd.h 0xAB 00-1F linux/nbd.h 0xAC 00-1F linux/raw.h diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index f6851d94c1af..40dc329f142b 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1239,6 +1239,10 @@ igmp_link_local_mcast_reports - BOOLEAN 224.0.0.X range. Default TRUE +nf_ipv4_defrag_skip - BOOLEAN + Skip defragmentation per interface if set. + Default : 0 (always defrag) + Alexey Kuznetsov. kuznet@ms2.inr.ac.ru diff --git a/Documentation/tee.txt b/Documentation/tee.txt new file mode 100644 index 000000000000..718599357596 --- /dev/null +++ b/Documentation/tee.txt @@ -0,0 +1,118 @@ +TEE subsystem +This document describes the TEE subsystem in Linux. + +A TEE (Trusted Execution Environment) is a trusted OS running in some +secure environment, for example, TrustZone on ARM CPUs, or a separate +secure co-processor etc. A TEE driver handles the details needed to +communicate with the TEE. + +This subsystem deals with: + +- Registration of TEE drivers + +- Managing shared memory between Linux and the TEE + +- Providing a generic API to the TEE + +The TEE interface +================= + +include/uapi/linux/tee.h defines the generic interface to a TEE. + +User space (the client) connects to the driver by opening /dev/tee[0-9]* or +/dev/teepriv[0-9]*. + +- TEE_IOC_SHM_ALLOC allocates shared memory and returns a file descriptor + which user space can mmap. 
When user space doesn't need the file + descriptor any more, it should be closed. When shared memory isn't needed + any longer it should be unmapped with munmap() to allow the reuse of + memory. + +- TEE_IOC_VERSION lets user space know which TEE this driver handles and + the its capabilities. + +- TEE_IOC_OPEN_SESSION opens a new session to a Trusted Application. + +- TEE_IOC_INVOKE invokes a function in a Trusted Application. + +- TEE_IOC_CANCEL may cancel an ongoing TEE_IOC_OPEN_SESSION or TEE_IOC_INVOKE. + +- TEE_IOC_CLOSE_SESSION closes a session to a Trusted Application. + +There are two classes of clients, normal clients and supplicants. The latter is +a helper process for the TEE to access resources in Linux, for example file +system access. A normal client opens /dev/tee[0-9]* and a supplicant opens +/dev/teepriv[0-9]. + +Much of the communication between clients and the TEE is opaque to the +driver. The main job for the driver is to receive requests from the +clients, forward them to the TEE and send back the results. In the case of +supplicants the communication goes in the other direction, the TEE sends +requests to the supplicant which then sends back the result. + +OP-TEE driver +============= + +The OP-TEE driver handles OP-TEE [1] based TEEs. Currently it is only the ARM +TrustZone based OP-TEE solution that is supported. + +Lowest level of communication with OP-TEE builds on ARM SMC Calling +Convention (SMCCC) [2], which is the foundation for OP-TEE's SMC interface +[3] used internally by the driver. Stacked on top of that is OP-TEE Message +Protocol [4]. + +OP-TEE SMC interface provides the basic functions required by SMCCC and some +additional functions specific for OP-TEE. 
The most interesting functions are: + +- OPTEE_SMC_FUNCID_CALLS_UID (part of SMCCC) returns the version information + which is then returned by TEE_IOC_VERSION + +- OPTEE_SMC_CALL_GET_OS_UUID returns the particular OP-TEE implementation, used + to tell, for instance, a TrustZone OP-TEE apart from an OP-TEE running on a + separate secure co-processor. + +- OPTEE_SMC_CALL_WITH_ARG drives the OP-TEE message protocol + +- OPTEE_SMC_GET_SHM_CONFIG lets the driver and OP-TEE agree on which memory + range to used for shared memory between Linux and OP-TEE. + +The GlobalPlatform TEE Client API [5] is implemented on top of the generic +TEE API. + +Picture of the relationship between the different components in the +OP-TEE architecture. + + User space Kernel Secure world + ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~ + +--------+ +-------------+ + | Client | | Trusted | + +--------+ | Application | + /\ +-------------+ + || +----------+ /\ + || |tee- | || + || |supplicant| \/ + || +----------+ +-------------+ + \/ /\ | TEE Internal| + +-------+ || | API | + + TEE | || +--------+--------+ +-------------+ + | Client| || | TEE | OP-TEE | | OP-TEE | + | API | \/ | subsys | driver | | Trusted OS | + +-------+----------------+----+-------+----+-----------+-------------+ + | Generic TEE API | | OP-TEE MSG | + | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) | + +-----------------------------+ +------------------------------+ + +RPC (Remote Procedure Call) are requests from secure world to kernel driver +or tee-supplicant. An RPC is identified by a special range of SMCCC return +values from OPTEE_SMC_CALL_WITH_ARG. RPC messages which are intended for the +kernel are handled by the kernel driver. Other RPC messages will be forwarded to +tee-supplicant without further involvement of the driver, except switching +shared memory buffer representation. 
+ +References: +[1] https://github.com/OP-TEE/optee_os +[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html +[3] drivers/tee/optee/optee_smc.h +[4] drivers/tee/optee/optee_msg.h +[5] http://www.globalplatform.org/specificationsdevice.asp look for + "TEE Client API Specification v1.0" and click download. diff --git a/MAINTAINERS b/MAINTAINERS index 167a1a751339..c34c64ce4c6f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7955,6 +7955,11 @@ F: arch/*/oprofile/ F: drivers/oprofile/ F: include/linux/oprofile.h +OP-TEE DRIVER +M: Jens Wiklander <jens.wiklander@linaro.org> +S: Maintained +F: drivers/tee/optee/ + ORACLE CLUSTER FILESYSTEM 2 (OCFS2) M: Mark Fasheh <mfasheh@suse.com> M: Joel Becker <jlbec@evilplan.org> @@ -9382,6 +9387,14 @@ F: drivers/hwtracing/stm/ F: include/linux/stm.h F: include/uapi/linux/stm.h +TEE SUBSYSTEM +M: Jens Wiklander <jens.wiklander@linaro.org> +S: Maintained +F: include/linux/tee_drv.h +F: include/uapi/linux/tee.h +F: drivers/tee/ +F: Documentation/tee.txt + THUNDERBOLT DRIVER M: Andreas Noever <andreas.noever@gmail.com> S: Maintained @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 97 +SUBLEVEL = 105 EXTRAVERSION = NAME = Blurry Fish Butt diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index d23e2524d694..be9c37e89be1 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -142,10 +142,11 @@ }; scm_conf: scm_conf@0 { - compatible = "syscon"; + compatible = "syscon", "simple-bus"; reg = <0x0 0x800>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0 0x800>; scm_clocks: clocks { #address-cells = <1>; diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi index 7988b42e5764..c226c3d952d8 100644 --- a/arch/arm/boot/dts/dm814x.dtsi +++ b/arch/arm/boot/dts/dm814x.dtsi @@ -138,7 +138,7 @@ }; uart1: uart@20000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart1"; reg = <0x20000 0x2000>; 
clock-frequency = <48000000>; @@ -148,7 +148,7 @@ }; uart2: uart@22000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart2"; reg = <0x22000 0x2000>; clock-frequency = <48000000>; @@ -158,7 +158,7 @@ }; uart3: uart@24000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart3"; reg = <0x24000 0x2000>; clock-frequency = <48000000>; @@ -189,10 +189,11 @@ ranges = <0 0x160000 0x16d000>; scm_conf: scm_conf@0 { - compatible = "syscon"; + compatible = "syscon", "simple-bus"; reg = <0x0 0x800>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0 0x800>; scm_clocks: clocks { #address-cells = <1>; diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index eee636de4cd8..e526928e6e96 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi @@ -347,7 +347,7 @@ }; uart1: uart@48020000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart1"; reg = <0x48020000 0x2000>; clock-frequency = <48000000>; @@ -357,7 +357,7 @@ }; uart2: uart@48022000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart2"; reg = <0x48022000 0x2000>; clock-frequency = <48000000>; @@ -367,7 +367,7 @@ }; uart3: uart@48024000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart3"; reg = <0x48024000 0x2000>; clock-frequency = <48000000>; diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts index 5b0430041ec6..fec92cd36ae3 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts @@ -88,7 +88,7 @@ interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>; pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins &mmc1_cd>; - cd-gpios = <&gpio4 31 IRQ_TYPE_LEVEL_LOW>; /* gpio127 */ + cd-gpios = <&gpio4 31 
GPIO_ACTIVE_LOW>; /* gpio127 */ vmmc-supply = <&vmmc1>; bus-width = <4>; cap-power-off-card; diff --git a/arch/arm/boot/dts/qcom/dsi-panel-lgd-incell-sw49106-fhd-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-lgd-incell-sw49106-fhd-video.dtsi new file mode 100644 index 000000000000..8db5317f2106 --- /dev/null +++ b/arch/arm/boot/dts/qcom/dsi-panel-lgd-incell-sw49106-fhd-video.dtsi @@ -0,0 +1,115 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&mdss_mdp { + dsi_lgd_incell_sw49106_fhd_video: + qcom,mdss_dsi_lgd_incell_sw49106_fhd_video { + qcom,mdss-dsi-panel-name = + "lgd incell sw49106 fhd video"; + qcom,mdss-dsi-panel-type = "dsi_video_mode"; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-virtual-channel-id = <0>; + qcom,mdss-dsi-stream = <0>; + qcom,mdss-dsi-panel-width = <1080>; + qcom,mdss-dsi-panel-height = <2160>; + qcom,mdss-dsi-h-front-porch = <8>; + qcom,mdss-dsi-h-back-porch = <8>; + qcom,mdss-dsi-h-pulse-width = <4>; + qcom,mdss-dsi-h-sync-skew = <0>; + qcom,mdss-dsi-v-back-porch = <92>; + qcom,mdss-dsi-v-front-porch = <170>; + qcom,mdss-dsi-v-pulse-width = <1>; + qcom,mdss-dsi-h-left-border = <0>; + qcom,mdss-dsi-h-right-border = <0>; + qcom,mdss-dsi-v-top-border = <0>; + qcom,mdss-dsi-v-bottom-border = <0>; + qcom,mdss-dsi-bpp = <24>; + qcom,mdss-dsi-underflow-color = <0xff>; + qcom,mdss-dsi-border-color = <0>; + qcom,mdss-dsi-h-sync-pulse = <0>; + qcom,mdss-dsi-traffic-mode = "burst_mode"; + qcom,mdss-dsi-bllp-eof-power-mode; + 
qcom,mdss-dsi-bllp-power-mode; + qcom,mdss-dsi-lane-0-state; + qcom,mdss-dsi-lane-1-state; + qcom,mdss-dsi-lane-2-state; + qcom,mdss-dsi-lane-3-state; + qcom,mdss-dsi-panel-timings = [F8 3C 28 00 6E 72 2E + 40 30 03 04 00]; + qcom,mdss-dsi-t-clk-post = <0x02>; + qcom,mdss-dsi-t-clk-pre = <0x2D>; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-dma-trigger = "trigger_sw"; + qcom,mdss-dsi-mdp-trigger = "none"; + qcom,mdss-dsi-on-command = [05 01 00 00 0B 00 02 35 00 + 15 01 00 00 00 00 02 36 00 + 15 01 00 00 00 00 02 51 FF + 15 01 00 00 00 00 02 53 24 + 15 01 00 00 00 00 02 55 80 + 39 01 00 00 00 00 02 B0 AC + 39 01 00 00 00 00 06 B1 46 00 80 14 85 + 39 01 00 00 00 00 08 B3 05 08 14 00 1C 00 02 + 39 01 00 00 00 00 10 B4 83 08 00 04 04 04 04 00 + 00 00 00 00 00 00 00 + 39 01 00 00 00 00 13 B5 03 1E 0B 02 29 00 00 00 + 00 04 00 24 00 10 10 10 10 00 + 39 01 00 00 00 00 0A B6 00 72 39 13 08 67 00 60 46 + 39 01 00 00 00 00 05 B7 00 50 37 04 + 39 01 00 00 00 00 0C B8 70 38 14 ED 08 04 00 01 + 0A A0 00 + 39 01 00 00 00 00 06 C0 8A 8F 18 C1 12 + 39 01 00 00 00 00 07 C1 01 00 30 C2 C7 0F + 39 01 00 00 00 00 03 C2 2A 00 + 39 01 00 00 00 00 07 C3 05 0E 0E 50 88 09 + 39 01 00 00 00 00 04 C4 A2 E8 F4 + 39 01 00 00 00 00 05 C5 C2 2A 4E 08 + 39 01 00 00 00 00 03 C6 15 01 + 39 01 00 00 00 00 07 CA 00 00 03 84 55 F5 + 39 01 00 00 00 00 03 CB 3F A0 + 39 01 00 00 00 00 09 CC F0 03 10 55 11 FC 34 34 + 39 01 00 00 00 00 07 CD 11 50 50 90 00 F3 + 39 01 00 00 00 00 07 CE A0 28 28 34 00 AB + 39 01 00 00 00 00 10 D0 10 1B 22 2A 35 42 4A 53 4D + 44 34 23 10 03 81 + 39 01 00 00 00 00 10 D1 09 15 1C 25 31 3F 47 52 4F + 45 34 22 0E 01 83 + 39 01 00 00 00 00 10 D2 10 1B 22 29 34 41 49 52 4E + 44 34 23 10 03 81 + 39 01 00 00 00 00 10 D3 09 15 1C 24 30 3E 46 51 50 + 45 34 22 0E 01 83 + 39 01 00 00 00 00 10 D4 10 1B 22 2A 35 42 4A 53 4D + 44 34 23 10 03 81 + 39 01 00 00 00 00 10 D5 09 15 1C 25 31 3F 47 52 4F + 45 34 22 0E 01 83 + 39 01 00 00 00 00 0D E5 24 
23 11 10 00 0A 08 06 04 + 11 0E 23 + 39 01 00 00 00 00 0D E6 24 23 11 10 01 0B 09 07 05 + 11 0E 23 + 39 01 00 00 00 00 07 E7 15 16 17 18 19 1A + 39 01 00 00 00 00 07 E8 1B 1C 1D 1E 1F 20 + 39 01 00 00 00 00 05 ED 00 01 53 0C + 39 01 00 00 00 00 03 F0 B2 00 + 39 01 00 00 00 00 05 F2 01 00 17 00 + 39 01 00 00 64 00 07 F3 00 50 90 C9 00 01 + 05 01 00 00 78 00 02 11 00 + 05 01 00 00 05 00 02 29 00]; + qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00 + 05 01 00 00 64 00 02 10 00]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-off-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-reset-sequence = <1 200>, <0 200>, <1 200>; + qcom,mdss-dsi-tx-eot-append; + qcom,mdss-dsi-post-init-delay = <1>; + }; +}; diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi index 83f0bbe86410..eb1f821234ba 100644 --- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -815,7 +815,8 @@ mmagic-supply = <&gdsc_mmagic_camss>; gdscr-supply = <&gdsc_camss_top>; vfe0-vdd-supply = <&gdsc_vfe0>; - qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd"; + vfe1-vdd-supply = <&gdsc_vfe1>; + qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd", "vfe1-vdd"; clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>, <&clock_mmss clk_camss_top_ahb_clk>, <&clock_mmss clk_cci_clk_src>, @@ -825,12 +826,16 @@ <&clock_mmss clk_mmagic_camss_axi_clk>, <&clock_mmss clk_camss_vfe_ahb_clk>, <&clock_mmss clk_camss_vfe0_ahb_clk>, + <&clock_mmss clk_camss_vfe1_ahb_clk>, <&clock_mmss clk_camss_vfe_axi_clk>, <&clock_mmss clk_camss_vfe0_stream_clk>, + <&clock_mmss clk_camss_vfe1_stream_clk>, <&clock_mmss clk_smmu_vfe_axi_clk>, <&clock_mmss clk_smmu_vfe_ahb_clk>, <&clock_mmss clk_camss_csi_vfe0_clk>, + <&clock_mmss clk_camss_csi_vfe1_clk>, <&clock_mmss clk_vfe0_clk_src>, + <&clock_mmss clk_vfe1_clk_src>, <&clock_mmss clk_camss_csi_vfe0_clk>, <&clock_mmss clk_camss_csi2_ahb_clk>, <&clock_mmss clk_camss_csi2_clk>, @@ -839,7 +844,8 @@ <&clock_mmss clk_camss_csi2phytimer_clk>, <&clock_mmss clk_camss_csi2rdi_clk>, <&clock_mmss clk_camss_ispif_ahb_clk>, - <&clock_mmss clk_camss_vfe0_clk>; + <&clock_mmss clk_camss_vfe0_clk>, + <&clock_mmss clk_camss_vfe1_clk>; clock-names = "mmss_mmagic_ahb_clk", "camss_top_ahb_clk", @@ -850,12 +856,16 @@ "mmagic_camss_axi_clk", "camss_vfe_ahb_clk", "camss_vfe0_ahb_clk", + "camss_vfe1_ahb_clk", "camss_vfe_axi_clk", "camss_vfe0_stream_clk", + "camss_vfe1_stream_clk", "smmu_vfe_axi_clk", "smmu_vfe_ahb_clk", "camss_csi_vfe0_clk", + "camss_csi_vfe1_clk", "vfe0_clk_src", + "vfe1_clk_src", "camss_csi_vfe0_clk", "camss_csi2_ahb_clk", "camss_csi2_clk", @@ -864,7 +874,8 @@ "camss_csi2phytimer_clk", "camss_csi2rdi_clk", "camss_ispif_ahb_clk", - "clk_camss_vfe0_clk"; + "clk_camss_vfe0_clk", + "clk_camss_vfe1_clk"; 
qcom,clock-rates = <19200000 19200000 @@ -875,12 +886,16 @@ 0 0 0 + 0 320000000 0 0 0 0 - 19200000 + 0 + 0 + 320000000 + 320000000 0 0 200000000 @@ -889,6 +904,7 @@ 200000000 200000000 0 + 100000000 100000000>; }; @@ -1158,6 +1174,10 @@ <&pm8994_gpios 7 0>; /* INT3 */ }; + qcom,tv-tuner { + compatible = "qcom,tv-tuner"; + }; + qcom,msm-ba { compatible = "qcom,msm-ba"; qcom,ba-input-profile-0 { @@ -1179,6 +1199,16 @@ qcom,ba-node = <1>; /* ba node */ qcom,user-type = <1>; /* user type */ }; + + qcom,ba-input-profile-2 { + qcom,type = <8>; /* input type */ + qcom,name = "TUNER-2"; /* input name */ + qcom,ba-input = <16>; /* ba input id */ + qcom,ba-output = <0>; /* ba output id */ + qcom,sd-name = "tv-tuner"; /* sd name */ + qcom,ba-node = <2>; /* ba node */ + qcom,user-type = <1>; /* user type */ + }; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi index e4d18854f361..8eee428355db 100644 --- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -580,7 +580,8 @@ mmagic-supply = <&gdsc_mmagic_camss>; gdscr-supply = <&gdsc_camss_top>; vfe0-vdd-supply = <&gdsc_vfe0>; - qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd"; + vfe1-vdd-supply = <&gdsc_vfe1>; + qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd", "vfe1-vdd"; clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>, <&clock_mmss clk_camss_top_ahb_clk>, <&clock_mmss clk_cci_clk_src>, @@ -590,12 +591,16 @@ <&clock_mmss clk_mmagic_camss_axi_clk>, <&clock_mmss clk_camss_vfe_ahb_clk>, <&clock_mmss clk_camss_vfe0_ahb_clk>, + <&clock_mmss clk_camss_vfe1_ahb_clk>, <&clock_mmss clk_camss_vfe_axi_clk>, <&clock_mmss clk_camss_vfe0_stream_clk>, + <&clock_mmss clk_camss_vfe1_stream_clk>, <&clock_mmss clk_smmu_vfe_axi_clk>, <&clock_mmss clk_smmu_vfe_ahb_clk>, <&clock_mmss clk_camss_csi_vfe0_clk>, + <&clock_mmss clk_camss_csi_vfe1_clk>, <&clock_mmss clk_vfe0_clk_src>, + <&clock_mmss clk_vfe1_clk_src>, <&clock_mmss clk_camss_csi_vfe0_clk>, <&clock_mmss clk_camss_csi2_ahb_clk>, <&clock_mmss clk_camss_csi2_clk>, @@ -604,7 +609,8 @@ <&clock_mmss clk_camss_csi2phytimer_clk>, <&clock_mmss clk_camss_csi2rdi_clk>, <&clock_mmss clk_camss_ispif_ahb_clk>, - <&clock_mmss clk_camss_vfe0_clk>; + <&clock_mmss clk_camss_vfe0_clk>, + <&clock_mmss clk_camss_vfe1_clk>; clock-names = "mmss_mmagic_ahb_clk", "camss_top_ahb_clk", @@ -615,12 +621,16 @@ "mmagic_camss_axi_clk", "camss_vfe_ahb_clk", "camss_vfe0_ahb_clk", + "camss_vfe1_ahb_clk", "camss_vfe_axi_clk", "camss_vfe0_stream_clk", + "camss_vfe1_stream_clk", "smmu_vfe_axi_clk", "smmu_vfe_ahb_clk", "camss_csi_vfe0_clk", + "camss_csi_vfe1_clk", "vfe0_clk_src", + "vfe1_clk_src", "camss_csi_vfe0_clk", "camss_csi2_ahb_clk", "camss_csi2_clk", @@ -629,7 +639,8 @@ "camss_csi2phytimer_clk", "camss_csi2rdi_clk", "camss_ispif_ahb_clk", - "clk_camss_vfe0_clk"; + "clk_camss_vfe0_clk", + "clk_camss_vfe1_clk"; 
qcom,clock-rates = <19200000 19200000 @@ -640,12 +651,16 @@ 0 0 0 + 0 320000000 0 0 0 0 - 19200000 + 0 + 0 + 320000000 + 320000000 0 0 200000000 @@ -654,6 +669,7 @@ 200000000 200000000 0 + 100000000 100000000>; }; diff --git a/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi index 4d05ea75b576..7db93928a369 100644 --- a/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -184,6 +184,13 @@ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>; }; +&dsi_lgd_incell_sw49106_fhd_video { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + &mdss_dp_ctrl { pinctrl-names = "mdss_dp_active", "mdss_dp_sleep"; pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>; diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi index 3ffd43bcda60..2cf4a1378778 100644 --- a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -25,6 +25,7 @@ #include "dsi-panel-truly-1080p-cmd.dtsi" #include "dsi-panel-truly-1080p-video.dtsi" #include "dsi-panel-rm67195-amoled-fhd-cmd.dtsi" +#include "dsi-panel-lgd-incell-sw49106-fhd-video.dtsi" &soc { dsi_panel_pwr_supply: dsi_panel_pwr_supply { @@ -325,3 +326,14 @@ qcom,mdss-dsi-t-clk-post = <0x0d>; qcom,mdss-dsi-t-clk-pre = <0x2f>; }; + + +&dsi_lgd_incell_sw49106_fhd_video { + qcom,mdss-dsi-panel-timings-phy-v2 = [24 1f 08 09 05 03 04 a0 + 24 1f 08 09 05 03 04 a0 + 24 1f 08 09 05 03 04 a0 + 24 1f 08 09 05 03 04 a0 + 24 1b 08 09 05 03 04 a0]; + qcom,mdss-dsi-t-clk-post = <0x0d>; + qcom,mdss-dsi-t-clk-pre = <0x30>; +}; diff --git a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi index 50f5d83346c6..8b1596325889 100644 --- a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -184,6 +184,13 @@ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>; }; +&dsi_lgd_incell_sw49106_fhd_video { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + &sdhc_1 { /* device core power supply */ vdd-supply = <&pm660l_l4>; diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi index 67c53e450134..1be84c16b80a 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi @@ -105,13 +105,13 @@ "halt_nc", "rmb_base", "restart_reg"; clocks = <&clock_gcc clk_cxo_clk_src>, - <&clock_gcc clk_gcc_mss_cfg_ahb_clk>, + <&clock_virt clk_gcc_mss_cfg_ahb_clk>, <&clock_gcc clk_pnoc_clk>, - <&clock_gcc clk_gcc_mss_q6_bimc_axi_clk>, - <&clock_gcc clk_gcc_boot_rom_ahb_clk>, - <&clock_gcc clk_gpll0_out_msscc>, - <&clock_gcc clk_gcc_mss_snoc_axi_clk>, - <&clock_gcc clk_gcc_mss_mnoc_bimc_axi_clk>, + <&clock_virt clk_gcc_mss_q6_bimc_axi_clk>, + <&clock_virt clk_gcc_boot_rom_ahb_clk>, + <&clock_virt clk_gpll0_out_msscc>, + <&clock_virt clk_gcc_mss_snoc_axi_clk>, + <&clock_virt clk_gcc_mss_mnoc_bimc_axi_clk>, <&clock_gcc clk_qdss_clk>; clock-names = "xo", "iface_clk", "pnoc_clk", "bus_clk", "mem_clk", "gpll0_mss_clk", "snoc_axi_clk", diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts index 37528ab0625e..187648f50f59 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -113,6 +113,24 @@ /* Up to 800 Mbps */ <45 512 207108 14432000>; }; + + dsrc_vreg: dsrc_vreg { + compatible = "qcom,stub-regulator"; + regulator-name = "dsrc_vreg"; + startup-delay-us = <2410>; + enable-active-high; + gpio = <&tlmm 125 0>; + }; + + qcom,cnss_sdio { + compatible = "qcom,cnss_sdio"; + subsys-name = "AR6320_SDIO"; + vdd-wlan-supply = <&rome_vreg>; + vdd-wlan-xtal-supply = <&pm8994_l30>; + vdd-wlan-io-supply = <&pm8994_s4>; + vdd-wlan-dsrc-supply = <&dsrc_vreg>; + qcom,skip-wlan-en-toggle; + }; }; &spi_9 { diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi index 2076ba08280e..04dae361cade 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi @@ -33,7 +33,7 @@ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>; vdd-supply = <&gdsc_hlos1_vote_lpass_adsp>; - clocks = <&clock_gcc clk_hlos1_vote_lpass_adsp_smmu_clk>; + clocks = <&clock_virt clk_hlos1_vote_lpass_adsp_smmu_clk>; clock-names = "lpass_q6_smmu_clocks"; #clock-cells = <1>; }; diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index c5e1943e5427..09ebd37e01e0 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -221,6 +221,7 @@ CONFIG_SERIO=m CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=6 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c index 6d685298690e..648d5fac9cbf 100644 --- a/arch/arm/crypto/aesbs-glue.c +++ b/arch/arm/crypto/aesbs-glue.c @@ -357,7 +357,7 @@ static struct crypto_alg 
aesbs_algs[] = { { }, { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-neonbs", - .cra_priority = 300, + .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), @@ -377,7 +377,7 @@ static struct crypto_alg aesbs_algs[] = { { }, { .cra_name = "ctr(aes)", .cra_driver_name = "ctr-aes-neonbs", - .cra_priority = 300, + .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct async_helper_ctx), @@ -397,7 +397,7 @@ static struct crypto_alg aesbs_algs[] = { { }, { .cra_name = "xts(aes)", .cra_driver_name = "xts-aes-neonbs", - .cra_priority = 300, + .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index cdefc69c656b..75a371951f1a 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -133,30 +133,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, set_fs(fs); } -static void dump_instr(const char *lvl, struct pt_regs *regs) +static void __dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); const int thumb = thumb_mode(regs); const int width = thumb ? 4 : 8; - mm_segment_t fs; char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; int i; /* - * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. Note that we now dump the - * code first, just in case the backtrace kills us. + * Note that we now dump the code first, just in case the backtrace + * kills us. 
*/ - fs = get_fs(); - set_fs(KERNEL_DS); for (i = -4; i < 1 + !!thumb; i++) { unsigned int val, bad; if (thumb) - bad = __get_user(val, &((u16 *)addr)[i]); + bad = get_user(val, &((u16 *)addr)[i]); else - bad = __get_user(val, &((u32 *)addr)[i]); + bad = get_user(val, &((u32 *)addr)[i]); if (!bad) p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ", @@ -167,8 +163,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) } } printk("%sCode: %s\n", lvl, str); +} - set_fs(fs); +static void dump_instr(const char *lvl, struct pt_regs *regs) +{ + mm_segment_t fs; + + if (!user_mode(regs)) { + fs = get_fs(); + set_fs(KERNEL_DS); + __dump_instr(lvl, regs); + set_fs(fs); + } else { + __dump_instr(lvl, regs); + } } #ifdef CONFIG_ARM_UNWIND diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c index 7b02ed218a42..0c120b2ea2f9 100644 --- a/arch/arm/mach-omap1/dma.c +++ b/arch/arm/mach-omap1/dma.c @@ -31,7 +31,6 @@ #include "soc.h" #define OMAP1_DMA_BASE (0xfffed800) -#define OMAP1_LOGICAL_DMA_CH_COUNT 17 static u32 enable_1510_mode; @@ -311,8 +310,6 @@ static int __init omap1_system_dma_init(void) goto exit_iounmap; } - d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; - /* Valid attributes for omap1 plus processors */ if (cpu_is_omap15xx()) d->dev_caps = ENABLE_1510_MODE; @@ -329,13 +326,14 @@ static int __init omap1_system_dma_init(void) d->dev_caps |= CLEAR_CSR_ON_READ; d->dev_caps |= IS_WORD_16; - if (cpu_is_omap15xx()) - d->chan_count = 9; - else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { - if (!(d->dev_caps & ENABLE_1510_MODE)) - d->chan_count = 16; + /* available logical channels */ + if (cpu_is_omap15xx()) { + d->lch_count = 9; + } else { + if (d->dev_caps & ENABLE_1510_MODE) + d->lch_count = 9; else - d->chan_count = 9; + d->lch_count = 16; } p = dma_plat_info; diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 58144779dec4..1e6e09841707 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ 
b/arch/arm/mach-omap2/pdata-quirks.c @@ -522,7 +522,6 @@ static void pdata_quirks_check(struct pdata_init *quirks) if (of_machine_is_compatible(quirks->compatible)) { if (quirks->fn) quirks->fn(); - break; } quirks++; } diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 9fe8e241335c..e1f6f0daa847 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -126,8 +126,8 @@ static const struct prot_bits section_bits[] = { .val = PMD_SECT_USER, .set = "USR", }, { - .mask = L_PMD_SECT_RDONLY, - .val = L_PMD_SECT_RDONLY, + .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2, + .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2, .set = "ro", .clear = "RW", #elif __LINUX_ARM_ARCH__ >= 6 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index d3d718772381..4d58a6eca48e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -669,8 +669,8 @@ static struct section_perm ro_perms[] = { .start = (unsigned long)_stext, .end = (unsigned long)__init_begin, #ifdef CONFIG_ARM_LPAE - .mask = ~L_PMD_SECT_RDONLY, - .prot = L_PMD_SECT_RDONLY, + .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2), + .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2, #else .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3cb501b93da6..4c72ce5955d9 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -104,6 +104,7 @@ config ARM64 select HAVE_CONTEXT_TRACKING select HAVE_ARM_SMCCC select THREAD_INFO_IN_TASK + select HAVE_ARM_SMCCC help ARM 64-bit (AArch64) Linux support. diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi index 3c92d92278e5..a14a6bb31887 100644 --- a/arch/arm64/boot/dts/broadcom/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi @@ -30,6 +30,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ +/memreserve/ 0x81000000 0x00200000; + #include <dt-bindings/interrupt-controller/arm-gic.h> /memreserve/ 0x84b00000 0x00000008; diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig index bb0b8f71963c..1f9e8ac9a446 100644 --- a/arch/arm64/configs/msm-auto-perf_defconfig +++ b/arch/arm64/configs/msm-auto-perf_defconfig @@ -386,6 +386,7 @@ CONFIG_MSM_AIS_DEBUG=y CONFIG_MSM_AIS_CAMERA_SENSOR=y # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set CONFIG_VIDEO_ADV7481=m +CONFIG_VIDEO_TVTUNER=m CONFIG_QCOM_KGSL=y CONFIG_DRM=y CONFIG_MSM_BA_V4L2=y diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig index b72807cc8644..36833b167c30 100644 --- a/arch/arm64/configs/msm-auto_defconfig +++ b/arch/arm64/configs/msm-auto_defconfig @@ -390,6 +390,7 @@ CONFIG_MSM_AIS_DEBUG=y CONFIG_MSM_AIS_CAMERA_SENSOR=y # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set CONFIG_VIDEO_ADV7481=m +CONFIG_VIDEO_TVTUNER=m CONFIG_QCOM_KGSL=y CONFIG_DRM=y CONFIG_MSM_BA_V4L2=y diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 320dc9c7e4f4..c6aae0b85cef 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -266,7 +266,7 @@ static inline void __kvm_flush_dcache_pud(pud_t pud) kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE); } -#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) +#define kvm_virt_to_phys(x) __pa_symbol(x) void kvm_set_way_flush(struct kvm_vcpu *vcpu); void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index ae11e8fdbfd2..915b2422d9d0 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -188,6 +188,7 @@ static inline void *phys_to_virt(phys_addr_t x) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #define virt_to_pfn(x) 
__phys_to_pfn(__virt_to_phys(x)) +#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) /* * virt_to_page(k) convert a _valid_ virtual address to struct page * diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index ba8343303284..2e36504f56b6 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -55,7 +55,7 @@ static inline void contextidr_thread_switch(struct task_struct *next) */ static inline void cpu_set_reserved_ttbr0(void) { - unsigned long ttbr = virt_to_phys(empty_zero_page); + unsigned long ttbr = __pa_symbol(empty_zero_page); asm( " msr ttbr0_el1, %0 // set TTBR0\n" @@ -129,7 +129,7 @@ static inline void cpu_install_idmap(void) local_flush_tlb_all(); cpu_set_idmap_tcr_t0sz(); - cpu_switch_mm(idmap_pg_dir, &init_mm); + cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm); } /* @@ -144,7 +144,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgd) phys_addr_t pgd_phys = virt_to_phys(pgd); - replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1); + replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1); cpu_install_idmap(); replace_phys(pgd_phys); diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index ecd7dc14330c..6c3848f50fcc 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -120,7 +120,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); * for zero-mapped memory areas etc.. 
*/ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; -#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) +#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page)) #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c index 4b1e5a7a98da..89c96bd1aab9 100644 --- a/arch/arm64/kernel/acpi_parking_protocol.c +++ b/arch/arm64/kernel/acpi_parking_protocol.c @@ -17,6 +17,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/acpi.h> +#include <linux/mm.h> #include <linux/types.h> #include <asm/cpu_ops.h> @@ -102,7 +103,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu) * that read this address need to convert this address to the * Boot-Loader's endianness before jumping. */ - writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point); + writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point); writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id); arch_send_wakeup_ipi_mask(cpumask_of(cpu)); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f75000996e4c..3beb2b5cad6f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -23,6 +23,7 @@ #include <linux/sort.h> #include <linux/stop_machine.h> #include <linux/types.h> +#include <linux/mm.h> #include <asm/cpu.h> #include <asm/cpufeature.h> #include <asm/cpu_ops.h> diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 5f72243e5ba7..a3f8f8bbfc92 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -98,7 +98,7 @@ static void __kprobes *patch_map(void *addr, int fixmap) page = vmalloc_to_page(addr); else if (!module && (IS_ENABLED(CONFIG_DEBUG_RODATA) || IS_ENABLED(CONFIG_KERNEL_TEXT_RDONLY))) - page = virt_to_page(addr); + page = phys_to_page(__pa_symbol(addr)); else return addr; diff --git a/arch/arm64/kernel/io.c 
b/arch/arm64/kernel/io.c index 471fb3cb8c5f..d43ea93dc68d 100644 --- a/arch/arm64/kernel/io.c +++ b/arch/arm64/kernel/io.c @@ -26,8 +26,7 @@ */ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { - while (count && (!IS_ALIGNED((unsigned long)from, 8) || - !IS_ALIGNED((unsigned long)to, 8))) { + while (count && !IS_ALIGNED((unsigned long)from, 8)) { *(u8 *)to = __raw_readb_no_log(from); from++; to++; @@ -55,23 +54,22 @@ EXPORT_SYMBOL(__memcpy_fromio); */ void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { - while (count && (!IS_ALIGNED((unsigned long)to, 8) || - !IS_ALIGNED((unsigned long)from, 8))) { - __raw_writeb_no_log(*(volatile u8 *)from, to); + while (count && !IS_ALIGNED((unsigned long)to, 8)) { + __raw_writeb_no_log(*(u8 *)from, to); from++; to++; count--; } while (count >= 8) { - __raw_writeq_no_log(*(volatile u64 *)from, to); + __raw_writeq_no_log(*(u64 *)from, to); from += 8; to += 8; count -= 8; } while (count) { - __raw_writeb_no_log(*(volatile u8 *)from, to); + __raw_writeb_no_log(*(u8 *)from, to); from++; to++; count--; diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index b9e7c42cd8eb..9006af285c39 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -19,6 +19,7 @@ #include <linux/of.h> #include <linux/smp.h> #include <linux/delay.h> +#include <linux/mm.h> #include <linux/psci.h> #include <uapi/linux/psci.h> @@ -46,7 +47,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu) static int cpu_psci_cpu_boot(unsigned int cpu) { - int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry)); + int err = psci_ops.cpu_on(cpu_logical_map(cpu), + __pa_symbol(secondary_entry)); if (err) pr_err("failed to boot CPU%d (%d)\n", cpu, err); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index b8b40d95ebef..0b93365e8cf0 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -45,6 +45,7 @@ #include <linux/efi.h> #include 
<linux/psci.h> #include <linux/dma-mapping.h> +#include <linux/mm.h> #include <asm/acpi.h> #include <asm/fixmap.h> @@ -212,10 +213,10 @@ static void __init request_standard_resources(void) struct memblock_region *region; struct resource *res; - kernel_code.start = virt_to_phys(_text); - kernel_code.end = virt_to_phys(__init_begin - 1); - kernel_data.start = virt_to_phys(_sdata); - kernel_data.end = virt_to_phys(_end - 1); + kernel_code.start = __pa_symbol(_text); + kernel_code.end = __pa_symbol(__init_begin - 1); + kernel_data.start = __pa_symbol(_sdata); + kernel_data.end = __pa_symbol(_end - 1); for_each_memblock(memory, region) { res = alloc_bootmem_low(sizeof(*res)); @@ -367,9 +368,9 @@ void __init setup_arch(char **cmdline_p) * thread. */ #ifdef CONFIG_THREAD_INFO_IN_TASK - init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page); + init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page); #else - init_thread_info.ttbr0 = virt_to_phys(empty_zero_page); + init_thread_info.ttbr0 = __pa_symbol(empty_zero_page); #endif #endif diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index aef3605a8c47..2ccb883353d9 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c @@ -21,6 +21,7 @@ #include <linux/of.h> #include <linux/smp.h> #include <linux/types.h> +#include <linux/mm.h> #include <asm/cacheflush.h> #include <asm/cpu_ops.h> @@ -96,7 +97,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu) * boot-loader's endianess before jumping. This is mandated by * the boot protocol. 
*/ - writeq_relaxed(__pa(secondary_holding_pen), release_addr); + writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr); __flush_dcache_area((__force void *)release_addr, sizeof(*release_addr)); diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 3b8acfae7797..7e9dd94452bb 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -114,6 +114,7 @@ static struct vm_special_mapping vdso_spec[2]; static int __init vdso_init(void) { int i; + unsigned long pfn; if (memcmp(&vdso_start, "\177ELF", 4)) { pr_err("vDSO is not a valid ELF object!\n"); @@ -131,11 +132,14 @@ static int __init vdso_init(void) return -ENOMEM; /* Grab the vDSO data page. */ - vdso_pagelist[0] = virt_to_page(vdso_data); + vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data)); + /* Grab the vDSO code pages. */ + pfn = sym_to_pfn(&vdso_start); + for (i = 0; i < vdso_pages; i++) - vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE); + vdso_pagelist[i + 1] = pfn_to_page(pfn + i); /* Populate the special mapping structures */ vdso_spec[0] = (struct vm_special_mapping) { @@ -214,8 +218,8 @@ void update_vsyscall(struct timekeeper *tk) if (!use_syscall) { /* tkr_mono.cycle_last == tkr_raw.cycle_last */ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; - vdso_data->raw_time_sec = tk->raw_time.tv_sec; - vdso_data->raw_time_nsec = tk->raw_time.tv_nsec; + vdso_data->raw_time_sec = tk->raw_sec; + vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec; vdso_data->xtime_clock_sec = tk->xtime_sec; vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; /* tkr_raw.xtime_nsec == 0 */ diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index e00b4671bd7c..c97ce91cf023 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -310,7 +310,7 @@ ENTRY(__kernel_clock_getres) b.ne 4f ldr x2, 6f 2: - cbz w1, 3f + cbz x1, 3f stp xzr, x2, [x1] 3: /* res == NULL. 
*/ diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 7fd74d55c68e..30f1a600a7c9 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -34,6 +34,7 @@ #include <linux/dma-contiguous.h> #include <linux/efi.h> #include <linux/swiotlb.h> +#include <linux/mm.h> #include <asm/boot.h> #include <asm/fixmap.h> @@ -191,8 +192,8 @@ void __init arm64_memblock_init(void) * linear mapping. Take care not to clip the kernel which may be * high in memory. */ - memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)), - ULLONG_MAX); + memblock_remove(max_t(u64, memstart_addr + linear_region_size, + __pa_symbol(_end)), ULLONG_MAX); if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) { /* ensure that memstart_addr remains sufficiently aligned */ memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size, @@ -212,7 +213,7 @@ void __init arm64_memblock_init(void) */ bootloader_memory_limit = memblock_end_of_DRAM(); memblock_enforce_memory_limit(memory_limit); - memblock_add(__pa(_text), (u64)(_end - _text)); + memblock_add(__pa_symbol(_text), (u64)(_end - _text)); } if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { @@ -236,7 +237,7 @@ void __init arm64_memblock_init(void) * Register the kernel text, kernel data, initrd, and initial * pagetables with memblock. 
*/ - memblock_reserve(__pa(_text), _end - _text); + memblock_reserve(__pa_symbol(_text), _end - _text); #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) { memblock_reserve(initrd_start, initrd_end - initrd_start); diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 757009daa9ed..03588d136f93 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/start_kernel.h> +#include <linux/mm.h> #include <asm/mmu_context.h> #include <asm/kernel-pgtable.h> @@ -26,6 +27,13 @@ static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE); +/* + * The p*d_populate functions call virt_to_phys implicitly so they can't be used + * directly on kernel symbols (bm_p*d). All the early functions are called too + * early to use lm_alias so __p*d_populate functions must be used to populate + * with the physical address from __pa_symbol. + */ + static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end) { @@ -33,12 +41,13 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long next; if (pmd_none(*pmd)) - pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); + __pmd_populate(pmd, __pa_symbol(kasan_zero_pte), + PMD_TYPE_TABLE); pte = pte_offset_kimg(pmd, addr); do { next = addr + PAGE_SIZE; - set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page), + set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL)); } while (pte++, addr = next, addr != end && pte_none(*pte)); } @@ -51,7 +60,8 @@ static void __init kasan_early_pmd_populate(pud_t *pud, unsigned long next; if (pud_none(*pud)) - pud_populate(&init_mm, pud, kasan_zero_pmd); + __pud_populate(pud, __pa_symbol(kasan_zero_pmd), + PMD_TYPE_TABLE); pmd = pmd_offset_kimg(pud, addr); do { @@ -68,7 +78,8 @@ static void __init kasan_early_pud_populate(pgd_t *pgd, unsigned long next; if (pgd_none(*pgd)) - pgd_populate(&init_mm, pgd, kasan_zero_pud); 
+ __pgd_populate(pgd, __pa_symbol(kasan_zero_pud), + PUD_TYPE_TABLE); pud = pud_offset_kimg(pgd, addr); do { @@ -148,7 +159,7 @@ void __init kasan_init(void) */ memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir)); dsb(ishst); - cpu_replace_ttbr1(tmp_pg_dir); + cpu_replace_ttbr1(lm_alias(tmp_pg_dir)); clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); @@ -199,10 +210,10 @@ void __init kasan_init(void) */ for (i = 0; i < PTRS_PER_PTE; i++) set_pte(&kasan_zero_pte[i], - pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO)); + pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO)); memset(kasan_zero_page, 0, PAGE_SIZE); - cpu_replace_ttbr1(swapper_pg_dir); + cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); /* At this point kasan is fully initialized. Enable error messages */ init_task.kasan_depth = 0; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 8c063d39bc17..b1411e933bb3 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -31,6 +31,7 @@ #include <linux/stop_machine.h> #include <linux/dma-contiguous.h> #include <linux/cma.h> +#include <linux/mm.h> #include <asm/barrier.h> #include <asm/cputype.h> @@ -391,8 +392,8 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt, static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end) { - unsigned long kernel_start = __pa(_text); - unsigned long kernel_end = __pa(__init_begin); + unsigned long kernel_start = __pa_symbol(_text); + unsigned long kernel_end = __pa_symbol(__init_begin); /* * Take care not to create a writable alias for the @@ -456,14 +457,15 @@ void mark_rodata_ro(void) unsigned long section_size; section_size = (unsigned long)_etext - (unsigned long)_text; - create_mapping_late(__pa(_text), (unsigned long)_text, + create_mapping_late(__pa_symbol(_text), (unsigned long)_text, section_size, PAGE_KERNEL_ROX); /* * mark .rodata as read only. Use __init_begin rather than __end_rodata * to cover NOTES and EXCEPTION_TABLE. 
*/ section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata; - create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata, + create_mapping_late(__pa_symbol(__start_rodata), + (unsigned long)__start_rodata, section_size, PAGE_KERNEL_RO); } @@ -480,7 +482,7 @@ void fixup_init(void) static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, pgprot_t prot, struct vm_struct *vma) { - phys_addr_t pa_start = __pa(va_start); + phys_addr_t pa_start = __pa_symbol(va_start); unsigned long size = va_end - va_start; BUG_ON(!PAGE_ALIGNED(pa_start)); @@ -528,7 +530,7 @@ static void __init map_kernel(pgd_t *pgd) */ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START), - __pud(__pa(bm_pmd) | PUD_TYPE_TABLE)); + __pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE)); pud_clear_fixmap(); } else { BUG(); @@ -590,7 +592,7 @@ void __init paging_init(void) */ cpu_replace_ttbr1(__va(pgd_phys)); memcpy(swapper_pg_dir, pgd, PAGE_SIZE); - cpu_replace_ttbr1(swapper_pg_dir); + cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); pgd_clear_fixmap(); memblock_free(pgd_phys, PAGE_SIZE); @@ -599,7 +601,7 @@ void __init paging_init(void) * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd * allocated with it. */ - memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE, + memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE, SWAPPER_DIR_SIZE - PAGE_SIZE); bootmem_init(); @@ -1141,6 +1143,12 @@ static inline pte_t * fixmap_pte(unsigned long addr) return &bm_pte[pte_index(addr)]; } +/* + * The p*d_populate functions call virt_to_phys implicitly so they can't be used + * directly on kernel symbols (bm_p*d). This function is called too early to use + * lm_alias so __p*d_populate functions must be used to populate with the + * physical address from __pa_symbol. 
+ */ void __init early_fixmap_init(void) { pgd_t *pgd; @@ -1150,7 +1158,7 @@ void __init early_fixmap_init(void) pgd = pgd_offset_k(addr); if (CONFIG_PGTABLE_LEVELS > 3 && - !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) { + !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) { /* * We only end up here if the kernel mapping and the fixmap * share the top level pgd entry, which should only happen on @@ -1159,12 +1167,15 @@ void __init early_fixmap_init(void) BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); pud = pud_offset_kimg(pgd, addr); } else { - pgd_populate(&init_mm, pgd, bm_pud); + if (pgd_none(*pgd)) + __pgd_populate(pgd, __pa_symbol(bm_pud), + PUD_TYPE_TABLE); pud = fixmap_pud(addr); } - pud_populate(&init_mm, pud, bm_pmd); + if (pud_none(*pud)) + __pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE); pmd = fixmap_pmd(addr); - pmd_populate_kernel(&init_mm, pmd, bm_pte); + __pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE); /* * The boot-ioremap range spans multiple pmds, for which diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 58fca9ad5fcc..3446b6fb3acb 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -576,6 +576,7 @@ static int __init ar7_register_uarts(void) uart_port.type = PORT_AR7; uart_port.uartclk = clk_get_rate(bus_clk) / 2; uart_port.iotype = UPIO_MEM32; + uart_port.flags = UPF_FIXED_TYPE; uart_port.regshift = 2; uart_port.line = 0; @@ -654,6 +655,10 @@ static int __init ar7_register_devices(void) u32 val; int res; + res = ar7_gpio_init(); + if (res) + pr_warn("unable to register gpios: %d\n", res); + res = ar7_register_uarts(); if (res) pr_err("unable to setup uart(s): %d\n", res); diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c index a23adc49d50f..36aabee9cba4 100644 --- a/arch/mips/ar7/prom.c +++ b/arch/mips/ar7/prom.c @@ -246,8 +246,6 @@ void __init prom_init(void) ar7_init_cmdline(fw_arg0, (char **)fw_arg1); ar7_init_env((struct env_var *)fw_arg2); 
console_config(); - - ar7_gpio_init(); } #define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4))) diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c index d20ae63eb3c2..46abe9e4e0e0 100644 --- a/arch/mips/bcm47xx/leds.c +++ b/arch/mips/bcm47xx/leds.c @@ -330,7 +330,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = { /* Verified on: WRT54GS V1.0 */ static const struct gpio_led bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = { - BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON), BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF), }; diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index 7c26b28bf252..859cf7048347 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -54,7 +54,8 @@ .align 2; \ .type symbol, @function; \ .ent symbol, 0; \ -symbol: .frame sp, 0, ra +symbol: .frame sp, 0, ra; \ + .insn /* * NESTED - declare nested routine entry point @@ -63,8 +64,9 @@ symbol: .frame sp, 0, ra .globl symbol; \ .align 2; \ .type symbol, @function; \ - .ent symbol, 0; \ -symbol: .frame sp, framesize, rpc + .ent symbol, 0; \ +symbol: .frame sp, framesize, rpc; \ + .insn /* * END - mark end of function @@ -86,7 +88,7 @@ symbol: #define FEXPORT(symbol) \ .globl symbol; \ .type symbol, @function; \ -symbol: +symbol: .insn /* * ABS - export absolute symbol diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index 6516e9da5133..b836ddec82b7 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -238,8 +238,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80) #define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15) #define CM_GCR_BASE_CMDEFTGT_SHF 0 #define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0) -#define CM_GCR_BASE_CMDEFTGT_DISABLED 0 -#define CM_GCR_BASE_CMDEFTGT_MEM 1 +#define 
CM_GCR_BASE_CMDEFTGT_MEM 0 +#define CM_GCR_BASE_CMDEFTGT_RESERVED 1 #define CM_GCR_BASE_CMDEFTGT_IOCU0 2 #define CM_GCR_BASE_CMDEFTGT_IOCU1 3 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 477ba026c3e5..163b3449a8de 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -49,9 +49,7 @@ #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) { - /* What the heck is this check doing ? */ - if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map)) - play_dead(); + play_dead(); } #endif diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 24c115a0721a..a3f38e6b7ea1 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -650,6 +650,19 @@ static const struct user_regset_view user_mips64_view = { .n = ARRAY_SIZE(mips64_regsets), }; +#ifdef CONFIG_MIPS32_N32 + +static const struct user_regset_view user_mipsn32_view = { + .name = "mipsn32", + .e_flags = EF_MIPS_ABI2, + .e_machine = ELF_ARCH, + .ei_osabi = ELF_OSABI, + .regsets = mips64_regsets, + .n = ARRAY_SIZE(mips64_regsets), +}; + +#endif /* CONFIG_MIPS32_N32 */ + #endif /* CONFIG_64BIT */ const struct user_regset_view *task_user_regset_view(struct task_struct *task) @@ -661,6 +674,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) return &user_mips_view; #endif +#ifdef CONFIG_MIPS32_N32 + if (test_tsk_thread_flag(task, TIF_32BIT_ADDR)) + return &user_mipsn32_view; +#endif return &user_mips64_view; #endif } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 8acae316f26b..4f9f1ae49213 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -152,6 +152,35 @@ void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_add add_memory_region(start, size, BOOT_MEM_RAM); } +bool __init memory_region_available(phys_addr_t start, phys_addr_t size) +{ + int i; + bool in_ram = false, free = true; + + for (i = 
0; i < boot_mem_map.nr_map; i++) { + phys_addr_t start_, end_; + + start_ = boot_mem_map.map[i].addr; + end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size; + + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RAM: + if (start >= start_ && start + size <= end_) + in_ram = true; + break; + case BOOT_MEM_RESERVED: + if ((start >= start_ && start < end_) || + (start < start_ && start + size >= start_)) + free = false; + break; + default: + continue; + } + } + + return in_ram && free; +} + static void __init print_memory_map(void) { int i; @@ -300,11 +329,19 @@ static void __init bootmem_init(void) #else /* !CONFIG_SGI_IP27 */ +static unsigned long __init bootmap_bytes(unsigned long pages) +{ + unsigned long bytes = DIV_ROUND_UP(pages, 8); + + return ALIGN(bytes, sizeof(long)); +} + static void __init bootmem_init(void) { unsigned long reserved_end; unsigned long mapstart = ~0UL; unsigned long bootmap_size; + bool bootmap_valid = false; int i; /* @@ -385,11 +422,42 @@ static void __init bootmem_init(void) #endif /* - * Initialize the boot-time allocator with low memory only. + * check that mapstart doesn't overlap with any of + * memory regions that have been reserved through eg. 
DTB */ - bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart, - min_low_pfn, max_low_pfn); + bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn); + + bootmap_valid = memory_region_available(PFN_PHYS(mapstart), + bootmap_size); + for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) { + unsigned long mapstart_addr; + + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RESERVED: + mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr + + boot_mem_map.map[i].size); + if (PHYS_PFN(mapstart_addr) < mapstart) + break; + + bootmap_valid = memory_region_available(mapstart_addr, + bootmap_size); + if (bootmap_valid) + mapstart = PHYS_PFN(mapstart_addr); + break; + default: + break; + } + } + if (!bootmap_valid) + panic("No memory area to place a bootmap bitmap"); + + /* + * Initialize the boot-time allocator with low memory only. + */ + if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart, + min_low_pfn, max_low_pfn)) + panic("Unexpected memory size required for bootmap"); for (i = 0; i < boot_mem_map.nr_map; i++) { unsigned long start, end; @@ -438,6 +506,10 @@ static void __init bootmem_init(void) continue; default: /* Not usable memory */ + if (start > min_low_pfn && end < max_low_pfn) + reserve_bootmem(boot_mem_map.map[i].addr, + boot_mem_map.map[i].size, + BOOTMEM_DEFAULT); continue; } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 7fef02a9eb85..4af08c197177 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -64,6 +64,9 @@ EXPORT_SYMBOL(cpu_sibling_map); cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); +static DECLARE_COMPLETION(cpu_starting); +static DECLARE_COMPLETION(cpu_running); + /* * A logcal cpu mask containing only one VPE per core to * reduce the number of IPIs on large MT systems. 
@@ -174,9 +177,12 @@ asmlinkage void start_secondary(void) cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); - cpumask_set_cpu(cpu, &cpu_callin_map); + /* Notify boot CPU that we're starting & ready to sync counters */ + complete(&cpu_starting); + synchronise_count_slave(cpu); + /* The CPU is running and counters synchronised, now mark it online */ set_cpu_online(cpu, true); set_cpu_sibling_map(cpu); @@ -185,6 +191,12 @@ asmlinkage void start_secondary(void) calculate_cpu_foreign_map(); /* + * Notify boot CPU that we're up & online and it can safely return + * from __cpu_up + */ + complete(&cpu_running); + + /* * irq will be enabled in ->smp_finish(), enabling it too early * is dangerous. */ @@ -242,22 +254,23 @@ void smp_prepare_boot_cpu(void) { set_cpu_possible(0, true); set_cpu_online(0, true); - cpumask_set_cpu(0, &cpu_callin_map); } int __cpu_up(unsigned int cpu, struct task_struct *tidle) { mp_ops->boot_secondary(cpu, tidle); - /* - * Trust is futile. We should really have timeouts ... 
- */ - while (!cpumask_test_cpu(cpu, &cpu_callin_map)) { - udelay(100); - schedule(); + /* Wait for CPU to start and be ready to sync counters */ + if (!wait_for_completion_timeout(&cpu_starting, + msecs_to_jiffies(1000))) { + pr_crit("CPU%u: failed to start\n", cpu); + return -EIO; } synchronise_count_master(cpu); + + /* Wait for CPU to finish startup & mark itself online before return */ + wait_for_completion(&cpu_running); return 0; } diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index d78178daea4b..e2fe48dd67b5 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -75,7 +75,7 @@ static struct insn insn_table_MM[] = { { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS }, { insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, { insn_ld, 0, 0 }, - { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM }, + { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM }, { insn_lld, 0, 0 }, { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM }, diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c index 3660dc67d544..f4961bc9a61d 100644 --- a/arch/mips/netlogic/common/irq.c +++ b/arch/mips/netlogic/common/irq.c @@ -275,7 +275,7 @@ asmlinkage void plat_irq_dispatch(void) do_IRQ(nlm_irq_to_xirq(node, i)); } -#ifdef CONFIG_OF +#ifdef CONFIG_CPU_XLP static const struct irq_domain_ops xlp_pic_irq_domain_ops = { .xlate = irq_domain_xlate_onetwocell, }; @@ -348,7 +348,7 @@ void __init arch_init_irq(void) #if defined(CONFIG_CPU_XLR) nlm_setup_fmn_irq(); #endif -#if defined(CONFIG_OF) +#ifdef CONFIG_CPU_XLP of_irq_init(xlp_pic_irq_ids); #endif } diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index 48d6349fd9d7..c5f45fc96c74 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -141,8 +141,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = { 
FUNC("i2c", 0, 4, 2), }; -static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) }; -static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) }; +static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) }; +static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) }; static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index c6b855f7892c..9f22195b90ed 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -688,15 +688,15 @@ cas_action: /* ELF32 Process entry path */ lws_compare_and_swap_2: #ifdef CONFIG_64BIT - /* Clip the input registers */ + /* Clip the input registers. We don't need to clip %r23 as we + only use it for word operations */ depdi 0, 31, 32, %r26 depdi 0, 31, 32, %r25 depdi 0, 31, 32, %r24 - depdi 0, 31, 32, %r23 #endif /* Check the validity of the size pointer */ - subi,>>= 4, %r23, %r0 + subi,>>= 3, %r23, %r0 b,n lws_exit_nosys /* Jump to the functions which will load the old and new values into diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index ec7b8f1e4822..c628f47a9052 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -1083,11 +1083,6 @@ source "arch/powerpc/Kconfig.debug" source "security/Kconfig" -config KEYS_COMPAT - bool - depends on COMPAT && KEYS - default y - source "crypto/Kconfig" config PPC_LIB_RHEAP diff --git a/arch/powerpc/boot/dts/fsl/kmcoge4.dts b/arch/powerpc/boot/dts/fsl/kmcoge4.dts index 6858ec9ef295..1a953d9edf1e 100644 --- a/arch/powerpc/boot/dts/fsl/kmcoge4.dts +++ b/arch/powerpc/boot/dts/fsl/kmcoge4.dts @@ -83,6 +83,10 @@ }; }; + sdhc@114000 { + status = "disabled"; + }; + i2c@119000 { status = "disabled"; }; diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index cf8c7e4e0b21..984a54c85952 100644 
--- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -102,7 +102,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, static void do_signal(struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); - struct ksignal ksig; + struct ksignal ksig = { .sig = 0 }; int ret; int is32 = is_32bit_task(); diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 24f58076d49e..1d2bc84338bf 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -280,6 +280,7 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, */ if (reject && reject != XICS_IPI) { arch_spin_unlock(&ics->lock); + icp->n_reject++; new_irq = reject; goto again; } @@ -611,10 +612,8 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) state = &ics->irq_state[src]; /* Still asserted, resend it */ - if (state->asserted) { - icp->n_reject++; + if (state->asserted) icp_rm_deliver_irq(xics, icp, irq); - } if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) { icp->rm_action |= XICS_RM_NOTIFY_EOI; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 60530fd93d6d..9510ddfff59b 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -347,9 +347,6 @@ config COMPAT config SYSVIPC_COMPAT def_bool y if COMPAT && SYSVIPC -config KEYS_COMPAT - def_bool y if COMPAT && KEYS - config SMP def_bool y prompt "Symmetric multi-processing support" diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..2c3413b0ca52 --- /dev/null +++ b/arch/s390/include/asm/asm-prototypes.h @@ -0,0 +1,8 @@ +#ifndef _ASM_S390_PROTOTYPES_H + +#include <linux/kvm_host.h> +#include <linux/ftrace.h> +#include <asm/fpu/api.h> +#include <asm-generic/asm-prototypes.h> + +#endif /* _ASM_S390_PROTOTYPES_H */ diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index 
649eb62c52b3..9e02cb7955c1 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -81,6 +81,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range); int zpci_load(u64 *data, u64 req, u64 offset); int zpci_store(u64 data, u64 req, u64 offset); int zpci_store_block(const u64 *data, u64 req, u64 offset); -void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); +int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); #endif diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h index 402ad6df4897..c54a9310d814 100644 --- a/arch/s390/include/asm/runtime_instr.h +++ b/arch/s390/include/asm/runtime_instr.h @@ -85,6 +85,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, load_runtime_instr_cb(&runtime_instr_empty_cb); } -void exit_thread_runtime_instr(void); +struct task_struct; + +void runtime_instr_release(struct task_struct *tsk); #endif /* _RUNTIME_INSTR_H */ diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index 12d45f0cfdd9..dde6b52359c5 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -34,8 +34,8 @@ static inline void restore_access_regs(unsigned int *acrs) save_access_regs(&prev->thread.acrs[0]); \ save_ri_cb(prev->thread.ri_cb); \ } \ + update_cr_regs(next); \ if (next->mm) { \ - update_cr_regs(next); \ set_cpu_flag(CIF_FPU); \ restore_access_regs(&next->thread.acrs[0]); \ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 6e72961608f0..07477ba392b7 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1549,6 +1549,7 @@ static struct s390_insn opcode_e7[] = { { "vfsq", 0xce, INSTR_VRR_VV000MM }, { "vfs", 0xe2, INSTR_VRR_VVV00MM }, { "vftci", 0x4a, INSTR_VRI_VVIMM }, + { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_eb[] = { @@ -1961,7 +1962,7 @@ void show_code(struct pt_regs *regs) { char *mode = 
user_mode(regs) ? "User" : "Krnl"; unsigned char code[64]; - char buffer[64], *ptr; + char buffer[128], *ptr; mm_segment_t old_fs; unsigned long addr; int start, end, opsize, hops, i; @@ -2024,7 +2025,7 @@ void show_code(struct pt_regs *regs) start += opsize; printk(buffer); ptr = buffer; - ptr += sprintf(ptr, "\n "); + ptr += sprintf(ptr, "\n\t "); hops++; } printk("\n"); diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 3c31609df959..ee7b8e7ca4f8 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -325,8 +325,10 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; if (test_facility(40)) S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; - if (test_facility(50) && test_facility(73)) + if (test_facility(50) && test_facility(73)) { S390_lowcore.machine_flags |= MACHINE_FLAG_TE; + __ctl_set_bit(0, 55); + } if (test_facility(51)) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; if (test_facility(129)) { diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 114ee8b96f17..7bc4e4c5d5b8 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -72,7 +72,6 @@ extern void kernel_thread_starter(void); */ void exit_thread(void) { - exit_thread_runtime_instr(); } void flush_thread(void) @@ -87,6 +86,7 @@ void arch_release_task_struct(struct task_struct *tsk) { /* Free either the floating-point or the vector register save area */ kfree(tsk->thread.fpu.regs); + runtime_instr_release(tsk); } int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) @@ -137,6 +137,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); clear_tsk_thread_flag(p, TIF_SINGLE_STEP); + p->thread.per_flags = 0; /* Initialize per thread user and system timer values */ ti = task_thread_info(p); ti->user_timer = 0; diff --git 
a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index fffa0e5462af..fd03a7569e10 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -18,11 +18,24 @@ /* empty control block to disable RI by loading it */ struct runtime_instr_cb runtime_instr_empty_cb; +void runtime_instr_release(struct task_struct *tsk) +{ + kfree(tsk->thread.ri_cb); +} + static void disable_runtime_instr(void) { - struct pt_regs *regs = task_pt_regs(current); + struct task_struct *task = current; + struct pt_regs *regs; + if (!task->thread.ri_cb) + return; + regs = task_pt_regs(task); + preempt_disable(); load_runtime_instr_cb(&runtime_instr_empty_cb); + kfree(task->thread.ri_cb); + task->thread.ri_cb = NULL; + preempt_enable(); /* * Make sure the RI bit is deleted from the PSW. If the user did not @@ -43,17 +56,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) cb->valid = 1; } -void exit_thread_runtime_instr(void) -{ - struct task_struct *task = current; - - if (!task->thread.ri_cb) - return; - disable_runtime_instr(); - kfree(task->thread.ri_cb); - task->thread.ri_cb = NULL; -} - SYSCALL_DEFINE1(s390_runtime_instr, int, command) { struct runtime_instr_cb *cb; @@ -62,9 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command) return -EOPNOTSUPP; if (command == S390_RUNTIME_INSTR_STOP) { - preempt_disable(); - exit_thread_runtime_instr(); - preempt_enable(); + disable_runtime_instr(); return 0; } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index f2f6720a3331..ef0499b76c50 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -359,7 +359,8 @@ static void zpci_irq_handler(struct airq_struct *airq) /* End of second scan with interrupts on. */ break; /* First scan complete, reenable interrupts. 
*/ - zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); + if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC)) + break; si = 0; continue; } @@ -921,7 +922,7 @@ static int __init pci_base_init(void) if (!s390_pci_probe) return 0; - if (!test_facility(69) || !test_facility(71) || !test_facility(72)) + if (!test_facility(69) || !test_facility(71)) return 0; rc = zpci_debug_init(); diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index 10ca15dcab11..bc065392f7ab 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c @@ -7,6 +7,7 @@ #include <linux/export.h> #include <linux/errno.h> #include <linux/delay.h> +#include <asm/facility.h> #include <asm/pci_insn.h> #include <asm/pci_debug.h> #include <asm/processor.h> @@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range) } /* Set Interruption Controls */ -void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) +int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) { + if (!test_facility(72)) + return -EIO; asm volatile ( " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); + return 0; } /* PCI Load */ diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 538c10db3537..8dc315b212c2 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -165,7 +165,6 @@ static struct plat_sci_port scif2_platform_data = { .scscr = SCSCR_TE | SCSCR_RE, .type = PORT_IRDA, .ops = &sh770x_sci_port_ops, - .regshift = 1, }; static struct resource scif2_resources[] = { diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 894bcaed002e..1cf6a15102d8 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -550,9 +550,6 @@ config SYSVIPC_COMPAT depends on COMPAT && SYSVIPC default y -config KEYS_COMPAT - def_bool y if COMPAT && KEYS - endmenu source "net/Kconfig" diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8dc3b07ee3cc..f37e01e6b7f2 
100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2657,10 +2657,6 @@ config COMPAT_FOR_U64_ALIGNMENT config SYSVIPC_COMPAT def_bool y depends on SYSVIPC - -config KEYS_COMPAT - def_bool y - depends on KEYS endif endmenu diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S index 85c4e1cf7172..e1693457c178 100644 --- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S +++ b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S @@ -174,8 +174,8 @@ LABEL skip_ %I .endr # Find min length - vmovdqa _lens+0*16(state), %xmm0 - vmovdqa _lens+1*16(state), %xmm1 + vmovdqu _lens+0*16(state), %xmm0 + vmovdqu _lens+1*16(state), %xmm1 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} @@ -195,8 +195,8 @@ LABEL skip_ %I vpsubd %xmm2, %xmm0, %xmm0 vpsubd %xmm2, %xmm1, %xmm1 - vmovdqa %xmm0, _lens+0*16(state) - vmovdqa %xmm1, _lens+1*16(state) + vmovdqu %xmm0, _lens+0*16(state) + vmovdqu %xmm1, _lens+1*16(state) # "state" and "args" are the same address, arg1 # len is arg2 @@ -260,8 +260,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2) jc .return_null # Find min length - vmovdqa _lens(state), %xmm0 - vmovdqa _lens+1*16(state), %xmm1 + vmovdqu _lens(state), %xmm0 + vmovdqu _lens+1*16(state), %xmm1 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 08b1f2f6ea50..c9e6eab2075b 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -3,6 +3,7 @@ #include <asm/fpu/api.h> #include <asm/pgtable.h> +#include <asm/tlb.h> /* * We map the EFI regions needed for runtime services non-contiguously, @@ -66,6 +67,17 @@ extern u64 asmlinkage efi_call(void *fp, ...); #define efi_call_phys(f, args...) 
efi_call((f), args) +/* + * Scratch space used for switching the pagetable in the EFI stub + */ +struct efi_scratch { + u64 r15; + u64 prev_cr3; + pgd_t *efi_pgt; + bool use_pgd; + u64 phys_stack; +} __packed; + #define efi_call_virt(f, ...) \ ({ \ efi_status_t __s; \ @@ -73,7 +85,20 @@ extern u64 asmlinkage efi_call(void *fp, ...); efi_sync_low_kernel_mappings(); \ preempt_disable(); \ __kernel_fpu_begin(); \ + \ + if (efi_scratch.use_pgd) { \ + efi_scratch.prev_cr3 = read_cr3(); \ + write_cr3((unsigned long)efi_scratch.efi_pgt); \ + __flush_tlb_all(); \ + } \ + \ __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \ + \ + if (efi_scratch.use_pgd) { \ + write_cr3(efi_scratch.prev_cr3); \ + __flush_tlb_all(); \ + } \ + \ __kernel_fpu_end(); \ preempt_enable(); \ __s; \ @@ -113,6 +138,7 @@ extern void __init efi_memory_uc(u64 addr, unsigned long size); extern void __init efi_map_region(efi_memory_desc_t *md); extern void __init efi_map_region_fixed(efi_memory_desc_t *md); extern void efi_sync_low_kernel_mappings(void); +extern int __init efi_alloc_page_tables(void); extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages); extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages); extern void __init old_map_region(efi_memory_desc_t *md); diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 19d14ac23ef9..fc3c7e49c8e4 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -296,6 +296,7 @@ struct x86_emulate_ctxt { bool perm_ok; /* do not check permissions if true */ bool ud; /* inject an #UD if host doesn't support insn */ + bool tf; /* TF value before instruction (after for syscall/sysret) */ bool have_exception; struct x86_exception exception; diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 91dfcafe27a6..bad25bb80679 100644 --- a/arch/x86/include/asm/syscalls.h +++ 
b/arch/x86/include/asm/syscalls.h @@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int); asmlinkage long sys_iopl(unsigned int); /* kernel/ldt.c */ -asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); +asmlinkage long sys_modify_ldt(int, void __user *, unsigned long); /* kernel/signal.c */ asmlinkage long sys_rt_sigreturn(void); diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 7402eb4b509d..6a07c05956a6 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -7,6 +7,7 @@ #include <linux/compiler.h> #include <linux/thread_info.h> #include <linux/string.h> +#include <linux/preempt.h> #include <asm/asm.h> #include <asm/page.h> #include <asm/smap.h> @@ -66,6 +67,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ }) +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP +# define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task()) +#else +# define WARN_ON_IN_IRQ() +#endif + /** * access_ok: - Checks if a user space pointer is valid * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that @@ -86,8 +93,11 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. 
*/ -#define access_ok(type, addr, size) \ - likely(!__range_not_ok(addr, size, user_addr_max())) +#define access_ok(type, addr, size) \ +({ \ + WARN_ON_IN_IRQ(); \ + likely(!__range_not_ok(addr, size, user_addr_max())); \ +}) /* * The exception table consists of pairs of addresses relative to the diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 5f8f0b3cc674..2c0b0b645a74 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -26,7 +26,7 @@ #include "common.h" static nokprobe_inline -int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, +void __skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, unsigned long orig_ip) { /* @@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, __this_cpu_write(current_kprobe, NULL); if (orig_ip) regs->ip = orig_ip; - return 1; } int skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { - if (kprobe_ftrace(p)) - return __skip_singlestep(p, regs, kcb, 0); - else - return 0; + if (kprobe_ftrace(p)) { + __skip_singlestep(p, regs, kcb, 0); + preempt_enable_no_resched(); + return 1; + } + return 0; } NOKPROBE_SYMBOL(skip_singlestep); -/* Ftrace callback handler for kprobes */ +/* Ftrace callback handler for kprobes -- called under preempt disabled */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct pt_regs *regs) { @@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ regs->ip = ip + sizeof(kprobe_opcode_t); + /* To emulate trap based kprobes, preempt_disable here */ + preempt_disable(); __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (!p->pre_handler || !p->pre_handler(p, regs)) + if (!p->pre_handler || !p->pre_handler(p, regs)) { __skip_singlestep(p, regs, kcb, orig_ip); + 
preempt_enable_no_resched(); + } /* * If pre_handler returns !0, it sets regs->ip and - * resets current kprobe. + * resets current kprobe, and keeps preempt count +1. */ } end: diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 6acc9dd91f36..d6279593bcdd 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -12,6 +12,7 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> +#include <linux/syscalls.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> @@ -271,8 +272,8 @@ out: return error; } -asmlinkage int sys_modify_ldt(int func, void __user *ptr, - unsigned long bytecount) +SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , + unsigned long , bytecount) { int ret = -ENOSYS; @@ -290,5 +291,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr, ret = write_ldt(ptr, bytecount, 0); break; } - return ret; + /* + * The SYSCALL_DEFINE() macros give us an 'unsigned long' + * return type, but the ABI for sys_modify_ldt() expects + * 'int'. This cast gives us an int-sized value in %rax + * for the return code. The 'unsigned' is necessary so + * the compiler does not try to sign-extend the negative + * return codes into the high half of the register when + * taking the value from int->long. 
+ */ + return (unsigned int)ret; } diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 04b2f3cad7ba..684edebb4a0c 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2726,6 +2726,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); } + ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; return X86EMUL_CONTINUE; } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 899c40f826dd..4b1152e57340 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1696,6 +1696,8 @@ static int ud_interception(struct vcpu_svm *svm) int er; er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); + if (er == EMULATE_USER_EXIT) + return 0; if (er != EMULATE_DONE) kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; @@ -3114,6 +3116,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) u32 ecx = msr->index; u64 data = msr->data; switch (ecx) { + case MSR_IA32_CR_PAT: + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) + return 1; + vcpu->arch.pat = data; + svm->vmcb->save.g_pat = data; + mark_dirty(svm->vmcb, VMCB_NPT); + break; case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr); break; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9114588e3e61..253a8c8207bb 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5267,6 +5267,8 @@ static int handle_exception(struct kvm_vcpu *vcpu) return 1; } er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); + if (er == EMULATE_USER_EXIT) + return 0; if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; @@ -10394,6 +10396,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); + vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); + vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. 
*/ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8e526c6fd784..df81717a92f3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1812,6 +1812,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) */ BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); + if (guest_hv_clock.version & 1) + ++guest_hv_clock.version; /* first time write, random junk */ + vcpu->hv_clock.version = guest_hv_clock.version + 1; kvm_write_guest_cached(v->kvm, &vcpu->pv_time, &vcpu->hv_clock, @@ -5095,6 +5098,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); ctxt->eflags = kvm_get_rflags(vcpu); + ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; + ctxt->eip = kvm_rip_read(vcpu); ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : @@ -5315,37 +5320,26 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, return dr6; } -static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) +static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r) { struct kvm_run *kvm_run = vcpu->run; - /* - * rflags is the old, "raw" value of the flags. The new value has - * not been saved yet. - * - * This is correct even for TF set by the guest, because "the - * processor will not generate this exception after the instruction - * that sets the TF flag". - */ - if (unlikely(rflags & X86_EFLAGS_TF)) { - if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { - kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | - DR6_RTM; - kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; - kvm_run->debug.arch.exception = DB_VECTOR; - kvm_run->exit_reason = KVM_EXIT_DEBUG; - *r = EMULATE_USER_EXIT; - } else { - vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; - /* - * "Certain debug exceptions may clear bit 0-3. 
The - * remaining contents of the DR6 register are never - * cleared by the processor". - */ - vcpu->arch.dr6 &= ~15; - vcpu->arch.dr6 |= DR6_BS | DR6_RTM; - kvm_queue_exception(vcpu, DB_VECTOR); - } + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { + kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; + kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; + kvm_run->debug.arch.exception = DB_VECTOR; + kvm_run->exit_reason = KVM_EXIT_DEBUG; + *r = EMULATE_USER_EXIT; + } else { + vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; + /* + * "Certain debug exceptions may clear bit 0-3. The + * remaining contents of the DR6 register are never + * cleared by the processor". + */ + vcpu->arch.dr6 &= ~15; + vcpu->arch.dr6 |= DR6_BS | DR6_RTM; + kvm_queue_exception(vcpu, DB_VECTOR); } } @@ -5435,6 +5429,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; + if (ctxt->have_exception && inject_emulated_exception(vcpu)) + return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); @@ -5500,8 +5496,9 @@ restart: toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); - if (r == EMULATE_DONE) - kvm_vcpu_check_singlestep(vcpu, rflags, &r); + if (r == EMULATE_DONE && + (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) + kvm_vcpu_do_singlestep(vcpu, &r); if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) __kvm_set_rflags(vcpu, ctxt->eflags); diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index d388de72eaca..ec039f2a0c13 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -833,7 +833,7 @@ EndTable GrpTable: Grp3_1 0: TEST Eb,Ib -1: +1: TEST Eb,Ib 2: NOT Eb 3: NEG Eb 4: MUL AL,Eb diff --git a/arch/x86/mm/pageattr.c 
b/arch/x86/mm/pageattr.c index 4540e8880cd9..1924bba0f3af 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -911,15 +911,10 @@ static void populate_pte(struct cpa_data *cpa, pte = pte_offset_kernel(pmd, start); while (num_pages-- && start < end) { - - /* deal with the NX bit */ - if (!(pgprot_val(pgprot) & _PAGE_NX)) - cpa->pfn &= ~_PAGE_NX; - - set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot)); + set_pte(pte, pfn_pte(cpa->pfn, pgprot)); start += PAGE_SIZE; - cpa->pfn += PAGE_SIZE; + cpa->pfn++; pte++; } } @@ -975,11 +970,11 @@ static int populate_pmd(struct cpa_data *cpa, pmd = pmd_offset(pud, start); - set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | + set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | massage_pgprot(pmd_pgprot))); start += PMD_SIZE; - cpa->pfn += PMD_SIZE; + cpa->pfn += PMD_SIZE >> PAGE_SHIFT; cur_pages += PMD_SIZE >> PAGE_SHIFT; } @@ -1048,11 +1043,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, * Map everything starting from the Gb boundary, possibly with 1G pages */ while (end - start >= PUD_SIZE) { - set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | + set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | massage_pgprot(pud_pgprot))); start += PUD_SIZE; - cpa->pfn += PUD_SIZE; + cpa->pfn += PUD_SIZE >> PAGE_SHIFT; cur_pages += PUD_SIZE >> PAGE_SHIFT; pud++; } diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index d90528ea5412..12c051d19e4b 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void) eax.full = cpuid_eax(0xa); /* Workaround for BIOS bugs in 6/15. 
Taken from perfmon2 */ - if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 && - __this_cpu_read(cpu_info.x86_model) == 15) { + if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model == 15) { eax.split.version_id = 2; eax.split.num_counters = 2; eax.split.bit_width = 40; diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index ea48449b2e63..64fbc7e33226 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c @@ -28,8 +28,7 @@ struct bmp_header { void __init efi_bgrt_init(void) { acpi_status status; - void __iomem *image; - bool ioremapped = false; + void *image; struct bmp_header bmp_header; if (acpi_disabled) @@ -70,20 +69,14 @@ void __init efi_bgrt_init(void) return; } - image = efi_lookup_mapped_addr(bgrt_tab->image_address); + image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB); if (!image) { - image = early_ioremap(bgrt_tab->image_address, - sizeof(bmp_header)); - ioremapped = true; - if (!image) { - pr_err("Ignoring BGRT: failed to map image header memory\n"); - return; - } + pr_err("Ignoring BGRT: failed to map image header memory\n"); + return; } - memcpy_fromio(&bmp_header, image, sizeof(bmp_header)); - if (ioremapped) - early_iounmap(image, sizeof(bmp_header)); + memcpy(&bmp_header, image, sizeof(bmp_header)); + memunmap(image); bgrt_image_size = bmp_header.size; bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN); @@ -93,18 +86,14 @@ void __init efi_bgrt_init(void) return; } - if (ioremapped) { - image = early_ioremap(bgrt_tab->image_address, - bmp_header.size); - if (!image) { - pr_err("Ignoring BGRT: failed to map image memory\n"); - kfree(bgrt_image); - bgrt_image = NULL; - return; - } + image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB); + if (!image) { + pr_err("Ignoring BGRT: failed to map image memory\n"); + kfree(bgrt_image); + bgrt_image = NULL; + return; } - memcpy_fromio(bgrt_image, 
image, bgrt_image_size); - if (ioremapped) - early_iounmap(image, bmp_header.size); + memcpy(bgrt_image, image, bgrt_image_size); + memunmap(image); } diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index ad285404ea7f..3c1f3cd7b2ba 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -869,7 +869,7 @@ static void __init kexec_enter_virtual_mode(void) * This function will switch the EFI runtime services to virtual mode. * Essentially, we look through the EFI memmap and map every region that * has the runtime attribute bit set in its memory descriptor into the - * ->trampoline_pgd page table using a top-down VA allocation scheme. + * efi_pgd page table. * * The old method which used to update that memory descriptor with the * virtual address obtained from ioremap() is still supported when the @@ -879,8 +879,8 @@ static void __init kexec_enter_virtual_mode(void) * * The new method does a pagetable switch in a preemption-safe manner * so that we're in a different address space when calling a runtime - * function. For function arguments passing we do copy the PGDs of the - * kernel page table into ->trampoline_pgd prior to each call. + * function. For function arguments passing we do copy the PUDs of the + * kernel page table into efi_pgd prior to each call. * * Specially for kexec boot, efi runtime maps in previous kernel should * be passed in via setup_data. In that case runtime ranges will be mapped @@ -895,6 +895,12 @@ static void __init __efi_enter_virtual_mode(void) efi.systab = NULL; + if (efi_alloc_page_tables()) { + pr_err("Failed to allocate EFI page tables\n"); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + return; + } + efi_merge_regions(); new_memmap = efi_map_regions(&count, &pg_shift); if (!new_memmap) { @@ -954,28 +960,11 @@ static void __init __efi_enter_virtual_mode(void) efi_runtime_mkexec(); /* - * We mapped the descriptor array into the EFI pagetable above but we're - * not unmapping it here. 
Here's why: - * - * We're copying select PGDs from the kernel page table to the EFI page - * table and when we do so and make changes to those PGDs like unmapping - * stuff from them, those changes appear in the kernel page table and we - * go boom. - * - * From setup_real_mode(): - * - * ... - * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd; - * - * In this particular case, our allocation is in PGD 0 of the EFI page - * table but we've copied that PGD from PGD[272] of the EFI page table: - * - * pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272 - * - * where the direct memory mapping in kernel space is. - * - * new_memmap's VA comes from that direct mapping and thus clearing it, - * it would get cleared in the kernel page table too. + * We mapped the descriptor array into the EFI pagetable above + * but we're not unmapping it here because if we're running in + * EFI mixed mode we need all of memory to be accessible when + * we pass parameters to the EFI runtime services in the + * thunking code. * * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift); */ diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index ed5b67338294..58d669bc8250 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -38,6 +38,11 @@ * say 0 - 3G. */ +int __init efi_alloc_page_tables(void) +{ + return 0; +} + void efi_sync_low_kernel_mappings(void) {} void __init efi_dump_pagetable(void) {} int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index a0ac0f9c307f..18dfaad71c99 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -40,6 +40,7 @@ #include <asm/fixmap.h> #include <asm/realmode.h> #include <asm/time.h> +#include <asm/pgalloc.h> /* * We allocate runtime services regions bottom-up, starting from -4G, i.e. 
@@ -47,16 +48,7 @@ */ static u64 efi_va = EFI_VA_START; -/* - * Scratch space used for switching the pagetable in the EFI stub - */ -struct efi_scratch { - u64 r15; - u64 prev_cr3; - pgd_t *efi_pgt; - bool use_pgd; - u64 phys_stack; -} __packed; +struct efi_scratch efi_scratch; static void __init early_code_mapping_set_exec(int executable) { @@ -83,8 +75,11 @@ pgd_t * __init efi_call_phys_prolog(void) int pgd; int n_pgds; - if (!efi_enabled(EFI_OLD_MEMMAP)) - return NULL; + if (!efi_enabled(EFI_OLD_MEMMAP)) { + save_pgd = (pgd_t *)read_cr3(); + write_cr3((unsigned long)efi_scratch.efi_pgt); + goto out; + } early_code_mapping_set_exec(1); @@ -96,6 +91,7 @@ pgd_t * __init efi_call_phys_prolog(void) vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); } +out: __flush_tlb_all(); return save_pgd; @@ -109,8 +105,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) int pgd_idx; int nr_pgds; - if (!save_pgd) + if (!efi_enabled(EFI_OLD_MEMMAP)) { + write_cr3((unsigned long)save_pgd); + __flush_tlb_all(); return; + } nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); @@ -123,27 +122,97 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) early_code_mapping_set_exec(0); } +static pgd_t *efi_pgd; + +/* + * We need our own copy of the higher levels of the page tables + * because we want to avoid inserting EFI region mappings (EFI_VA_END + * to EFI_VA_START) into the standard kernel page tables. Everything + * else can be shared, see efi_sync_low_kernel_mappings(). 
+ */ +int __init efi_alloc_page_tables(void) +{ + pgd_t *pgd; + pud_t *pud; + gfp_t gfp_mask; + + if (efi_enabled(EFI_OLD_MEMMAP)) + return 0; + + gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO; + efi_pgd = (pgd_t *)__get_free_page(gfp_mask); + if (!efi_pgd) + return -ENOMEM; + + pgd = efi_pgd + pgd_index(EFI_VA_END); + + pud = pud_alloc_one(NULL, 0); + if (!pud) { + free_page((unsigned long)efi_pgd); + return -ENOMEM; + } + + pgd_populate(NULL, pgd, pud); + + return 0; +} + /* * Add low kernel mappings for passing arguments to EFI functions. */ void efi_sync_low_kernel_mappings(void) { - unsigned num_pgds; - pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); + unsigned num_entries; + pgd_t *pgd_k, *pgd_efi; + pud_t *pud_k, *pud_efi; if (efi_enabled(EFI_OLD_MEMMAP)) return; - num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET); + /* + * We can share all PGD entries apart from the one entry that + * covers the EFI runtime mapping space. + * + * Make sure the EFI runtime region mappings are guaranteed to + * only span a single PGD entry and that the entry also maps + * other important kernel regions. + */ + BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END)); + BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) != + (EFI_VA_END & PGDIR_MASK)); + + pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET); + pgd_k = pgd_offset_k(PAGE_OFFSET); - memcpy(pgd + pgd_index(PAGE_OFFSET), - init_mm.pgd + pgd_index(PAGE_OFFSET), - sizeof(pgd_t) * num_pgds); + num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET); + memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries); + + /* + * We share all the PUD entries apart from those that map the + * EFI regions. Copy around them. 
+ */ + BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0); + BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0); + + pgd_efi = efi_pgd + pgd_index(EFI_VA_END); + pud_efi = pud_offset(pgd_efi, 0); + + pgd_k = pgd_offset_k(EFI_VA_END); + pud_k = pud_offset(pgd_k, 0); + + num_entries = pud_index(EFI_VA_END); + memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); + + pud_efi = pud_offset(pgd_efi, EFI_VA_START); + pud_k = pud_offset(pgd_k, EFI_VA_START); + + num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START); + memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); } int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) { - unsigned long text; + unsigned long pfn, text; struct page *page; unsigned npages; pgd_t *pgd; @@ -151,8 +220,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) if (efi_enabled(EFI_OLD_MEMMAP)) return 0; - efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd; - pgd = __va(efi_scratch.efi_pgt); + efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd); + pgd = efi_pgd; /* * It can happen that the physical address of new_memmap lands in memory @@ -160,7 +229,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) * and ident-map those pages containing the map before calling * phys_efi_set_virtual_address_map(). 
*/ - if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) { + pfn = pa_memmap >> PAGE_SHIFT; + if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) { pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap); return 1; } @@ -185,8 +255,9 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) npages = (_end - _text) >> PAGE_SHIFT; text = __pa(_text); + pfn = text >> PAGE_SHIFT; - if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) { + if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) { pr_err("Failed to map kernel text 1:1\n"); return 1; } @@ -196,20 +267,20 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) { - pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); - - kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages); + kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages); } static void __init __map_region(efi_memory_desc_t *md, u64 va) { - pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); - unsigned long pf = 0; + unsigned long flags = 0; + unsigned long pfn; + pgd_t *pgd = efi_pgd; if (!(md->attribute & EFI_MEMORY_WB)) - pf |= _PAGE_PCD; + flags |= _PAGE_PCD; - if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf)) + pfn = md->phys_addr >> PAGE_SHIFT; + if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags)) pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n", md->phys_addr, va); } @@ -312,9 +383,7 @@ void __init efi_runtime_mkexec(void) void __init efi_dump_pagetable(void) { #ifdef CONFIG_EFI_PGT_DUMP - pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); - - ptdump_walk_pgd_level(NULL, pgd); + ptdump_walk_pgd_level(NULL, efi_pgd); #endif } diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 86d0f9e08dd9..32020cb8bb08 100644 --- 
a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -38,41 +38,6 @@ mov %rsi, %cr0; \ mov (%rsp), %rsp - /* stolen from gcc */ - .macro FLUSH_TLB_ALL - movq %r15, efi_scratch(%rip) - movq %r14, efi_scratch+8(%rip) - movq %cr4, %r15 - movq %r15, %r14 - andb $0x7f, %r14b - movq %r14, %cr4 - movq %r15, %cr4 - movq efi_scratch+8(%rip), %r14 - movq efi_scratch(%rip), %r15 - .endm - - .macro SWITCH_PGT - cmpb $0, efi_scratch+24(%rip) - je 1f - movq %r15, efi_scratch(%rip) # r15 - # save previous CR3 - movq %cr3, %r15 - movq %r15, efi_scratch+8(%rip) # prev_cr3 - movq efi_scratch+16(%rip), %r15 # EFI pgt - movq %r15, %cr3 - 1: - .endm - - .macro RESTORE_PGT - cmpb $0, efi_scratch+24(%rip) - je 2f - movq efi_scratch+8(%rip), %r15 - movq %r15, %cr3 - movq efi_scratch(%rip), %r15 - FLUSH_TLB_ALL - 2: - .endm - ENTRY(efi_call) SAVE_XMM mov (%rsp), %rax @@ -83,16 +48,8 @@ ENTRY(efi_call) mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx - SWITCH_PGT call *%rdi - RESTORE_PGT addq $48, %rsp RESTORE_XMM ret ENDPROC(efi_call) - - .data -ENTRY(efi_scratch) - .fill 3,8,0 - .byte 0 - .quad 0 diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c index 836a1eb5df43..3ee234b6234d 100644 --- a/arch/x86/um/ldt.c +++ b/arch/x86/um/ldt.c @@ -6,6 +6,7 @@ #include <linux/mm.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/syscalls.h> #include <linux/uaccess.h> #include <asm/unistd.h> #include <os.h> @@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm) mm->arch.ldt.entry_count = 0; } -int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount) +SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , + unsigned long , bytecount) { - return do_modify_ldt_skas(func, ptr, bytecount); + /* See non-um modify_ldt() for why we do this cast */ + return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount); } diff --git a/crypto/Kconfig b/crypto/Kconfig index 248d1a8f9409..3240d394426c 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ 
-361,7 +361,6 @@ config CRYPTO_XTS select CRYPTO_BLKCIPHER select CRYPTO_MANAGER select CRYPTO_GF128MUL - select CRYPTO_ECB help XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain, key size 256, 384 or 512 bits. This implementation currently diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 2516e97c58f1..5e5a8adac0ba 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -87,7 +87,7 @@ EXPORT_SYMBOL_GPL(pkcs7_free_message); static int pkcs7_check_authattrs(struct pkcs7_message *msg) { struct pkcs7_signed_info *sinfo; - bool want; + bool want = false; sinfo = msg->signed_infos; if (!sinfo) diff --git a/drivers/Kconfig b/drivers/Kconfig index b3b27b86955d..4051a164c2eb 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -212,4 +212,6 @@ source "drivers/bif/Kconfig" source "drivers/sensors/Kconfig" +source "drivers/tee/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index 2545cf95e8db..d7c1d7422e86 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -182,3 +182,4 @@ obj-$(CONFIG_BIF) += bif/ obj-$(CONFIG_SENSORS_SSC) += sensors/ obj-$(CONFIG_ESOC) += esoc/ +obj-$(CONFIG_TEE) += tee/ diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 2c33b1251afb..2106014f1ea8 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -833,7 +833,7 @@ binder_enqueue_work_ilocked(struct binder_work *work, } /** - * binder_enqueue_thread_work_ilocked_nowake() - Add thread work + * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work * @thread: thread to queue work to * @work: struct binder_work to add to list * @@ -844,8 +844,8 @@ binder_enqueue_work_ilocked(struct binder_work *work, * Requires the proc->inner_lock to be held. 
*/ static void -binder_enqueue_thread_work_ilocked_nowake(struct binder_thread *thread, - struct binder_work *work) +binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread, + struct binder_work *work) { binder_enqueue_work_ilocked(work, &thread->todo); } @@ -2468,7 +2468,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, debug_id, (u64)fda->num_fds); continue; } - fd_array = (u32 *)(parent_buffer + fda->parent_offset); + fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); for (fd_index = 0; fd_index < fda->num_fds; fd_index++) task_close_fd(proc, fd_array[fd_index]); } break; @@ -2692,7 +2692,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, */ parent_buffer = parent->buffer - binder_alloc_get_user_buffer_offset(&target_proc->alloc); - fd_array = (u32 *)(parent_buffer + fda->parent_offset); + fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", proc->pid, thread->pid); @@ -2758,7 +2758,7 @@ static int binder_fixup_parent(struct binder_transaction *t, proc->pid, thread->pid); return -EINVAL; } - parent_buffer = (u8 *)(parent->buffer - + parent_buffer = (u8 *)((uintptr_t)parent->buffer - binder_alloc_get_user_buffer_offset( &target_proc->alloc)); *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; @@ -3348,7 +3348,14 @@ static void binder_transaction(struct binder_proc *proc, } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); binder_inner_proc_lock(proc); - binder_enqueue_thread_work_ilocked_nowake(thread, tcomplete); + /* + * Defer the TRANSACTION_COMPLETE, so we don't return to + * userspace immediately; this allows the target process to + * immediately start processing this transaction, reducing + * latency. 
We will then return the TRANSACTION_COMPLETE when + * the target replies (or there is an error). + */ + binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 6aaa3f81755b..c2ba811993d4 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -272,6 +272,7 @@ config SATA_SX4 config ATA_BMDMA bool "ATA BMDMA support" + depends on HAS_DMA default y help This option adds support for SFF ATA controllers with BMDMA @@ -318,6 +319,7 @@ config SATA_DWC_VDEBUG config SATA_HIGHBANK tristate "Calxeda Highbank SATA support" + depends on HAS_DMA depends on ARCH_HIGHBANK || COMPILE_TEST help This option enables support for the Calxeda Highbank SoC's @@ -327,6 +329,7 @@ config SATA_HIGHBANK config SATA_MV tristate "Marvell SATA support" + depends on HAS_DMA depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \ ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST select GENERIC_PHY diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 91a9e6af2ec4..75cced210b2a 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2245,8 +2245,8 @@ static void ata_eh_link_autopsy(struct ata_link *link) if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) eflags |= ATA_EFLAG_DUBIOUS_XFER; ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); + trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); } - trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); DPRINTK("EXIT\n"); } diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 433b60092972..e40f67b7d28b 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -1936,6 +1936,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) if (ret) { dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); + of_node_put(np); goto free_table; } } diff --git a/drivers/block/rbd.c 
b/drivers/block/rbd.c index 55a8671f1979..80455f70ff79 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2736,7 +2736,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) * from the parent. */ page_count = (u32)calc_pages_for(0, length); - pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); + pages = ceph_alloc_page_vector(page_count, GFP_NOIO); if (IS_ERR(pages)) { result = PTR_ERR(pages); pages = NULL; @@ -2863,7 +2863,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) */ size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32); page_count = (u32)calc_pages_for(0, size); - pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); + pages = ceph_alloc_page_vector(page_count, GFP_NOIO); if (IS_ERR(pages)) return PTR_ERR(pages); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 33e23a7a691f..a295ad6a1674 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1407,33 +1407,34 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, static void make_response(struct xen_blkif *blkif, u64 id, unsigned short op, int st) { - struct blkif_response resp; + struct blkif_response *resp; unsigned long flags; union blkif_back_rings *blk_rings = &blkif->blk_rings; int notify; - resp.id = id; - resp.operation = op; - resp.status = st; - spin_lock_irqsave(&blkif->blk_ring_lock, flags); /* Place on the response ring for the relevant domain. 
*/ switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: - memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->native, + blk_rings->native.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_32: - memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->x86_32, + blk_rings->x86_32.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_64: - memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->x86_64, + blk_rings->x86_64.rsp_prod_pvt); break; default: BUG(); } + + resp->id = id; + resp->operation = op; + resp->status = st; + blk_rings->common.rsp_prod_pvt++; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index c929ae22764c..04cfee719334 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -74,9 +74,8 @@ extern unsigned int xen_blkif_max_ring_order; struct blkif_common_request { char dummy; }; -struct blkif_common_response { - char dummy; -}; + +/* i386 protocol version */ struct blkif_x86_32_request_rw { uint8_t nr_segments; /* number of segments */ @@ -128,14 +127,6 @@ struct blkif_x86_32_request { } u; } __attribute__((__packed__)); -/* i386 protocol version */ -#pragma pack(push, 4) -struct blkif_x86_32_response { - uint64_t id; /* copied from request */ - uint8_t operation; /* copied from request */ - int16_t status; /* BLKIF_RSP_??? 
*/ -}; -#pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request_rw { @@ -192,18 +183,12 @@ struct blkif_x86_64_request { } u; } __attribute__((__packed__)); -struct blkif_x86_64_response { - uint64_t __attribute__((__aligned__(8))) id; - uint8_t operation; /* copied from request */ - int16_t status; /* BLKIF_RSP_??? */ -}; - DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, - struct blkif_common_response); + struct blkif_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, - struct blkif_x86_32_response); + struct blkif_response __packed); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, - struct blkif_x86_64_response); + struct blkif_response); union blkif_back_rings { struct blkif_back_ring native; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 7bb8055bd10c..1ccad79ce77c 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2969,6 +2969,12 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + + /* QCA Rome devices lose their updated firmware over suspend, + * but the USB hub doesn't notice any status change. + * Explicitly request a device reset on resume. 
+ */ + set_bit(BTUSB_RESET_RESUME, &data->flags); } #ifdef CONFIG_BT_HCIBTUSB_RTL diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 25372dc381d4..5cb5e8ff0224 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -4029,7 +4029,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, } static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, - struct list_head *timeouts, long timeout_period, + struct list_head *timeouts, + unsigned long timeout_period, int slot, unsigned long *flags, unsigned int *waiting_msgs) { @@ -4042,8 +4043,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, if (!ent->inuse) return; - ent->timeout -= timeout_period; - if (ent->timeout > 0) { + if (timeout_period < ent->timeout) { + ent->timeout -= timeout_period; (*waiting_msgs)++; return; } @@ -4109,7 +4110,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, } } -static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period) +static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, + unsigned long timeout_period) { struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 2e14dfb588f4..7d060ffe8975 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -265,7 +265,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) /* Get configuration for the ATL instances */ snprintf(prop, sizeof(prop), "atl%u", i); - cfg_node = of_find_node_by_name(node, prop); + cfg_node = of_get_child_by_name(node, prop); if (cfg_node) { ret = of_property_read_u32(cfg_node, "bws", &cdesc->bws); @@ -278,6 +278,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i), cdesc->aws); } + of_node_put(cfg_node); } cdesc->probed = true; diff --git a/drivers/cpuidle/lpm-levels-of.c 
b/drivers/cpuidle/lpm-levels-of.c index ffbfd1c11af9..81a9f9763915 100644 --- a/drivers/cpuidle/lpm-levels-of.c +++ b/drivers/cpuidle/lpm-levels-of.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -848,14 +848,12 @@ failed: void free_cluster_node(struct lpm_cluster *cluster) { - struct list_head *list; int i; + struct lpm_cluster *cl, *m; - list_for_each(list, &cluster->child) { - struct lpm_cluster *n; - n = list_entry(list, typeof(*n), list); - list_del(list); - free_cluster_node(n); + list_for_each_entry_safe(cl, m, &cluster->child, list) { + list_del(&cl->list); + free_cluster_node(cl); }; if (cluster->cpu) { diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 72f138985e18..d83ab4bac8b1 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -80,11 +80,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, int ret; struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); pagefault_enable(); + preempt_enable(); ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); return ret; @@ -99,11 +101,13 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); enable_kernel_vsx(); aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); pagefault_enable(); + preempt_enable(); crypto_xor(keystream, src, nbytes); memcpy(dst, keystream, nbytes); @@ -132,6 +136,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); ret = 
blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); enable_kernel_vsx(); @@ -143,6 +148,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, &ctx->enc_key, walk.iv); pagefault_enable(); + preempt_enable(); /* We need to update IV mostly for last bytes/round */ inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index b8576fd6bd0e..1c7568c0055a 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -634,6 +634,7 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ + WARN(1, "dmatest: Kernel stack may be corrupted!!\n"); dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 8250950aab8b..66d84bcf9bbf 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1657,7 +1657,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i) static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) { struct pl330_thread *thrd = NULL; - unsigned long flags; int chans, i; if (pl330->state == DYING) @@ -1665,8 +1664,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) chans = pl330->pcfg.num_chan; - spin_lock_irqsave(&pl330->lock, flags); - for (i = 0; i < chans; i++) { thrd = &pl330->channels[i]; if ((thrd->free) && (!_manager_ns(thrd) || @@ -1684,8 +1681,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) thrd = NULL; } - spin_unlock_irqrestore(&pl330->lock, flags); - return thrd; } @@ -1703,7 +1698,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev) static void pl330_release_channel(struct pl330_thread *thrd) { struct pl330_dmac *pl330; - unsigned long flags; if (!thrd || thrd->free) return; @@ -1715,10 +1709,8 @@ static void 
pl330_release_channel(struct pl330_thread *thrd) pl330 = thrd->dmac; - spin_lock_irqsave(&pl330->lock, flags); _free_event(thrd, thrd->ev); thrd->free = true; - spin_unlock_irqrestore(&pl330->lock, flags); } /* Initialize the structure for PL330 configuration, that can be used @@ -2085,20 +2077,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) struct pl330_dmac *pl330 = pch->dmac; unsigned long flags; - spin_lock_irqsave(&pch->lock, flags); + spin_lock_irqsave(&pl330->lock, flags); dma_cookie_init(chan); pch->cyclic = false; pch->thread = pl330_request_channel(pl330); if (!pch->thread) { - spin_unlock_irqrestore(&pch->lock, flags); + spin_unlock_irqrestore(&pl330->lock, flags); return -ENOMEM; } tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); - spin_unlock_irqrestore(&pch->lock, flags); + spin_unlock_irqrestore(&pl330->lock, flags); return 1; } @@ -2201,12 +2193,13 @@ static int pl330_pause(struct dma_chan *chan) static void pl330_free_chan_resources(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); + struct pl330_dmac *pl330 = pch->dmac; unsigned long flags; tasklet_kill(&pch->task); pm_runtime_get_sync(pch->dmac->ddma.dev); - spin_lock_irqsave(&pch->lock, flags); + spin_lock_irqsave(&pl330->lock, flags); pl330_release_channel(pch->thread); pch->thread = NULL; @@ -2214,7 +2207,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan) if (pch->cyclic) list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); - spin_unlock_irqrestore(&pch->lock, flags); + spin_unlock_irqrestore(&pl330->lock, flags); pm_runtime_mark_last_busy(pch->dmac->ddma.dev); pm_runtime_put_autosuspend(pch->dmac->ddma.dev); } diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index 245d759d5ffc..6059d81e701a 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c @@ -813,6 +813,7 @@ static int zx_dma_probe(struct platform_device *op) INIT_LIST_HEAD(&d->slave.channels); dma_cap_set(DMA_SLAVE, 
d->slave.cap_mask); dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); dma_cap_set(DMA_PRIVATE, d->slave.cap_mask); d->slave.dev = &op->dev; d->slave.device_free_chan_resources = zx_dma_free_chan_resources; diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index ca64b174f8a3..a4e1f6939c39 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -1773,6 +1773,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, break; case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: pvt->pci_ta = pdev; + break; case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: pvt->pci_ras = pdev; break; diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 93c30a885740..aa2f6bb82b32 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c @@ -190,6 +190,11 @@ static int palmas_usb_probe(struct platform_device *pdev) struct palmas_usb *palmas_usb; int status; + if (!palmas) { + dev_err(&pdev->dev, "failed to get valid parent\n"); + return -EINVAL; + } + palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL); if (!palmas_usb) return -ENOMEM; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index c51f3b2fe3c0..20451c290233 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -327,38 +327,6 @@ u64 __init efi_mem_desc_end(efi_memory_desc_t *md) return end; } -/* - * We can't ioremap data in EFI boot services RAM, because we've already mapped - * it as RAM. So, look it up in the existing EFI memory map instead. Only - * callable after efi_enter_virtual_mode and before efi_free_boot_services. 
- */ -void __iomem *efi_lookup_mapped_addr(u64 phys_addr) -{ - struct efi_memory_map *map; - void *p; - map = efi.memmap; - if (!map) - return NULL; - if (WARN_ON(!map->map)) - return NULL; - for (p = map->map; p < map->map_end; p += map->desc_size) { - efi_memory_desc_t *md = p; - u64 size = md->num_pages << EFI_PAGE_SHIFT; - u64 end = md->phys_addr + size; - if (!(md->attribute & EFI_MEMORY_RUNTIME) && - md->type != EFI_BOOT_SERVICES_CODE && - md->type != EFI_BOOT_SERVICES_DATA) - continue; - if (!md->virt_addr) - continue; - if (phys_addr >= md->phys_addr && phys_addr < end) { - phys_addr += md->virt_addr - md->phys_addr; - return (__force void __iomem *)(unsigned long)phys_addr; - } - } - return NULL; -} - static __initdata efi_config_table_type_t common_tables[] = { {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20}, {ACPI_TABLE_GUID, "ACPI", &efi.acpi}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index f4cae5357e40..3e90ddcbb24a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1575,34 +1575,32 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]); } -/* Atom needs data in little endian format - * so swap as appropriate when copying data to - * or from atom. Note that atom operates on - * dw units. +/* Atom needs data in little endian format so swap as appropriate when copying + * data to or from atom. Note that atom operates on dw units. + * + * Use to_le=true when sending data to atom and provide at least + * ALIGN(num_bytes,4) bytes in the dst buffer. + * + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4) + * byes in the src buffer. 
*/ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ - u32 *dst32, *src32; + u32 src_tmp[5], dst_tmp[5]; int i; + u8 align_num_bytes = ALIGN(num_bytes, 4); - memcpy(src_tmp, src, num_bytes); - src32 = (u32 *)src_tmp; - dst32 = (u32 *)dst_tmp; if (to_le) { - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = cpu_to_le32(src32[i]); - memcpy(dst, dst_tmp, num_bytes); + memcpy(src_tmp, src, num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = cpu_to_le32(src_tmp[i]); + memcpy(dst, dst_tmp, align_num_bytes); } else { - u8 dws = num_bytes & ~3; - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = le32_to_cpu(src32[i]); - memcpy(dst, dst_tmp, dws); - if (num_bytes % 4) { - for (i = 0; i < (num_bytes % 4); i++) - dst[dws+i] = dst_tmp[dws+i]; - } + memcpy(src_tmp, src, align_num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = le32_to_cpu(src_tmp[i]); + memcpy(dst, dst_tmp, num_bytes); } #else memcpy(dst, src, num_bytes); diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile index ffd673615772..26412d2f8c98 100644 --- a/drivers/gpu/drm/armada/Makefile +++ b/drivers/gpu/drm/armada/Makefile @@ -4,3 +4,5 @@ armada-y += armada_510.o armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o obj-$(CONFIG_DRM_ARMADA) := armada.o + +CFLAGS_armada_trace.o := -I$(src) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 7dd6728dd092..ccc2044af831 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -312,7 +312,7 @@ static int drm_minor_register(struct drm_device *dev, unsigned int type) ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root); if (ret) { DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); - return ret; + goto err_debugfs; } ret = device_add(minor->kdev); diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 
dbf263d3511b..db1f2a738eb2 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -378,14 +378,12 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, BUG_ON(!hole_node->hole_follows || node->allocated); - if (adj_start < start) - adj_start = start; - if (adj_end > end) - adj_end = end; - if (mm->color_adjust) mm->color_adjust(hole_node, color, &adj_start, &adj_end); + adj_start = max(adj_start, start); + adj_end = min(adj_end, end); + if (flags & DRM_MM_CREATE_TOP) adj_start = adj_end - size; @@ -657,17 +655,15 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_ adj_end = drm_mm_hole_node_end(entry); hole_size = adj_end - adj_start; - if (adj_start < start) - adj_start = start; - if (adj_end > end) - adj_end = end; - if (mm->color_adjust) { mm->color_adjust(entry, color, &adj_start, &adj_end); if (adj_end <= adj_start) continue; } + adj_start = max(adj_start, start); + adj_end = min(adj_end, end); + if (!check_free_hole(adj_start, adj_end, size, alignment)) continue; diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index fbe1b3174f75..34cebcdc2fc4 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -180,6 +180,8 @@ static void decon_commit(struct exynos_drm_crtc *crtc) /* enable output and display signal */ decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0); + + decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); } static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5044f2257e89..6fca39e1c419 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3475,11 +3475,6 @@ static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) return VGACNTRL; } -static inline void __user *to_user_ptr(u64 address) -{ - 
return (void __user *)(uintptr_t)address; -} - static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) { unsigned long j = msecs_to_jiffies(m); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f56af0aaafde..659b90657f36 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -324,7 +324,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; void *vaddr = obj->phys_handle->vaddr + args->offset; - char __user *user_data = to_user_ptr(args->data_ptr); + char __user *user_data = u64_to_user_ptr(args->data_ptr); int ret = 0; /* We manually control the domain here and pretend that it @@ -605,7 +605,7 @@ i915_gem_shmem_pread(struct drm_device *dev, int needs_clflush = 0; struct sg_page_iter sg_iter; - user_data = to_user_ptr(args->data_ptr); + user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); @@ -692,7 +692,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return 0; if (!access_ok(VERIFY_WRITE, - to_user_ptr(args->data_ptr), + u64_to_user_ptr(args->data_ptr), args->size)) return -EFAULT; @@ -783,7 +783,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, if (ret) goto out_unpin; - user_data = to_user_ptr(args->data_ptr); + user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; offset = i915_gem_obj_ggtt_offset(obj) + args->offset; @@ -907,7 +907,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, int needs_clflush_before = 0; struct sg_page_iter sg_iter; - user_data = to_user_ptr(args->data_ptr); + user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); @@ -1036,12 +1036,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return 0; if (!access_ok(VERIFY_READ, - to_user_ptr(args->data_ptr), + u64_to_user_ptr(args->data_ptr), args->size)) return -EFAULT; 
if (likely(!i915.prefault_disable)) { - ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), + ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr), args->size); if (ret) return -EFAULT; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 201947b4377c..8800f410b2d2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -492,7 +492,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; int remain, ret; - user_relocs = to_user_ptr(entry->relocs_ptr); + user_relocs = u64_to_user_ptr(entry->relocs_ptr); remain = entry->relocation_count; while (remain) { @@ -831,7 +831,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, u64 invalid_offset = (u64)-1; int j; - user_relocs = to_user_ptr(exec[i].relocs_ptr); + user_relocs = u64_to_user_ptr(exec[i].relocs_ptr); if (copy_from_user(reloc+total, user_relocs, exec[i].relocation_count * sizeof(*reloc))) { @@ -975,7 +975,7 @@ validate_exec_list(struct drm_device *dev, invalid_flags |= EXEC_OBJECT_NEEDS_GTT; for (i = 0; i < count; i++) { - char __user *ptr = to_user_ptr(exec[i].relocs_ptr); + char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr); int length; /* limited by fault_in_pages_readable() */ if (exec[i].flags & invalid_flags) @@ -1633,7 +1633,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, return -ENOMEM; } ret = copy_from_user(exec_list, - to_user_ptr(args->buffers_ptr), + u64_to_user_ptr(args->buffers_ptr), sizeof(*exec_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", @@ -1669,7 +1669,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { struct drm_i915_gem_exec_object __user *user_exec_list = - to_user_ptr(args->buffers_ptr); + u64_to_user_ptr(args->buffers_ptr); /* Copy the 
new buffer offsets back to the user's exec list. */ for (i = 0; i < args->buffer_count; i++) { @@ -1721,7 +1721,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, return -ENOMEM; } ret = copy_from_user(exec2_list, - to_user_ptr(args->buffers_ptr), + u64_to_user_ptr(args->buffers_ptr), sizeof(*exec2_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", @@ -1734,7 +1734,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ struct drm_i915_gem_exec_object2 __user *user_exec_list = - to_user_ptr(args->buffers_ptr); + u64_to_user_ptr(args->buffers_ptr); int i; for (i = 0; i < args->buffer_count; i++) { diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index f3bee54c414f..cb4313c68f71 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -440,7 +440,9 @@ static bool gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) { return (i + 1 < num && - !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 && + msgs[i].addr == msgs[i + 1].addr && + !(msgs[i].flags & I2C_M_RD) && + (msgs[i].len == 1 || msgs[i].len == 2) && (msgs[i + 1].flags & I2C_M_RD)); } diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index b1a0f5656175..44df959cbadb 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -145,6 +145,8 @@ static int mga_vram_init(struct mga_device *mdev) } mem = pci_iomap(mdev->dev->pdev, 0, 0); + if (!mem) + return -ENOMEM; mdev->mc.vram_size = mga_probe_vram(mdev, mem); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index a11ec7b82b80..2e528b112e1f 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -29,11 +29,6 @@ #define BO_LOCKED 0x4000 #define BO_PINNED 0x2000 -static inline void __user *to_user_ptr(u64 
address) -{ - return (void __user *)(uintptr_t)address; -} - static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gem_address_space *aspace, uint32_t nr_bos, uint32_t nr_cmds, @@ -107,7 +102,7 @@ static int submit_lookup_objects(struct msm_gpu *gpu, struct drm_gem_object *obj; struct msm_gem_object *msm_obj; void __user *userptr = - to_user_ptr(args->bos + (i * sizeof(submit_bo))); + u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) { @@ -362,7 +357,7 @@ static int submit_reloc(struct msm_gpu *gpu, for (i = 0; i < nr_relocs; i++) { struct drm_msm_gem_submit_reloc submit_reloc; void __user *userptr = - to_user_ptr(relocs + (i * sizeof(submit_reloc))); + u64_to_user_ptr(relocs + (i * sizeof(submit_reloc))); uint64_t iova; uint32_t off; bool valid; @@ -473,7 +468,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, for (i = 0; i < args->nr_cmds; i++) { struct drm_msm_gem_submit_cmd submit_cmd; void __user *userptr = - to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); + u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); struct msm_gem_object *msm_obj; uint64_t iova; size_t size; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c index 270e79a774b2..46e2a13cecc4 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c @@ -21,6 +21,9 @@ (0x40 + (((lm) - LM_0) * 0x004)) #define CTL_LAYER_EXT2(lm) \ (0x70 + (((lm) - LM_0) * 0x004)) +#define CTL_LAYER_EXT3(lm) \ + (0xA0 + (((lm) - LM_0) * 0x004)) + #define CTL_TOP 0x014 #define CTL_FLUSH 0x018 #define CTL_START 0x01C @@ -315,8 +318,12 @@ static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx) int i; for (i = 0; i < ctx->mixer_count; i++) { - SDE_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0); - SDE_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0); + int mixer_id = ctx->mixer_hw_caps[i].id; + + SDE_REG_WRITE(c, CTL_LAYER(mixer_id), 0); + 
SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0); + SDE_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0); + SDE_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0); } } diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c index ab65283ceafc..a26188f9e8e9 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.c +++ b/drivers/gpu/drm/msm/sde_power_handle.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -550,12 +550,13 @@ static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx) } #else static int sde_power_data_bus_parse(struct platform_device *pdev, - struct sde_power_handle *phandle) + struct sde_power_data_bus_handle *pdbus) { return 0; } -static void sde_power_data_bus_unregister(u32 reg_bus_hdl) +static void sde_power_data_bus_unregister( + struct sde_power_data_bus_handle *pdbus) { } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index f97b73ec4713..f418c002d323 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -352,6 +352,7 @@ static int panel_simple_remove(struct device *dev) drm_panel_remove(&panel->base); panel_simple_disable(&panel->base); + panel_simple_unprepare(&panel->base); if (panel->ddc) put_device(&panel->ddc->dev); @@ -367,6 +368,7 @@ static void panel_simple_shutdown(struct device *dev) struct panel_simple *panel = dev_get_drvdata(dev); panel_simple_disable(&panel->base); + panel_simple_unprepare(&panel->base); } static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = { diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index b5760851195c..0c6216a6ee9e 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ 
b/drivers/gpu/drm/radeon/atombios_dp.c @@ -45,34 +45,32 @@ static char *pre_emph_names[] = { /***** radeon AUX functions *****/ -/* Atom needs data in little endian format - * so swap as appropriate when copying data to - * or from atom. Note that atom operates on - * dw units. +/* Atom needs data in little endian format so swap as appropriate when copying + * data to or from atom. Note that atom operates on dw units. + * + * Use to_le=true when sending data to atom and provide at least + * ALIGN(num_bytes,4) bytes in the dst buffer. + * + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4) + * byes in the src buffer. */ void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ - u32 *dst32, *src32; + u32 src_tmp[5], dst_tmp[5]; int i; + u8 align_num_bytes = ALIGN(num_bytes, 4); - memcpy(src_tmp, src, num_bytes); - src32 = (u32 *)src_tmp; - dst32 = (u32 *)dst_tmp; if (to_le) { - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = cpu_to_le32(src32[i]); - memcpy(dst, dst_tmp, num_bytes); + memcpy(src_tmp, src, num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = cpu_to_le32(src_tmp[i]); + memcpy(dst, dst_tmp, align_num_bytes); } else { - u8 dws = num_bytes & ~3; - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = le32_to_cpu(src32[i]); - memcpy(dst, dst_tmp, dws); - if (num_bytes % 4) { - for (i = 0; i < (num_bytes % 4); i++) - dst[dws+i] = dst_tmp[dws+i]; - } + memcpy(src_tmp, src, align_num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = le32_to_cpu(src_tmp[i]); + memcpy(dst, dst_tmp, num_bytes); } #else memcpy(dst, src, num_bytes); diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 26da2f4d7b4f..a2937a693591 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -226,7 +226,6 @@ static int radeonfb_create(struct 
drm_fb_helper *helper, } info->par = rfbdev; - info->skip_vt_switch = true; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index d56630c60039..117a2f52fb4e 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -346,6 +346,10 @@ static int vtg_probe(struct platform_device *pdev) return -ENOMEM; } vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + if (!vtg->regs) { + DRM_ERROR("failed to remap I/O memory\n"); + return -ENOMEM; + } np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0); if (np) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index f3f31f995878..be3971b22a02 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -708,7 +708,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) * allocation taken by fbdev */ if (!(dev_priv->capabilities & SVGA_CAP_3D)) - mem_size *= 2; + mem_size *= 3; dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; dev_priv->prim_bb_mem = diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c index fe89b6823217..263e97235ea0 100644 --- a/drivers/iio/light/cm3232.c +++ b/drivers/iio/light/cm3232.c @@ -119,7 +119,7 @@ static int cm3232_reg_init(struct cm3232_chip *chip) if (ret < 0) dev_err(&chip->client->dev, "Error writing reg_cmd\n"); - return 0; + return ret; } /** diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c index 572bc6f02ca8..e18f12b74610 100644 --- a/drivers/iio/trigger/iio-trig-interrupt.c +++ b/drivers/iio/trigger/iio-trig-interrupt.c @@ -58,7 +58,7 @@ static int iio_interrupt_trigger_probe(struct platform_device *pdev) trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL); if (!trig_info) { ret = -ENOMEM; - goto error_put_trigger; + goto error_free_trigger; } iio_trigger_set_drvdata(trig, 
trig_info); trig_info->irq = irq; @@ -83,8 +83,8 @@ error_release_irq: free_irq(irq, trig); error_free_trig_info: kfree(trig_info); -error_put_trigger: - iio_trigger_put(trig); +error_free_trigger: + iio_trigger_free(trig); error_ret: return ret; } @@ -99,7 +99,7 @@ static int iio_interrupt_trigger_remove(struct platform_device *pdev) iio_trigger_unregister(trig); free_irq(trig_info->irq, trig); kfree(trig_info); - iio_trigger_put(trig); + iio_trigger_free(trig); return 0; } diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c index 3dfab2bc6d69..202e8b89caf2 100644 --- a/drivers/iio/trigger/iio-trig-sysfs.c +++ b/drivers/iio/trigger/iio-trig-sysfs.c @@ -174,7 +174,7 @@ static int iio_sysfs_trigger_probe(int id) return 0; out2: - iio_trigger_put(t->trig); + iio_trigger_free(t->trig); free_t: kfree(t); out1: diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 2018d24344de..f74b11542603 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1373,7 +1373,7 @@ static void ipoib_cm_tx_reap(struct work_struct *work) while (!list_empty(&priv->cm.reap_list)) { p = list_entry(priv->cm.reap_list.next, typeof(*p), list); - list_del(&p->list); + list_del_init(&p->list); spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); ipoib_cm_tx_destroy(p); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index e397f1b0af09..9a99cee2665a 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -670,12 +670,19 @@ static void srp_path_rec_completion(int status, static int srp_lookup_path(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; - int ret; + int ret = -ENODEV; ch->path.numb_path = 1; init_completion(&ch->done); + /* + * Avoid that the SCSI host can be removed by srp_remove_target() + * before srp_path_rec_completion() is called. 
+ */ + if (!scsi_host_get(target->scsi_host)) + goto out; + ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client, target->srp_host->srp_dev->dev, target->srp_host->port, @@ -689,18 +696,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch) GFP_KERNEL, srp_path_rec_completion, ch, &ch->path_query); - if (ch->path_query_id < 0) - return ch->path_query_id; + ret = ch->path_query_id; + if (ret < 0) + goto put; ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) - return ret; + goto put; - if (ch->status < 0) + ret = ch->status; + if (ret < 0) shost_printk(KERN_WARNING, target->scsi_host, PFX "Path record query failed\n"); - return ch->status; +put: + scsi_host_put(target->scsi_host); + +out: + return ret; } static int srp_send_req(struct srp_rdma_ch *ch, bool multich) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index eaabf3125846..c52131233ba7 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -3425,7 +3425,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) { const char *p; unsigned len, count, leading_zero_bytes; - int ret, rc; + int ret; p = name; if (strncasecmp(p, "0x", 2) == 0) @@ -3437,10 +3437,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) count = min(len / 2, 16U); leading_zero_bytes = 16 - count; memset(i_port_id, 0, leading_zero_bytes); - rc = hex2bin(i_port_id + leading_zero_bytes, p, count); - if (rc < 0) - pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc); - ret = 0; + ret = hex2bin(i_port_id + leading_zero_bytes, p, count); + if (ret < 0) + pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret); out: return ret; } diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c index 0fd612dd76ed..aaf43befffaa 100644 --- a/drivers/input/keyboard/mpr121_touchkey.c +++ b/drivers/input/keyboard/mpr121_touchkey.c @@ -87,7 +87,8 @@ static irqreturn_t 
mpr_touchkey_interrupt(int irq, void *dev_id) struct mpr121_touchkey *mpr121 = dev_id; struct i2c_client *client = mpr121->client; struct input_dev *input = mpr121->input_dev; - unsigned int key_num, key_val, pressed; + unsigned long bit_changed; + unsigned int key_num; int reg; reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR); @@ -105,18 +106,22 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id) reg &= TOUCH_STATUS_MASK; /* use old press bit to figure out which bit changed */ - key_num = ffs(reg ^ mpr121->statusbits) - 1; - pressed = reg & (1 << key_num); + bit_changed = reg ^ mpr121->statusbits; mpr121->statusbits = reg; + for_each_set_bit(key_num, &bit_changed, mpr121->keycount) { + unsigned int key_val, pressed; - key_val = mpr121->keycodes[key_num]; + pressed = reg & BIT(key_num); + key_val = mpr121->keycodes[key_num]; - input_event(input, EV_MSC, MSC_SCAN, key_num); - input_report_key(input, key_val, pressed); - input_sync(input); + input_event(input, EV_MSC, MSC_SCAN, key_num); + input_report_key(input, key_val, pressed); + + dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val, + pressed ? "pressed" : "released"); - dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val, - pressed ? 
"pressed" : "released"); + } + input_sync(input); out: return IRQ_HANDLED; @@ -231,6 +236,7 @@ static int mpr_touchkey_probe(struct i2c_client *client, input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); + input_set_capability(input_dev, EV_MSC, MSC_SCAN); input_dev->keycode = mpr121->keycodes; input_dev->keycodesize = sizeof(mpr121->keycodes[0]); diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index f4e8fbec6a94..b5304e264881 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf) return NULL; } - while (buflen > 0) { + while (buflen >= sizeof(*union_desc)) { union_desc = (struct usb_cdc_union_desc *)buf; + if (union_desc->bLength > buflen) { + dev_err(&intf->dev, "Too large descriptor\n"); + return NULL; + } + if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { dev_dbg(&intf->dev, "Found union header\n"); - return union_desc; + + if (union_desc->bLength >= sizeof(*union_desc)) + return union_desc; + + dev_err(&intf->dev, + "Union descriptor to short (%d vs %zd\n)", + union_desc->bLength, sizeof(*union_desc)); + return NULL; } buflen -= union_desc->bLength; diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index b8c50d883b2c..c9d491bc85e0 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1240,6 +1240,7 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0605", 0 }, { "ELAN0609", 0 }, { "ELAN060B", 0 }, + { "ELAN060C", 0 }, { "ELAN0611", 0 }, { "ELAN1000", 0 }, { } diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 00df3832faab..64f1eb8fdcbc 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1033,13 +1033,8 @@ static void arm_smmu_write_strtab_ent(struct 
arm_smmu_device *smmu, u32 sid, } } - /* Nuke the existing Config, as we're going to rewrite it */ - val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT); - - if (ste->valid) - val |= STRTAB_STE_0_V; - else - val &= ~STRTAB_STE_0_V; + /* Nuke the existing STE_0 value, as we're going to rewrite it */ + val = ste->valid ? STRTAB_STE_0_V : 0; if (ste->bypass) { val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT @@ -1068,7 +1063,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK << STRTAB_STE_0_S1CTXPTR_SHIFT) | STRTAB_STE_0_CFG_S1_TRANS; - } if (ste->s2_cfg) { diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c index 1d84ec4687e2..461693ef9d27 100644 --- a/drivers/leds/leds-qpnp-wled.c +++ b/drivers/leds/leds-qpnp-wled.c @@ -210,6 +210,7 @@ #define QPNP_WLED_SEC_ACCESS_REG(b) (b + 0xD0) #define QPNP_WLED_SEC_UNLOCK 0xA5 +#define NUM_DDIC_CODES 256 #define QPNP_WLED_MAX_STRINGS 4 #define QPNP_PM660_WLED_MAX_STRINGS 3 #define WLED_MAX_LEVEL_4095 4095 @@ -315,6 +316,7 @@ static struct wled_vref_setting vref_setting_pmi8998 = { * @ cdev - led class device * @ pdev - platform device * @ work - worker for led operation + * @ wq - workqueue for setting brightness level * @ lock - mutex lock for exclusive access * @ fdbk_op - output feedback mode * @ dim_mode - dimming mode @@ -340,6 +342,10 @@ static struct wled_vref_setting vref_setting_pmi8998 = { * @ ramp_ms - delay between ramp steps in ms * @ ramp_step - ramp step size * @ cons_sync_write_delay_us - delay between two consecutive writes to SYNC + * @ auto_calibration_ovp_count - OVP fault irq count to run auto calibration + * @ max_strings - Number of strings supported in WLED peripheral + * @ prev_level - Previous brightness level + * @ brt_map_table - Brightness map table * @ strings - supported list of strings * @ num_strings - number of strings * @ loop_auto_gm_thresh - the clamping level for auto gm @@ 
-353,6 +359,13 @@ static struct wled_vref_setting vref_setting_pmi8998 = { * @ en_cabc - enable or disable cabc * @ disp_type_amoled - type of display: LCD/AMOLED * @ en_ext_pfet_sc_pro - enable sc protection on external pfet + * @ prev_state - previous state of WLED + * @ stepper_en - Flag to enable stepper algorithm + * @ ovp_irq_disabled - OVP interrupt disable status + * @ auto_calib_enabled - Flag to enable auto calibration feature + * @ auto_calib_done - Flag to indicate auto calibration is done + * @ module_dis_perm - Flat to keep module permanently disabled + * @ start_ovp_fault_time - Time when the OVP fault first occurred */ struct qpnp_wled { struct led_classdev cdev; @@ -360,6 +373,7 @@ struct qpnp_wled { struct regmap *regmap; struct pmic_revid_data *pmic_rev_id; struct work_struct work; + struct workqueue_struct *wq; struct mutex lock; struct mutex bus_lock; enum qpnp_wled_fdbk_op fdbk_op; @@ -388,6 +402,8 @@ struct qpnp_wled { u16 cons_sync_write_delay_us; u16 auto_calibration_ovp_count; u16 max_strings; + u16 prev_level; + u16 *brt_map_table; u8 strings[QPNP_WLED_MAX_STRINGS]; u8 num_strings; u8 loop_auto_gm_thresh; @@ -402,6 +418,7 @@ struct qpnp_wled { bool disp_type_amoled; bool en_ext_pfet_sc_pro; bool prev_state; + bool stepper_en; bool ovp_irq_disabled; bool auto_calib_enabled; bool auto_calib_done; @@ -409,6 +426,21 @@ struct qpnp_wled { ktime_t start_ovp_fault_time; }; +static int qpnp_wled_step_delay_us = 52000; +module_param_named( + total_step_delay_us, qpnp_wled_step_delay_us, int, 0600 +); + +static int qpnp_wled_step_size_threshold = 3; +module_param_named( + step_size_threshold, qpnp_wled_step_size_threshold, int, 0600 +); + +static int qpnp_wled_step_delay_gain = 2; +module_param_named( + step_delay_gain, qpnp_wled_step_delay_gain, int, 0600 +); + /* helper to read a pmic register */ static int qpnp_wled_read_reg(struct qpnp_wled *wled, u16 addr, u8 *data) { @@ -570,6 +602,93 @@ static int qpnp_wled_set_level(struct qpnp_wled *wled, 
int level) return rc; } + pr_debug("level:%d\n", level); + return 0; +} + +static int qpnp_wled_set_map_level(struct qpnp_wled *wled, int level) +{ + int rc, i; + + if (level < wled->prev_level) { + for (i = wled->prev_level; i >= level; i--) { + rc = qpnp_wled_set_level(wled, wled->brt_map_table[i]); + if (rc < 0) { + pr_err("set brightness level failed, rc:%d\n", + rc); + return rc; + } + } + } else if (level > wled->prev_level) { + for (i = wled->prev_level; i <= level; i++) { + rc = qpnp_wled_set_level(wled, wled->brt_map_table[i]); + if (rc < 0) { + pr_err("set brightness level failed, rc:%d\n", + rc); + return rc; + } + } + } + + return 0; +} + +static int qpnp_wled_set_step_level(struct qpnp_wled *wled, int new_level) +{ + int rc, i, num_steps, delay_us; + u16 level, start_level, end_level, step_size; + bool level_inc = false; + + level = wled->prev_level; + start_level = wled->brt_map_table[level]; + end_level = wled->brt_map_table[new_level]; + level_inc = (new_level > level); + + num_steps = abs(start_level - end_level); + if (!num_steps) + return 0; + + delay_us = qpnp_wled_step_delay_us / num_steps; + pr_debug("level goes from [%d %d] num_steps: %d, delay: %d\n", + start_level, end_level, num_steps, delay_us); + + if (delay_us < 500) { + step_size = 1000 / delay_us; + num_steps = num_steps / step_size; + delay_us = 1000; + } else { + if (num_steps < qpnp_wled_step_size_threshold) + delay_us *= qpnp_wled_step_delay_gain; + + step_size = 1; + } + + i = start_level; + while (num_steps--) { + if (level_inc) + i += step_size; + else + i -= step_size; + + rc = qpnp_wled_set_level(wled, i); + if (rc < 0) + return rc; + + if (delay_us > 0) { + if (delay_us < 20000) + usleep_range(delay_us, delay_us + 1); + else + msleep(delay_us / USEC_PER_MSEC); + } + } + + if (i != end_level) { + i = end_level; + rc = qpnp_wled_set_level(wled, i); + if (rc < 0) + return rc; + } + return 0; } @@ -942,15 +1061,33 @@ static struct device_attribute qpnp_wled_attrs[] = { static 
void qpnp_wled_work(struct work_struct *work) { struct qpnp_wled *wled; - int level, rc; + int level, level_255, rc; wled = container_of(work, struct qpnp_wled, work); + mutex_lock(&wled->lock); level = wled->cdev.brightness; - mutex_lock(&wled->lock); + if (wled->brt_map_table) { + /* + * Change the 12 bit level to 8 bit level and use the mapped + * values for 12 bit level from brightness map table. + */ + level_255 = DIV_ROUND_CLOSEST(level, 16); + if (level_255 > 255) + level_255 = 255; - if (level) { + pr_debug("level: %d level_255: %d\n", level, level_255); + if (wled->stepper_en) + rc = qpnp_wled_set_step_level(wled, level_255); + else + rc = qpnp_wled_set_map_level(wled, level_255); + if (rc) { + dev_err(&wled->pdev->dev, "wled set level failed\n"); + goto unlock_mutex; + } + wled->prev_level = level_255; + } else if (level) { rc = qpnp_wled_set_level(wled, level); if (rc) { dev_err(&wled->pdev->dev, "wled set level failed\n"); @@ -1009,7 +1146,7 @@ static void qpnp_wled_set(struct led_classdev *led_cdev, level = wled->cdev.max_brightness; wled->cdev.brightness = level; - schedule_work(&wled->work); + queue_work(wled->wq, &wled->work); } static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr) @@ -2115,7 +2252,7 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled) struct property *prop; const char *temp_str; u32 temp_val; - int rc, i; + int rc, i, size; u8 *strings; wled->cdev.name = "wled"; @@ -2134,6 +2271,45 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled) return rc; } + if (of_find_property(pdev->dev.of_node, "qcom,wled-brightness-map", + NULL)) { + size = of_property_count_elems_of_size(pdev->dev.of_node, + "qcom,wled-brightness-map", sizeof(u16)); + if (size != NUM_DDIC_CODES) { + pr_err("Invalid WLED brightness map size:%d\n", size); + return rc; + } + + wled->brt_map_table = devm_kcalloc(&pdev->dev, NUM_DDIC_CODES, + sizeof(u16), GFP_KERNEL); + if (!wled->brt_map_table) + return -ENOMEM; + + rc = 
of_property_read_u16_array(pdev->dev.of_node, + "qcom,wled-brightness-map", wled->brt_map_table, + NUM_DDIC_CODES); + if (rc < 0) { + pr_err("Error in reading WLED brightness map, rc=%d\n", + rc); + return rc; + } + + for (i = 0; i < NUM_DDIC_CODES; i++) { + if (wled->brt_map_table[i] > WLED_MAX_LEVEL_4095) { + pr_err("WLED brightness map not in range\n"); + return -EDOM; + } + + if ((i > 1) && wled->brt_map_table[i] + < wled->brt_map_table[i - 1]) { + pr_err("WLED brightness map not in ascending order?\n"); + return -EDOM; + } + } + } + + wled->stepper_en = of_property_read_bool(pdev->dev.of_node, + "qcom,wled-stepper-en"); wled->disp_type_amoled = of_property_read_bool(pdev->dev.of_node, "qcom,disp-type-amoled"); if (wled->disp_type_amoled) { @@ -2469,6 +2645,7 @@ static int qpnp_wled_probe(struct platform_device *pdev) } wled->pmic_rev_id = get_revid_data(revid_node); + of_node_put(revid_node); if (IS_ERR_OR_NULL(wled->pmic_rev_id)) { pr_err("Unable to get pmic_revid rc=%ld\n", PTR_ERR(wled->pmic_rev_id)); @@ -2483,6 +2660,12 @@ static int qpnp_wled_probe(struct platform_device *pdev) pr_debug("PMIC subtype %d Digital major %d\n", wled->pmic_rev_id->pmic_subtype, wled->pmic_rev_id->rev4); + wled->wq = alloc_ordered_workqueue("qpnp_wled_wq", WQ_HIGHPRI); + if (!wled->wq) { + pr_err("Unable to alloc workqueue for WLED\n"); + return -ENOMEM; + } + prop = of_get_address_by_name(pdev->dev.of_node, QPNP_WLED_SINK_BASE, NULL, NULL); if (!prop) { @@ -2548,6 +2731,7 @@ sysfs_fail: led_classdev_unregister(&wled->cdev); wled_register_fail: cancel_work_sync(&wled->work); + destroy_workqueue(wled->wq); mutex_destroy(&wled->lock); return rc; } @@ -2563,6 +2747,7 @@ static int qpnp_wled_remove(struct platform_device *pdev) led_classdev_unregister(&wled->cdev); cancel_work_sync(&wled->work); + destroy_workqueue(wled->wq); mutex_destroy(&wled->lock); return 0; diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 8eeab72b93e2..4d46f2ce606f 100644 --- 
a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -406,7 +406,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) finish_wait(&ca->set->bucket_wait, &w); out: - wake_up_process(ca->alloc_thread); + if (ca->alloc_thread) + wake_up_process(ca->alloc_thread); trace_bcache_alloc(ca, reserve); @@ -478,7 +479,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, if (b == -1) goto err; - k->ptr[i] = PTR(ca->buckets[b].gen, + k->ptr[i] = MAKE_PTR(ca->buckets[b].gen, bucket_to_sector(c, b), ca->sb.nr_this_dev); diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index 243de0bf15cd..4bf15182c4da 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -584,7 +584,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey return false; for (i = 0; i < KEY_PTRS(l); i++) - if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || + if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) return false; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 29eba7219b01..6ed066a0e7c0 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -508,7 +508,7 @@ static void journal_reclaim(struct cache_set *c) continue; ja->cur_idx = next; - k->ptr[n++] = PTR(0, + k->ptr[n++] = MAKE_PTR(0, bucket_to_sector(c, ca->sb.d[ja->cur_idx]), ca->sb.nr_this_dev); } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 0ee41fd9d850..1445aab270f4 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -708,7 +708,14 @@ static void cached_dev_read_error(struct closure *cl) struct search *s = container_of(cl, struct search, cl); struct bio *bio = &s->bio.bio; - if (s->recoverable) { + /* + * If read request hit dirty data (s->read_dirty_data is true), + * then recovery a failed read request from cached device may + * get a stale data back. 
So read failure recovery is only + * permitted when read request hit clean data in cache device, + * or when cache read race happened. + */ + if (s->recoverable && !s->read_dirty_data) { /* Retry from the backing device: */ trace_bcache_read_retry(s->orig_bio); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index cdceefd0e57d..2ec7f90e3455 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -928,7 +928,8 @@ static void __get_memory_limit(struct dm_bufio_client *c, buffers = c->minimum_buffers; *limit_buffers = buffers; - *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100; + *threshold_buffers = mult_frac(buffers, + DM_BUFIO_WRITEBACK_PERCENT, 100); } /* @@ -1829,19 +1830,15 @@ static int __init dm_bufio_init(void) memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); - mem = (__u64)((totalram_pages - totalhigh_pages) * - DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT; + mem = (__u64)mult_frac(totalram_pages - totalhigh_pages, + DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; if (mem > ULONG_MAX) mem = ULONG_MAX; #ifdef CONFIG_MMU - /* - * Get the size of vmalloc space the same way as VMALLOC_TOTAL - * in fs/proc/internal.h - */ - if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100) - mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100; + if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100)) + mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100); #endif dm_bufio_default_cache_size = mem; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 47ac131099d9..f7f560f5f056 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -3517,11 +3517,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) md = container_of(kobj, struct mapped_device, kobj_holder.kobj); - if (test_bit(DMF_FREEING, &md->flags) || - dm_deleting_md(md)) - return NULL; - + spin_lock(&_minor_lock); + if (test_bit(DMF_FREEING, &md->flags) 
|| dm_deleting_md(md)) { + md = NULL; + goto out; + } dm_get(md); +out: + spin_unlock(&_minor_lock); + return md; } diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 01adcdc52346..a9e2722f5e22 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -2856,6 +2856,9 @@ static int adv76xx_parse_dt(struct adv76xx_state *state) state->pdata.alt_data_sat = 1; state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0; state->pdata.bus_order = ADV7604_BUS_ORDER_RGB; + state->pdata.dr_str_data = ADV76XX_DR_STR_MEDIUM_HIGH; + state->pdata.dr_str_clk = ADV76XX_DR_STR_MEDIUM_HIGH; + state->pdata.dr_str_sync = ADV76XX_DR_STR_MEDIUM_HIGH; return 0; } diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c index 6a969401e950..60532929a916 100644 --- a/drivers/media/platform/msm/camera_v2/msm.c +++ b/drivers/media/platform/msm/camera_v2/msm.c @@ -1288,7 +1288,7 @@ static ssize_t write_logsync(struct file *file, const char __user *buf, uint64_t seq_num = 0; int ret; - if (copy_from_user(lbuf, buf, sizeof(lbuf))) + if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1)) return -EFAULT; ret = sscanf(lbuf, "%llu", &seq_num); diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index eb9e7feb9b13..7a16e9ea041c 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -2419,6 +2419,11 @@ static int imon_probe(struct usb_interface *interface, mutex_lock(&driver_lock); first_if = usb_ifnum_to_if(usbdev, 0); + if (!first_if) { + ret = -ENODEV; + goto fail; + } + first_if_ctx = usb_get_intfdata(first_if); if (ifnum == 0) { diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index efc21b1da211..ca107033e429 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c @@ -286,11 +286,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, if (!dev->max_timeout) return -ENOSYS; + /* Check for multiply overflow */ + if 
(val > U32_MAX / 1000) + return -EINVAL; + tmp = val * 1000; - if (tmp < dev->min_timeout || - tmp > dev->max_timeout) - return -EINVAL; + if (tmp < dev->min_timeout || tmp > dev->max_timeout) + return -EINVAL; dev->timeout = tmp; break; diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c index 07d08c49f4d4..b2e16bb67572 100644 --- a/drivers/media/usb/as102/as102_fw.c +++ b/drivers/media/usb/as102/as102_fw.c @@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, unsigned char *cmd, const struct firmware *firmware) { - struct as10x_fw_pkt_t fw_pkt; + struct as10x_fw_pkt_t *fw_pkt; int total_read_bytes = 0, errno = 0; unsigned char addr_has_changed = 0; + fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL); + if (!fw_pkt) + return -ENOMEM; + + for (total_read_bytes = 0; total_read_bytes < firmware->size; ) { int read_bytes = 0, data_len = 0; /* parse intel hex line */ read_bytes = parse_hex_line( (u8 *) (firmware->data + total_read_bytes), - fw_pkt.raw.address, - fw_pkt.raw.data, + fw_pkt->raw.address, + fw_pkt->raw.data, &data_len, &addr_has_changed); @@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, /* detect the end of file */ total_read_bytes += read_bytes; if (total_read_bytes == firmware->size) { - fw_pkt.u.request[0] = 0x00; - fw_pkt.u.request[1] = 0x03; + fw_pkt->u.request[0] = 0x00; + fw_pkt->u.request[1] = 0x03; /* send EOF command */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) - &fw_pkt, 2, 0); + fw_pkt, 2, 0); if (errno < 0) goto error; } else { if (!addr_has_changed) { /* prepare command to send */ - fw_pkt.u.request[0] = 0x00; - fw_pkt.u.request[1] = 0x01; + fw_pkt->u.request[0] = 0x00; + fw_pkt->u.request[1] = 0x01; - data_len += sizeof(fw_pkt.u.request); - data_len += sizeof(fw_pkt.raw.address); + data_len += sizeof(fw_pkt->u.request); + data_len += sizeof(fw_pkt->raw.address); /* send cmd to device */ errno = 
bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) - &fw_pkt, + fw_pkt, data_len, 0); if (errno < 0) @@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, } } error: + kfree(fw_pkt); return (errno == 0) ? total_read_bytes : errno; } diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index 2c5f76d588ac..04ae21278440 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -1672,7 +1672,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface, nr = dev->devno; assoc_desc = udev->actconfig->intf_assoc[0]; - if (assoc_desc->bFirstInterface != ifnum) { + if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) { dev_err(d, "Not found matching IAD interface\n"); retval = -ENODEV; goto err_if; diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 7ed49646a699..7df0707a0455 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -292,7 +292,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap) stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -326,7 +326,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap) stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -479,7 +479,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap) &stk7700ph_dib7700_xc3028_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. 
Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -1010,7 +1010,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap) &dib7070p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -1068,7 +1068,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap) &dib7770p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3036,7 +3036,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap) if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config); @@ -3089,7 +3089,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap) /* initialize IC 0 */ if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3119,7 +3119,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap) i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1); if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. 
Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3194,7 +3194,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap) 1, 0x10, &tfe7790p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, @@ -3289,7 +3289,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap) stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3364,7 +3364,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap) stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -3600,7 +3600,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap) if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) { /* Demodulator not found for some reason? 
*/ - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index bc45a225e710..3feaa9b154f0 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1205,6 +1205,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, } EXPORT_SYMBOL(v4l2_ctrl_fill); +static u32 user_flags(const struct v4l2_ctrl *ctrl) +{ + u32 flags = ctrl->flags; + + if (ctrl->is_ptr) + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; + + return flags; +} + static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes) { memset(ev->reserved, 0, sizeof(ev->reserved)); @@ -1212,7 +1222,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change ev->id = ctrl->id; ev->u.ctrl.changes = changes; ev->u.ctrl.type = ctrl->type; - ev->u.ctrl.flags = ctrl->flags; + ev->u.ctrl.flags = user_flags(ctrl); if (ctrl->is_ptr) ev->u.ctrl.value64 = 0; else @@ -2541,10 +2551,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr else qc->id = ctrl->id; strlcpy(qc->name, ctrl->name, sizeof(qc->name)); - qc->flags = ctrl->flags; + qc->flags = user_flags(ctrl); qc->type = ctrl->type; - if (ctrl->is_ptr) - qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; qc->elem_size = ctrl->elem_size; qc->elems = ctrl->elems; qc->nr_of_dims = ctrl->nr_of_dims; diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 5d7c0900fa1b..f112c5bc082a 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -257,6 +257,9 @@ static ssize_t at24_read(struct at24_data *at24, if (unlikely(!count)) return count; + if (off + count > at24->chip.byte_len) + return -EINVAL; + /* * Read data from chip, protecting against concurrent updates * from this host, but not from other I2C masters. 
@@ -311,6 +314,9 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf, unsigned long timeout, write_time; unsigned next_page; + if (offset + count > at24->chip.byte_len) + return -EINVAL; + /* Get corresponding I2C address and adjust offset */ client = at24_translate_offset(at24, &offset); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index ae54302be8fd..88699f852aa2 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -52,9 +52,28 @@ static void mmc_host_classdev_release(struct device *dev) kfree(host); } +static int mmc_host_prepare(struct device *dev) +{ + /* + * Since mmc_host is a virtual device, we don't have to do anything. + * If we return a positive value, the pm framework will consider that + * the runtime suspend and system suspend of this device is same and + * will set direct_complete flag as true. We don't want this as the + * mmc_host always has positive disable_depth and setting the flag + * will not speed up the suspend process. + * So return 0. 
+ */ + return 0; +} + +static const struct dev_pm_ops mmc_pm_ops = { + .prepare = mmc_host_prepare, +}; + static struct class mmc_host_class = { .name = "mmc_host", .dev_release = mmc_host_classdev_release, + .pm = &mmc_pm_ops, }; int mmc_register_host_class(void) diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 54ab48827258..7ba109e8cf88 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2663,15 +2663,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const uint8_t *buf) { struct nand_chip *chip = mtd->priv; + int chipnr = (int)(to >> chip->chip_shift); struct mtd_oob_ops ops; int ret; - /* Wait for the device to get ready */ - panic_nand_wait(mtd, chip, 400); - /* Grab the device */ panic_nand_get_device(chip, mtd, FL_WRITING); + chip->select_chip(mtd, chipnr); + + /* Wait for the device to get ready */ + panic_nand_wait(mtd, chip, 400); + memset(&ops, 0, sizeof(ops)); ops.len = len; ops.datbuf = (uint8_t *)buf; diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index e90c6a7333d7..2e4649655181 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -191,7 +191,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev) */ static int ipddp_create(struct ipddp_route *new_rt) { - struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL); + struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL); if (rt == NULL) return -ENOMEM; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5dca77e0ffed..2cb34b0f3856 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3166,7 +3166,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) hash ^= (hash >> 16); hash ^= (hash >> 8); - return hash; + return hash >> 1; } /*-------------------------- Device entry points ----------------------------*/ diff --git 
a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index cf7c18947189..d065c0e2d18e 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev, break; case BOSCH_D_CAN: priv->regs = reg_map_d_can; - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; break; default: ret = -EINVAL; diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index e36d10520e24..717530eac70c 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev) break; case BOSCH_D_CAN: priv->regs = reg_map_d_can; - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; priv->read_reg32 = d_can_plat_read_reg32; diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index b0c80859f746..1ac2090a1721 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) } stats->rx_over_errors++; stats->rx_errors++; + + /* reset the CAN IP by entering reset mode + * ignoring timeout error + */ + set_reset_mode(dev); + set_normal_mode(dev); + /* clear bit */ sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG); } @@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id) netif_wake_queue(dev); can_led_event(dev, CAN_LED_EVENT_TX); } - if (isrc & SUN4I_INT_RBUF_VLD) { - /* receive interrupt */ + if ((isrc & SUN4I_INT_RBUF_VLD) && + !(isrc & SUN4I_INT_DATA_OR)) { + /* receive interrupt - don't read if overrun occurred */ while (status & SUN4I_STA_RBUF_RDY) { /* RX buffer is not empty */ sun4i_can_rx(dev); diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 
8f8418d2ac4a..a0012c3cb4f6 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -2366,9 +2366,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * 4) Get the hardware address. * 5) Put the card to sleep. */ - if (typhoon_reset(ioaddr, WaitSleep) < 0) { + err = typhoon_reset(ioaddr, WaitSleep); + if (err < 0) { err_msg = "could not reset 3XP"; - err = -EIO; goto error_out_dma; } @@ -2382,24 +2382,25 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) typhoon_init_interface(tp); typhoon_init_rings(tp); - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST); + if (err < 0) { err_msg = "cannot boot 3XP sleep image"; - err = -EIO; goto error_out_reset; } INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { + err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp); + if (err < 0) { err_msg = "cannot read MAC address"; - err = -EIO; goto error_out_reset; } *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); - if(!is_valid_ether_addr(dev->dev_addr)) { + if (!is_valid_ether_addr(dev->dev_addr)) { err_msg = "Could not obtain valid ethernet address, aborting"; + err = -EIO; goto error_out_reset; } @@ -2407,7 +2408,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * later when we print out the version reported. 
*/ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { + err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp); + if (err < 0) { err_msg = "Could not get Sleep Image version"; goto error_out_reset; } @@ -2424,9 +2426,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if(xp_resp[0].numDesc != 0) tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; - if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { + err = typhoon_sleep(tp, PCI_D3hot, 0); + if (err < 0) { err_msg = "cannot put adapter to sleep"; - err = -EIO; goto error_out_reset; } @@ -2449,7 +2451,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features = dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; - if(register_netdev(dev) < 0) { + err = register_netdev(dev); + if (err < 0) { err_msg = "unable to register netdev"; goto error_out_reset; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 8860e74aa28f..027705117086 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1045,15 +1045,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, goto out; } - /* Insert TSB and checksum infos */ - if (priv->tsb_en) { - skb = bcm_sysport_insert_tsb(skb, dev); - if (!skb) { - ret = NETDEV_TX_OK; - goto out; - } - } - /* The Ethernet switch we are interfaced with needs packets to be at * least 64 bytes (including FCS) otherwise they will be discarded when * they enter the switch port logic. When Broadcom tags are enabled, we @@ -1061,13 +1052,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, * (including FCS and tag) because the length verification is done after * the Broadcom tag is stripped off the ingress packet. 
*/ - if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { + if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { ret = NETDEV_TX_OK; goto out; } - skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ? - ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len; + /* Insert TSB and checksum infos */ + if (priv->tsb_en) { + skb = bcm_sysport_insert_tsb(skb, dev); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; + } + } + + skb_len = skb->len; mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); if (dma_mapping_error(kdev, mapping)) { diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index b1b9ebafb354..a3b2e23921bf 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -257,8 +257,8 @@ enum rx_desc_status_bits { RXFSD = 0x00000800, /* first descriptor */ RXLSD = 0x00000400, /* last descriptor */ ErrorSummary = 0x80, /* error summary */ - RUNT = 0x40, /* runt packet received */ - LONG = 0x20, /* long packet received */ + RUNTPKT = 0x40, /* runt packet received */ + LONGPKT = 0x20, /* long packet received */ FAE = 0x10, /* frame align error */ CRC = 0x08, /* crc error */ RXER = 0x04, /* receive error */ @@ -1633,7 +1633,7 @@ static int netdev_rx(struct net_device *dev) dev->name, rx_status); dev->stats.rx_errors++; /* end of a packet. 
*/ - if (rx_status & (LONG | RUNT)) + if (rx_status & (LONGPKT | RUNTPKT)) dev->stats.rx_length_errors++; if (rx_status & RXER) dev->stats.rx_frame_errors++; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ab716042bdd2..458e2d97d096 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2968,6 +2968,7 @@ static void set_multicast_list(struct net_device *ndev) struct netdev_hw_addr *ha; unsigned int i, bit, data, crc, tmp; unsigned char hash; + unsigned int hash_high = 0, hash_low = 0; if (ndev->flags & IFF_PROMISC) { tmp = readl(fep->hwp + FEC_R_CNTRL); @@ -2990,11 +2991,7 @@ static void set_multicast_list(struct net_device *ndev) return; } - /* Clear filter and add the addresses in hash register - */ - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - + /* Add the addresses in hash register */ netdev_for_each_mc_addr(ha, ndev) { /* calculate crc32 value of mac address */ crc = 0xffffffff; @@ -3012,16 +3009,14 @@ static void set_multicast_list(struct net_device *ndev) */ hash = (crc >> (32 - HASH_BITS)) & 0x3f; - if (hash > 31) { - tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - tmp |= 1 << (hash - 32); - writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - } else { - tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); - tmp |= 1 << hash; - writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - } + if (hash > 31) + hash_high |= 1 << (hash - 32); + else + hash_low |= 1 << hash; } + + writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); } /* Set a MAC change in hardware. 
*/ diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index e59d7c283cd4..645ace74429e 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). **/ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) { @@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 0; + return 1; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * different link partner. 
*/ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) + if (ret_val) { e_dbg("Error configuring flow control\n"); + return ret_val; + } - return ret_val; + return 1; } /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 80ec587d510e..5205f1ebe381 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5017,7 +5017,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) case e1000_media_type_copper: if (hw->mac.get_link_status) { ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; + link_active = ret_val > 0; } else { link_active = true; } @@ -5035,7 +5035,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) break; } - if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ e_info("Gigabit has been disabled, downgrading speed\n"); diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index de13aeacae97..8e674a0988b0 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, s32 ret_val = 0; u16 i, phy_status; + *success = false; for (i = 0; i < iterations; i++) { /* Some PHYs require the MII_BMSR register to be read * twice due to the link bit being sticky. 
No harm doing @@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; - if (phy_status & BMSR_LSTATUS) + if (phy_status & BMSR_LSTATUS) { + *success = true; break; + } if (usec_interval >= 1000) msleep(usec_interval / 1000); else udelay(usec_interval); } - *success = (i < iterations); - return ret_val; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 09281558bfbc..c21fa56afd7c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1226,7 +1226,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index af09a1b272e6..6a2d1454befe 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -2002,9 +2002,10 @@ static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, * function can also be used to respond to an error as the connection * resetting would also be a means of dealing with errors. 
**/ -static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, - struct fm10k_mbx_info *mbx) +static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) { + s32 err = 0; const enum fm10k_mbx_state state = mbx->state; switch (state) { @@ -2017,6 +2018,7 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, case FM10K_STATE_OPEN: /* flush any incomplete work */ fm10k_sm_mbx_connect_reset(mbx); + err = FM10K_ERR_RESET_REQUESTED; break; case FM10K_STATE_CONNECT: /* Update remote value to match local value */ @@ -2026,6 +2028,8 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, } fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail); + + return err; } /** @@ -2106,7 +2110,7 @@ static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw, switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) { case 0: - fm10k_sm_mbx_process_reset(hw, mbx); + err = fm10k_sm_mbx_process_reset(hw, mbx); break; case FM10K_SM_MBX_VERSION: err = fm10k_sm_mbx_process_version_1(hw, mbx); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 7f3fb51bc37b..06f35700840b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1072,6 +1072,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) struct fm10k_hw *hw = &interface->hw; struct fm10k_mbx_info *mbx = &hw->mbx; u32 eicr; + s32 err = 0; /* unmask any set bits related to this interrupt */ eicr = fm10k_read_reg(hw, FM10K_EICR); @@ -1087,12 +1088,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) /* service mailboxes */ if (fm10k_mbx_trylock(interface)) { - mbx->ops.process(hw, mbx); + err = mbx->ops.process(hw, mbx); /* handle VFLRE events */ fm10k_iov_event(interface); fm10k_mbx_unlock(interface); } + if (err == FM10K_ERR_RESET_REQUESTED) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + /* if switch toggled state we should reset GLORTs */ if 
(eicr & FM10K_EICR_SWITCHNOTREADY) { /* force link down for at least 4 seconds */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4edbab6ca7ef..b5b228c9a030 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3595,7 +3595,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 26c55bba4bf3..6dcc3854844d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -663,7 +663,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* we have caught up to head, no work left to do */ if (tx_head == tx_desc) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 39db70a597ed..1ed27fcd5031 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -172,7 +172,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* we have caught up to head, no work left to do */ if (tx_head == tx_desc) diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 97bf0c3d5c69..f3f3b95d5512 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -223,6 +223,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) hw->bus.func = (rd32(E1000_STATUS) & 
E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. + */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + /* Set phy->phy_addr and phy->id. */ ret_val = igb_get_phy_id_82575(hw); if (ret_val) diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 29f59c76878a..851225b5dc0f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -699,9 +699,9 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw) ret_val = igb_pool_flash_update_done_i210(hw); if (ret_val) - hw_dbg("Flash update complete\n"); - else hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); out: return ret_val; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a481ea64e287..c55552c3d2f9 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3172,7 +3172,9 @@ static int __igb_close(struct net_device *netdev, bool suspending) static int igb_close(struct net_device *netdev) { - return __igb_close(netdev, false); + if (netif_device_present(netdev)) + return __igb_close(netdev, false); + return 0; } /** @@ -6431,7 +6433,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) @@ -7325,12 +7327,14 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); if 
(netif_running(netdev)) __igb_close(netdev, true); igb_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -7450,16 +7454,15 @@ static int igb_resume(struct device *dev) wr32(E1000_WUS, ~0); - if (netdev->flags & IFF_UP) { - rtnl_lock(); + rtnl_lock(); + if (!err && netif_running(netdev)) err = __igb_open(netdev, true); - rtnl_unlock(); - if (err) - return err; - } - netif_device_attach(netdev); - return 0; + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; } static int igb_runtime_idle(struct device *dev) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 297af801f051..519b72c41888 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -809,7 +809,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index f3168bcc7d87..f0de09db8283 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -307,6 +307,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) ixgbe_cache_ring_rss(adapter); } +#define IXGBE_RSS_64Q_MASK 0x3F #define IXGBE_RSS_16Q_MASK 0xF #define IXGBE_RSS_8Q_MASK 0x7 #define IXGBE_RSS_4Q_MASK 0x3 @@ -602,6 +603,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) **/ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) { + struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *f; u16 rss_i; @@ -610,7 +612,11 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) rss_i = f->limit; f->indices = rss_i; - f->mask = IXGBE_RSS_16Q_MASK; + 
+ if (hw->mac.type < ixgbe_mac_X550) + f->mask = IXGBE_RSS_16Q_MASK; + else + f->mask = IXGBE_RSS_64Q_MASK; /* disable ATR by default, it will be configured below */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cd9b284bc83b..a5b443171b8b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1114,7 +1114,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) @@ -5878,7 +5878,8 @@ static int ixgbe_close(struct net_device *netdev) ixgbe_ptp_stop(adapter); - ixgbe_close_suspend(adapter); + if (netif_device_present(netdev)) + ixgbe_close_suspend(adapter); ixgbe_fdir_filter_exit(adapter); @@ -5923,14 +5924,12 @@ static int ixgbe_resume(struct pci_dev *pdev) if (!err && netif_running(netdev)) err = ixgbe_open(netdev); - rtnl_unlock(); - - if (err) - return err; - netif_device_attach(netdev); + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); - return 0; + return err; } #endif /* CONFIG_PM */ @@ -5945,14 +5944,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); - rtnl_lock(); if (netif_running(netdev)) ixgbe_close_suspend(adapter); - rtnl_unlock(); ixgbe_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -9221,7 +9220,7 @@ skip_bad_vf_detection: } if (netif_running(netdev)) - ixgbe_down(adapter); + ixgbe_close_suspend(adapter); if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) pci_disable_device(pdev); @@ -9291,10 +9290,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev) } #endif + rtnl_lock(); if 
(netif_running(netdev)) - ixgbe_up(adapter); + ixgbe_open(netdev); netif_device_attach(netdev); + rtnl_unlock(); } static const struct pci_error_handlers ixgbe_err_handler = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index fb8673d63806..48d97cb730d8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -113,7 +113,7 @@ static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 10; + int max_retry = 3; int retry = 0; u8 csum_byte; u8 high_bits; @@ -1764,6 +1764,8 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) max_retry = IXGBE_SFP_DETECT_RETRIES; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index ebe0ac950b14..31f864fb30c1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1643,8 +1643,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, return status; reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ | - IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC); reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 592ff237d692..50bbad37d640 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -312,7 +312,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - 
read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 585e90f8341d..f735dfcb64ae 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -831,14 +831,10 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Receive error message handling */ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; - if (priv->rx_over_errors != ndev->stats.rx_over_errors) { + if (priv->rx_over_errors != ndev->stats.rx_over_errors) ndev->stats.rx_over_errors = priv->rx_over_errors; - netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n"); - } - if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) { + if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; - netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n"); - } out: return budget - quota; } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 79de9608ac48..ed96fdefd8e5 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1117,6 +1117,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, case TUNSETSNDBUF: if (get_user(s, sp)) return -EFAULT; + if (s <= 0) + return -EINVAL; q->sk.sk_sndbuf = s; return 0; diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index e5bb870b5461..dc454138d600 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1110,7 +1110,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) static struct lock_class_key ppp_tx_busylock; static int ppp_dev_init(struct net_device *dev) { + struct ppp *ppp; + dev->qdisc_tx_busylock = &ppp_tx_busylock; + + ppp = netdev_priv(dev); + /* Let the netdevice take a reference 
on the ppp file. This ensures + * that ppp_destroy_interface() won't run before the device gets + * unregistered. + */ + atomic_inc(&ppp->file.refcnt); + return 0; } @@ -1133,6 +1143,15 @@ static void ppp_dev_uninit(struct net_device *dev) wake_up_interruptible(&ppp->file.rwait); } +static void ppp_dev_priv_destructor(struct net_device *dev) +{ + struct ppp *ppp; + + ppp = netdev_priv(dev); + if (atomic_dec_and_test(&ppp->file.refcnt)) + ppp_destroy_interface(ppp); +} + static const struct net_device_ops ppp_netdev_ops = { .ndo_init = ppp_dev_init, .ndo_uninit = ppp_dev_uninit, @@ -1150,6 +1169,7 @@ static void ppp_setup(struct net_device *dev) dev->tx_queue_len = 3; dev->type = ARPHRD_PPP; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + dev->destructor = ppp_dev_priv_destructor; netif_keep_dst(dev); } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 89ad2b750531..1b0184b3818a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1685,6 +1685,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (!dev) return -ENOMEM; + err = dev_get_valid_name(net, dev, name); + if (err < 0) + goto err_free_dev; dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; @@ -2072,6 +2075,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, ret = -EFAULT; break; } + if (sndbuf <= 0) { + ret = -EINVAL; + break; + } tun->sndbuf = sndbuf; tun_set_sndbuf(tun); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 8c408aa2f208..f9343bee1de3 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -221,7 +221,7 @@ skip: goto bad_desc; } - if (header.usb_cdc_ether_desc) { + if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize); /* because of Zaurus, we may be ignoring the host * side link address we were given. 
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index e0e94b855bbe..1228d0da4075 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -724,8 +724,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ u8 *buf; int len; int temp; + int err; u8 iface_no; struct usb_cdc_parsed_header hdr; + u16 curr_ntb_format; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -823,6 +825,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ goto error2; } + /* + * Some Huawei devices have been observed to come out of reset in NDP32 mode. + * Let's check if this is the case, and set the device to NDP16 mode again if + * needed. + */ + if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) { + err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT, + USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, + 0, iface_no, &curr_ntb_format, 2); + if (err < 0) { + goto error2; + } + + if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) { + dev_info(&intf->dev, "resetting NTB format to 16-bit"); + err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT, + USB_TYPE_CLASS | USB_DIR_OUT + | USB_RECIP_INTERFACE, + USB_CDC_NCM_NTB16_FORMAT, + iface_no, NULL, 0); + + if (err < 0) + goto error2; + } + } + cdc_ncm_find_endpoints(dev, ctx->data); cdc_ncm_find_endpoints(dev, ctx->control); if (!dev->in || !dev->out || !dev->status) { diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c index 2680a65cd5e4..63f28908afda 100644 --- a/drivers/net/usb/huawei_cdc_ncm.c +++ b/drivers/net/usb/huawei_cdc_ncm.c @@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev, * be at the end of the frame. */ drvflags |= CDC_NCM_FLAG_NDP_TO_END; + + /* Additionally, it has been reported that some Huawei E3372H devices, with + * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence + * needing to be set to the NTB16 one again. 
+ */ + drvflags |= CDC_NCM_FLAG_RESET_NTB16; ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags); if (ret) goto err; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 958af3b1af7f..e325ca3ad565 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -262,7 +262,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) } /* errors aren't fatal - we can live with the dynamic address */ - if (cdc_ether) { + if (cdc_ether && cdc_ether->wMaxSegmentSize) { dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize); usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress); } diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 55033aed6d6b..079d77678b1c 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -706,8 +706,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) "boot get otp board id result 0x%08x board_id %d chip_id %d\n", result, board_id, chip_id); - if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0) + if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 || + (board_id == 0)) { + ath10k_warn(ar, "board id is not exist in otp, ignore it\n"); return -EOPNOTSUPP; + } ar->id.bmi_ids_valid = true; ar->id.bmi_board_id = board_id; @@ -2088,7 +2091,7 @@ void ath10k_core_stop(struct ath10k *ar) /* try to suspend target */ if (ar->state != ATH10K_STATE_RESTARTING && ar->state != ATH10K_STATE_UTF) - ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR); + ath10k_wait_for_suspend(ar, ar->hw_values->pdev_suspend_option); ath10k_hif_stop(ar); ath10k_htt_tx_free(&ar->htt); diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index caf63b8bbba4..1437b5d29a17 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -460,6 +460,7 @@ struct ath10k_hw_ce_regs qcax_ce_regs = { }; const struct ath10k_hw_values qca988x_values = { 
+ .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR, .rtc_state_val_on = 3, .ce_count = 8, .msi_assign_ce_max = 7, @@ -469,6 +470,7 @@ const struct ath10k_hw_values qca988x_values = { }; const struct ath10k_hw_values qca6174_values = { + .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR, .rtc_state_val_on = 3, .ce_count = 8, .msi_assign_ce_max = 7, @@ -478,6 +480,7 @@ const struct ath10k_hw_values qca6174_values = { }; const struct ath10k_hw_values qca99x0_values = { + .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR, .rtc_state_val_on = 5, .ce_count = 12, .msi_assign_ce_max = 12, @@ -487,6 +490,7 @@ const struct ath10k_hw_values qca99x0_values = { }; const struct ath10k_hw_values qca9888_values = { + .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR, .rtc_state_val_on = 3, .ce_count = 12, .msi_assign_ce_max = 12, @@ -496,13 +500,15 @@ const struct ath10k_hw_values qca9888_values = { }; const struct ath10k_hw_values qca4019_values = { - .ce_count = 12, - .num_target_ce_config_wlan = 10, - .ce_desc_meta_data_mask = 0xFFF0, - .ce_desc_meta_data_lsb = 4, + .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR, + .ce_count = 12, + .num_target_ce_config_wlan = 10, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, }; const struct ath10k_hw_values wcn3990_values = { + .pdev_suspend_option = WMI_PDEV_SUSPEND, .rtc_state_val_on = 5, .ce_count = 12, .msi_assign_ce_max = 12, diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 8aa696ed2e72..a37b956c558f 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -363,6 +363,7 @@ extern struct ath10k_hw_ce_regs qcax_ce_regs; extern struct fw_flag wcn3990_fw_flags; struct ath10k_hw_values { + u32 pdev_suspend_option; u32 rtc_state_val_on; u8 ce_count; u8 msi_assign_ce_max; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 16a5c5fd3925..28042100ae0a 100644 
--- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1237,6 +1237,36 @@ static int ath10k_monitor_recalc(struct ath10k *ar) return ath10k_monitor_stop(ar); } +static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (!arvif->is_started) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); + return false; + } + + return true; +} + +static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 vdev_param; + + lockdep_assert_held(&ar->conf_mutex); + + vdev_param = ar->wmi.vdev_param->protection_mode; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", + arvif->vdev_id, arvif->use_cts_prot); + + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + arvif->use_cts_prot ? 1 : 0); +} + static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) { struct ath10k *ar = arvif->ar; @@ -5386,20 +5416,18 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ERP_CTS_PROT) { arvif->use_cts_prot = info->use_cts_prot; - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", - arvif->vdev_id, info->use_cts_prot); ret = ath10k_recalc_rtscts_prot(arvif); if (ret) ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); - vdev_param = ar->wmi.vdev_param->protection_mode; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, - info->use_cts_prot ? 
1 : 0); - if (ret) - ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n", - info->use_cts_prot, arvif->vdev_id, ret); + if (ath10k_mac_can_set_cts_prot(arvif)) { + ret = ath10k_mac_set_cts_prot(arvif); + if (ret) + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", + arvif->vdev_id, ret); + } } if (changed & BSS_CHANGED_ERP_SLOT) { @@ -7463,6 +7491,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_up = true; } + if (ath10k_mac_can_set_cts_prot(arvif)) { + ret = ath10k_mac_set_cts_prot(arvif); + if (ret) + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", + arvif->vdev_id, ret); + } + mutex_unlock(&ar->conf_mutex); return 0; diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 5ce4fdfca724..ba411cba6fc9 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1156,8 +1156,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_pdev *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); @@ -1177,8 +1179,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_vdev *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); @@ -1196,8 +1200,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats_peer *dst; src = data; - if (data_len < sizeof(*src)) + if (data_len < sizeof(*src)) { + kfree(tb); return -EPROTO; + } data += sizeof(*src); data_len -= sizeof(*src); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index 5fecae0ba52e..83e5aa6a9f28 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ 
b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -4295,9 +4295,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); if (err < 0) brcmf_err("setting AP mode failed %d\n", err); - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0); - if (err < 0) - brcmf_err("setting INFRA mode failed %d\n", err); if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) brcmf_fil_iovar_int_set(ifp, "mbss", 0); err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY, diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index 4d8ad7c8975f..bcea74ad6685 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -862,7 +862,7 @@ void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops) cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_UNREGISTER_DRIVER, - CNSS_EVENT_SYNC, NULL); + CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL); } EXPORT_SYMBOL(cnss_wlan_unregister_driver); @@ -1508,8 +1508,14 @@ static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv, cnss_recovery_reason_to_str(recovery_data->reason), recovery_data->reason); + if (!plat_priv->driver_state) { + cnss_pr_err("Improper driver state, ignore recovery\n"); + ret = -EINVAL; + goto out; + } + if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) { - cnss_pr_err("Recovery is already in progress!\n"); + cnss_pr_err("Recovery is already in progress\n"); ret = -EINVAL; goto out; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 0708eedd9671..1c69e8140d9d 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -664,7 +664,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct sk_buff *skb = NULL; - + bool rtstatus; u32 totalpacketlen; u8 u1rsvdpageloc[5] = { 0 }; bool b_dlok = false; @@ -727,7 +727,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); - b_dlok = true; + rtstatus = rtl_cmd_send_packet(hw, skb); + if (rtstatus) + b_dlok = true; if (b_dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD , diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index bbb789f8990b..738d541a2255 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -1377,6 +1377,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw) ppsc->wakeup_reason = 0; + do_gettimeofday(&ts); rtlhal->last_suspend_sec = ts.tv_sec; switch (fw_reason) { diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 72ee1c305cc4..02db20b26749 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -67,6 +67,7 @@ module_param(rx_drain_timeout_msecs, uint, 0444); unsigned int rx_stall_timeout_msecs = 60000; module_param(rx_stall_timeout_msecs, uint, 0444); +#define MAX_QUEUES_DEFAULT 8 unsigned int xenvif_max_queues; module_param_named(max_queues, xenvif_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, @@ -2157,11 +2158,12 @@ static int __init netback_init(void) if (!xen_domain()) return -ENODEV; - /* Allow as many queues as there are CPUs if user has not + /* Allow as many queues as there are CPUs but max. 8 if user has not * specified a value. 
*/ if (xenvif_max_queues == 0) - xenvif_max_queues = num_online_cpus(); + xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT, + num_online_cpus()); if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 34a062ccb11d..fd221cc4cb79 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1840,27 +1840,19 @@ static int talk_to_netback(struct xenbus_device *dev, xennet_destroy_queues(info); err = xennet_create_queues(info, &num_queues); - if (err < 0) - goto destroy_ring; + if (err < 0) { + xenbus_dev_fatal(dev, err, "creating queues"); + kfree(info->queues); + info->queues = NULL; + goto out; + } /* Create shared ring, alloc event channel -- for each queue */ for (i = 0; i < num_queues; ++i) { queue = &info->queues[i]; err = setup_netfront(dev, queue, feature_split_evtchn); - if (err) { - /* setup_netfront() will tidy up the current - * queue on error, but we need to clean up - * those already allocated. 
- */ - if (i > 0) { - rtnl_lock(); - netif_set_real_num_tx_queues(info->netdev, i); - rtnl_unlock(); - goto destroy_ring; - } else { - goto out; - } - } + if (err) + goto destroy_ring; } again: @@ -1950,9 +1942,9 @@ abort_transaction_no_dev_fatal: xenbus_transaction_end(xbt, 1); destroy_ring: xennet_disconnect_backend(info); - kfree(info->queues); - info->queues = NULL; + xennet_destroy_queues(info); out: + device_unregister(&dev->dev); return err; } diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 96526dcfdd37..ff7b9632ad61 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -823,7 +823,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels) nsindex = to_namespace_index(ndd, 0); memset(nsindex, 0, ndd->nsarea.config_size); for (i = 0; i < 2; i++) { - int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT); + int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT); if (rc) return rc; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index aae7379af4e4..c2184104b789 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1305,7 +1305,7 @@ static umode_t namespace_visible(struct kobject *kobj, if (a == &dev_attr_resource.attr) { if (is_namespace_blk(dev)) return 0; - return a->mode; + return 0400; } if (is_namespace_pmem(dev) || is_namespace_blk(dev)) { diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 669edbd47602..d6ceb8b91cd6 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -350,8 +350,8 @@ static void async_completion(struct nvme_queue *nvmeq, void *ctx, struct async_cmd_info *cmdinfo = ctx; cmdinfo->result = le32_to_cpup(&cqe->result); cmdinfo->status = le16_to_cpup(&cqe->status) >> 1; - queue_kthread_work(cmdinfo->worker, &cmdinfo->work); blk_mq_free_request(cmdinfo->req); + queue_kthread_work(cmdinfo->worker, &cmdinfo->work); } static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue 
*nvmeq, diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 53b79c5f0559..379d08f76146 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -131,6 +131,12 @@ struct mvebu_pcie { int nports; }; +struct mvebu_pcie_window { + phys_addr_t base; + phys_addr_t remap; + size_t size; +}; + /* Structure representing one PCIe interface */ struct mvebu_pcie_port { char *name; @@ -148,10 +154,8 @@ struct mvebu_pcie_port { struct mvebu_sw_pci_bridge bridge; struct device_node *dn; struct mvebu_pcie *pcie; - phys_addr_t memwin_base; - size_t memwin_size; - phys_addr_t iowin_base; - size_t iowin_size; + struct mvebu_pcie_window memwin; + struct mvebu_pcie_window iowin; u32 saved_pcie_stat; }; @@ -377,23 +381,45 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, } } +static void mvebu_pcie_set_window(struct mvebu_pcie_port *port, + unsigned int target, unsigned int attribute, + const struct mvebu_pcie_window *desired, + struct mvebu_pcie_window *cur) +{ + if (desired->base == cur->base && desired->remap == cur->remap && + desired->size == cur->size) + return; + + if (cur->size != 0) { + mvebu_pcie_del_windows(port, cur->base, cur->size); + cur->size = 0; + cur->base = 0; + + /* + * If something tries to change the window while it is enabled + * the change will not be done atomically. That would be + * difficult to do in the general case. + */ + } + + if (desired->size == 0) + return; + + mvebu_pcie_add_windows(port, target, attribute, desired->base, + desired->size, desired->remap); + *cur = *desired; +} + static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) { - phys_addr_t iobase; + struct mvebu_pcie_window desired = {}; /* Are the new iobase/iolimit values invalid? 
*/ if (port->bridge.iolimit < port->bridge.iobase || port->bridge.iolimitupper < port->bridge.iobaseupper || !(port->bridge.command & PCI_COMMAND_IO)) { - - /* If a window was configured, remove it */ - if (port->iowin_base) { - mvebu_pcie_del_windows(port, port->iowin_base, - port->iowin_size); - port->iowin_base = 0; - port->iowin_size = 0; - } - + mvebu_pcie_set_window(port, port->io_target, port->io_attr, + &desired, &port->iowin); return; } @@ -410,32 +436,27 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) * specifications. iobase is the bus address, port->iowin_base * is the CPU address. */ - iobase = ((port->bridge.iobase & 0xF0) << 8) | - (port->bridge.iobaseupper << 16); - port->iowin_base = port->pcie->io.start + iobase; - port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | - (port->bridge.iolimitupper << 16)) - - iobase) + 1; - - mvebu_pcie_add_windows(port, port->io_target, port->io_attr, - port->iowin_base, port->iowin_size, - iobase); + desired.remap = ((port->bridge.iobase & 0xF0) << 8) | + (port->bridge.iobaseupper << 16); + desired.base = port->pcie->io.start + desired.remap; + desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | + (port->bridge.iolimitupper << 16)) - + desired.remap) + + 1; + + mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, + &port->iowin); } static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) { + struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP}; + /* Are the new membase/memlimit values invalid? 
*/ if (port->bridge.memlimit < port->bridge.membase || !(port->bridge.command & PCI_COMMAND_MEMORY)) { - - /* If a window was configured, remove it */ - if (port->memwin_base) { - mvebu_pcie_del_windows(port, port->memwin_base, - port->memwin_size); - port->memwin_base = 0; - port->memwin_size = 0; - } - + mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, + &desired, &port->memwin); return; } @@ -445,14 +466,12 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) * window to setup, according to the PCI-to-PCI bridge * specifications. */ - port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); - port->memwin_size = - (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - - port->memwin_base + 1; - - mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr, - port->memwin_base, port->memwin_size, - MVEBU_MBUS_NO_REMAP); + desired.base = ((port->bridge.membase & 0xFFF0) << 16); + desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - + desired.base + 1; + + mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, + &port->memwin); } /* diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index b83df942794f..193ac13de49b 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1414,8 +1414,16 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) { - if (hpp) - dev_warn(&dev->dev, "PCI-X settings not supported\n"); + int pos; + + if (!hpp) + return; + + pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); + if (!pos) + return; + + dev_warn(&dev->dev, "PCI-X settings not supported\n"); } static bool pcie_root_rcb_set(struct pci_dev *dev) @@ -1441,6 +1449,9 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) if (!hpp) return; + if (!pci_is_pcie(dev)) + return; + if (hpp->revision > 1) { dev_warn(&dev->dev, "PCIe settings rev %d not supported\n", hpp->revision); diff --git 
a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index af2046c87806..847f75601591 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -249,7 +249,7 @@ static int hp_wmi_display_state(void) int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return state; } @@ -259,7 +259,7 @@ static int hp_wmi_hddtemp_state(void) int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return state; } @@ -269,7 +269,7 @@ static int hp_wmi_als_state(void) int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return state; } @@ -280,7 +280,7 @@ static int hp_wmi_dock_state(void) sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return state & 0x1; } @@ -291,7 +291,7 @@ static int hp_wmi_tablet_state(void) int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return ret; + return ret < 0 ? ret : -EINVAL; return (state & 0x4) ? 1 : 0; } @@ -324,7 +324,7 @@ static int __init hp_wmi_enable_hotkeys(void) int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value, sizeof(value), 0); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return 0; } @@ -337,7 +337,7 @@ static int hp_wmi_set_block(void *data, bool blocked) ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &query, sizeof(query), 0); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return 0; } @@ -429,7 +429,7 @@ static int hp_wmi_post_code_state(void) int ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 0, &state, sizeof(state), sizeof(state)); if (ret) - return -EINVAL; + return ret < 0 ? 
ret : -EINVAL; return state; } @@ -495,7 +495,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr, int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp, sizeof(tmp), sizeof(tmp)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return count; } @@ -516,7 +516,7 @@ static ssize_t set_postcode(struct device *dev, struct device_attribute *attr, ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 1, &tmp, sizeof(tmp), sizeof(tmp)); if (ret) - return -EINVAL; + return ret < 0 ? ret : -EINVAL; return count; } @@ -573,10 +573,12 @@ static void hp_wmi_notify(u32 value, void *context) switch (event_id) { case HPWMI_DOCK_EVENT: - input_report_switch(hp_wmi_input_dev, SW_DOCK, - hp_wmi_dock_state()); - input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, - hp_wmi_tablet_state()); + if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit)) + input_report_switch(hp_wmi_input_dev, SW_DOCK, + hp_wmi_dock_state()); + if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit)) + input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, + hp_wmi_tablet_state()); input_sync(hp_wmi_input_dev); break; case HPWMI_PARK_HDD: @@ -649,6 +651,7 @@ static int __init hp_wmi_input_setup(void) { acpi_status status; int err; + int val; hp_wmi_input_dev = input_allocate_device(); if (!hp_wmi_input_dev) @@ -659,17 +662,26 @@ static int __init hp_wmi_input_setup(void) hp_wmi_input_dev->id.bustype = BUS_HOST; __set_bit(EV_SW, hp_wmi_input_dev->evbit); - __set_bit(SW_DOCK, hp_wmi_input_dev->swbit); - __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); + + /* Dock */ + val = hp_wmi_dock_state(); + if (!(val < 0)) { + __set_bit(SW_DOCK, hp_wmi_input_dev->swbit); + input_report_switch(hp_wmi_input_dev, SW_DOCK, val); + } + + /* Tablet mode */ + val = hp_wmi_tablet_state(); + if (!(val < 0)) { + __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); + input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val); + } err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL); if 
(err) goto err_free_dev; /* Set initial hardware state */ - input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); - input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, - hp_wmi_tablet_state()); input_sync(hp_wmi_input_dev); if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later()) @@ -982,10 +994,12 @@ static int hp_wmi_resume_handler(struct device *device) * changed. */ if (hp_wmi_input_dev) { - input_report_switch(hp_wmi_input_dev, SW_DOCK, - hp_wmi_dock_state()); - input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, - hp_wmi_tablet_state()); + if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit)) + input_report_switch(hp_wmi_input_dev, SW_DOCK, + hp_wmi_dock_state()); + if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit)) + input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, + hp_wmi_tablet_state()); input_sync(hp_wmi_input_dev); } diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 741f3ee81cfe..5006cb6ce62d 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -909,7 +909,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); int qeth_core_hardsetup_card(struct qeth_card *); void qeth_print_status_message(struct qeth_card *); int qeth_init_qdio_queues(struct qeth_card *); -int qeth_send_startlan(struct qeth_card *); int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, int (*reply_cb) (struct qeth_card *, struct qeth_reply *, unsigned long), diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index d10bf3da8e5f..e5b9506698b1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2955,7 +2955,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); -int qeth_send_startlan(struct qeth_card *card) +static int qeth_send_startlan(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -2968,7 +2968,6 @@ int 
qeth_send_startlan(struct qeth_card *card) rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_send_startlan); static int qeth_default_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -5080,6 +5079,20 @@ retriable: goto out; } + rc = qeth_send_startlan(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + if (rc == IPA_RC_LAN_OFFLINE) { + dev_warn(&card->gdev->dev, + "The LAN is offline\n"); + card->lan_online = 0; + } else { + rc = -ENODEV; + goto out; + } + } else + card->lan_online = 1; + card->options.ipa4.supported_funcs = 0; card->options.ipa6.supported_funcs = 0; card->options.adp.supported_funcs = 0; @@ -5091,14 +5104,14 @@ retriable: if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { rc = qeth_query_setadapterparms(card); if (rc < 0) { - QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); goto out; } } if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { rc = qeth_query_setdiagass(card); if (rc < 0) { - QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); goto out; } } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index bf1e0e39334d..58bcb3c9a86a 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1203,21 +1203,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - rc = qeth_send_startlan(card); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (rc == 0xe080) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); - card->lan_online = 0; - goto contin; - } - rc = -ENODEV; - goto out_remove; - } else - card->lan_online = 1; - -contin: if ((card->info.type == QETH_CARD_TYPE_OSD) || (card->info.type == QETH_CARD_TYPE_OSX)) { if (qeth_l2_start_ipassists(card)) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 
285fe0b2c753..bf3c1b2301db 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3298,21 +3298,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - rc = qeth_send_startlan(card); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (rc == 0xe080) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); - card->lan_online = 0; - goto contin; - } - rc = -ENODEV; - goto out_remove; - } else - card->lan_online = 1; - -contin: rc = qeth_l3_setadapter_parms(card); if (rc) QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index f6446d759d7f..4639dac64e7f 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5148,6 +5148,19 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport) */ /** + * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host + * @shost: kernel scsi host pointer. + **/ +static void +lpfc_get_host_symbolic_name(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + + lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), + sizeof fc_host_symbolic_name(shost)); +} + +/** * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id * @shost: kernel scsi host pointer. 
**/ @@ -5684,6 +5697,8 @@ struct fc_function_template lpfc_transport_functions = { .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, + + .get_host_symbolic_name = lpfc_get_host_symbolic_name, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ @@ -5751,6 +5766,8 @@ struct fc_function_template lpfc_vport_transport_functions = { .show_host_supported_fc4s = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, + + .get_host_symbolic_name = lpfc_get_host_symbolic_name, .show_host_symbolic_name = 1, /* dynamic attributes the driver supports */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c74f74ab981c..d278362448ca 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1982,6 +1982,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; + sp->cmn.valid_vendor_ver_level = 0; + memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion)); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PLOGI: did:x%x", did, 0, 0); @@ -3966,6 +3969,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } else { memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); + + sp->cmn.valid_vendor_ver_level = 0; + memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion)); } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 2cce88e967ce..a8ad97300177 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -360,6 +360,12 @@ struct csp { * Word 1 Bit 30 in PLOGI request is random offset */ #define virtual_fabric_support randomOffset /* Word 1, bit 30 */ +/* + * Word 1 Bit 29 in common service parameter is overloaded. 
+ * Word 1 Bit 29 in FLOGI response is multiple NPort assignment + * Word 1 Bit 29 in FLOGI/PLOGI request is Valid Vendor Version Level + */ +#define valid_vendor_ver_level response_multiple_NPort /* Word 1, bit 29 */ #ifdef __BIG_ENDIAN_BITFIELD uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ uint16_t randomOffset:1; /* FC Word 1, bit 30 */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 38e90d9c2ced..8379fbbc60db 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -118,6 +118,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); + /* ensure WQE bcopy flushed before doorbell write */ + wmb(); /* Update the host index before invoking device */ host_index = q->host_index; @@ -9805,6 +9807,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt->ulpCommand = CMD_CLOSE_XRI_CN; abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; + abtsiocbp->vport = vport; lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, "0339 Abort xri x%x, original iotag x%x, " diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 769012663a8f..861c57bc4520 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -528,6 +528,12 @@ enable_vport(struct fc_vport *fc_vport) spin_lock_irq(shost->host_lock); vport->load_flag |= FC_LOADING; + if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { + spin_unlock_irq(shost->host_lock); + lpfc_issue_init_vpi(vport); + goto out; + } + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); @@ -548,6 +554,8 @@ enable_vport(struct fc_vport *fc_vport) } else { lpfc_vport_set_state(vport, FC_VPORT_FAILED); } + +out: lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1827 Vport Enabled.\n"); return VPORT_OK; diff --git 
a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index f429547aef7b..348678218e7f 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -2838,6 +2838,7 @@ static const struct of_device_id ufs_qcom_of_match[] = { { .compatible = "qcom,ufshc"}, {}, }; +MODULE_DEVICE_TABLE(of, ufs_qcom_of_match); static const struct dev_pm_ops ufs_qcom_pm_ops = { .suspend = ufshcd_pltfrm_suspend, diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 5a7cf839b4fd..2ef26f880d47 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -1074,7 +1074,6 @@ static inline void *ufshcd_get_variant(struct ufs_hba *hba) BUG_ON(!hba); return hba->priv; } - extern int ufshcd_runtime_suspend(struct ufs_hba *hba); extern int ufshcd_runtime_resume(struct ufs_hba *hba); extern int ufshcd_runtime_idle(struct ufs_hba *hba); diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c index b8464fdfd310..f21e9c4c4f4e 100644 --- a/drivers/soc/qcom/glink.c +++ b/drivers/soc/qcom/glink.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2991,7 +2991,7 @@ static int glink_tx_common(void *handle, void *pkt_priv, if (!wait_for_completion_timeout( &ctx->int_req_ack_complete, ctx->rx_intent_req_timeout_jiffies)) { - GLINK_ERR_CH(ctx, + GLINK_ERR( "%s: Intent request ack with size: %zu not granted for lcid\n", __func__, size); ret = -ETIMEDOUT; @@ -3011,7 +3011,7 @@ static int glink_tx_common(void *handle, void *pkt_priv, if (!wait_for_completion_timeout( &ctx->int_req_complete, ctx->rx_intent_req_timeout_jiffies)) { - GLINK_ERR_CH(ctx, + GLINK_ERR( "%s: Intent request with size: %zu not granted for lcid\n", __func__, size); ret = -ETIMEDOUT; diff --git a/drivers/soc/qcom/hab/Makefile b/drivers/soc/qcom/hab/Makefile index 83fc54d42202..77825be16fc4 100644 --- a/drivers/soc/qcom/hab/Makefile +++ b/drivers/soc/qcom/hab/Makefile @@ -9,6 +9,7 @@ msm_hab-objs = \ hab_mem_linux.o \ hab_pipe.o \ qvm_comm.o \ - hab_qvm.o + hab_qvm.o \ + hab_parser.o obj-$(CONFIG_MSM_HAB) += msm_hab.o diff --git a/drivers/soc/qcom/hab/hab.c b/drivers/soc/qcom/hab/hab.c index c6df36f5c0a2..040730d63a83 100644 --- a/drivers/soc/qcom/hab/hab.c +++ b/drivers/soc/qcom/hab/hab.c @@ -21,25 +21,32 @@ .openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\ } -/* the following has to match habmm definitions, order does not matter */ +/* + * The following has to match habmm definitions, order does not matter if + * hab config does not care either. 
When hab config is not present, the default + * is as guest VM all pchans are pchan opener (FE) + */ static struct hab_device hab_devices[] = { HAB_DEVICE_CNSTR(DEVICE_AUD1_NAME, MM_AUD_1, 0), HAB_DEVICE_CNSTR(DEVICE_AUD2_NAME, MM_AUD_2, 1), HAB_DEVICE_CNSTR(DEVICE_AUD3_NAME, MM_AUD_3, 2), HAB_DEVICE_CNSTR(DEVICE_AUD4_NAME, MM_AUD_4, 3), - HAB_DEVICE_CNSTR(DEVICE_CAM_NAME, MM_CAM, 4), - HAB_DEVICE_CNSTR(DEVICE_DISP1_NAME, MM_DISP_1, 5), - HAB_DEVICE_CNSTR(DEVICE_DISP2_NAME, MM_DISP_2, 6), - HAB_DEVICE_CNSTR(DEVICE_DISP3_NAME, MM_DISP_3, 7), - HAB_DEVICE_CNSTR(DEVICE_DISP4_NAME, MM_DISP_4, 8), - HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 9), - HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 10), - HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 11), - HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 12), - HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 13), - HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 14), - HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 15), - HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 16) + HAB_DEVICE_CNSTR(DEVICE_CAM1_NAME, MM_CAM_1, 4), + HAB_DEVICE_CNSTR(DEVICE_CAM2_NAME, MM_CAM_2, 5), + HAB_DEVICE_CNSTR(DEVICE_DISP1_NAME, MM_DISP_1, 6), + HAB_DEVICE_CNSTR(DEVICE_DISP2_NAME, MM_DISP_2, 7), + HAB_DEVICE_CNSTR(DEVICE_DISP3_NAME, MM_DISP_3, 8), + HAB_DEVICE_CNSTR(DEVICE_DISP4_NAME, MM_DISP_4, 9), + HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 10), + HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 11), + HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 12), + HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 13), + HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 14), + HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 15), + HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 16), + HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 17), + HAB_DEVICE_CNSTR(DEVICE_CLK1_NAME, MM_CLK_VM1, 18), + HAB_DEVICE_CNSTR(DEVICE_CLK2_NAME, MM_CLK_VM2, 19), }; struct hab_driver hab_driver = { @@ -71,6 +78,7 @@ struct uhab_context *hab_ctx_alloc(int kernel) 
kref_init(&ctx->refcount); ctx->import_ctx = habmem_imp_hyp_open(); if (!ctx->import_ctx) { + pr_err("habmem_imp_hyp_open failed\n"); kfree(ctx); return NULL; } @@ -148,6 +156,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx, dev = find_hab_device(mm_id); if (dev == NULL) { + pr_err("HAB device %d is not initialized\n", mm_id); ret = -EINVAL; goto err; } @@ -161,6 +170,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx, vchan = hab_vchan_alloc(ctx, pchan); if (!vchan) { + pr_err("vchan alloc failed\n"); ret = -ENOMEM; goto err; } @@ -187,6 +197,9 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx, vchan->otherend_id = recv_request->vchan_id; hab_open_request_free(recv_request); + vchan->session_id = open_id; + pr_debug("vchan->session_id:%d\n", vchan->session_id); + /* Send Ack sequence */ hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, pchan, 0, sub_id, open_id); @@ -221,6 +234,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx, dev = find_hab_device(mm_id); if (dev == NULL) { + pr_err("failed to find dev based on id %d\n", mm_id); ret = -EINVAL; goto err; } @@ -249,6 +263,9 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx, vchan->otherend_id = otherend_vchan_id; + vchan->session_id = open_id; + pr_debug("vchan->session_id:%d\n", vchan->session_id); + /* Send Init-Ack sequence */ hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK, pchan, vchan->id, sub_id, open_id); @@ -259,7 +276,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx, /* Wait for Ack sequence */ hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, pchan, 0, sub_id, open_id); - ret = hab_open_listen(ctx, dev, &request, &recv_request, HZ); + ret = hab_open_listen(ctx, dev, &request, &recv_request, 0); if (ret != -EAGAIN) break; @@ -280,6 +297,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx, hab_pchan_put(pchan); return vchan; err: + pr_err("listen on mmid %d 
failed\n", mm_id); if (vchan) hab_vchan_put(vchan); if (pchan) @@ -304,12 +322,19 @@ long hab_vchan_send(struct uhab_context *ctx, } vchan = hab_get_vchan_fromvcid(vcid, ctx); - if (!vchan || vchan->otherend_closed) - return -ENODEV; + if (!vchan || vchan->otherend_closed) { + ret = -ENODEV; + goto err; + } HAB_HEADER_SET_SIZE(header, sizebytes); - HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG); + if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT) + HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_PROFILE); + else + HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG); + HAB_HEADER_SET_ID(header, vchan->otherend_id); + HAB_HEADER_SET_SESSION_ID(header, vchan->session_id); while (1) { ret = physical_channel_send(vchan->pchan, &header, data); @@ -321,7 +346,11 @@ long hab_vchan_send(struct uhab_context *ctx, schedule(); } - hab_vchan_put(vchan); + +err: + if (vchan) + hab_vchan_put(vchan); + return ret; } @@ -335,7 +364,7 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx, int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING; vchan = hab_get_vchan_fromvcid(vcid, ctx); - if (!vchan || vchan->otherend_closed) + if (!vchan) return ERR_PTR(-ENODEV); if (nonblocking_flag) { @@ -351,6 +380,8 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx, if (!message) { if (nonblocking_flag) ret = -EAGAIN; + else if (vchan->otherend_closed) + ret = -ENODEV; else ret = -EPIPE; } @@ -369,7 +400,11 @@ int hab_vchan_open(struct uhab_context *ctx, int32_t *vcid, uint32_t flags) { - struct virtual_channel *vchan; + struct virtual_channel *vchan = NULL; + struct hab_device *dev; + + pr_debug("Open mmid=%d, loopback mode=%d, loopback num=%d\n", + mmid, hab_driver.b_loopback, hab_driver.loopback_num); if (!vcid) return -EINVAL; @@ -383,14 +418,29 @@ int hab_vchan_open(struct uhab_context *ctx, vchan = frontend_open(ctx, mmid, LOOPBACK_DOM); } } else { - if (hab_driver.b_server_dom) - vchan = backend_listen(ctx, mmid); - else - vchan = frontend_open(ctx, mmid, 
0); + dev = find_hab_device(mmid); + + if (dev) { + struct physical_channel *pchan = + hab_pchan_find_domid(dev, HABCFG_VMID_DONT_CARE); + + if (pchan->is_be) + vchan = backend_listen(ctx, mmid); + else + vchan = frontend_open(ctx, mmid, + HABCFG_VMID_DONT_CARE); + } else { + pr_err("failed to find device, mmid %d\n", mmid); + } } - if (IS_ERR(vchan)) + if (IS_ERR(vchan)) { + pr_err("vchan open failed over mmid=%d\n", mmid); return PTR_ERR(vchan); + } + + pr_debug("vchan id %x, remote id %x\n", + vchan->id, vchan->otherend_id); write_lock(&ctx->ctx_lock); list_add_tail(&vchan->node, &ctx->vchannels); @@ -403,12 +453,13 @@ int hab_vchan_open(struct uhab_context *ctx, void hab_send_close_msg(struct virtual_channel *vchan) { - struct hab_header header; + struct hab_header header = {0}; if (vchan && !vchan->otherend_closed) { HAB_HEADER_SET_SIZE(header, 0); HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_CLOSE); HAB_HEADER_SET_ID(header, vchan->otherend_id); + HAB_HEADER_SET_SESSION_ID(header, vchan->session_id); physical_channel_send(vchan->pchan, &header, NULL); } } @@ -442,6 +493,220 @@ void hab_vchan_close(struct uhab_context *ctx, int32_t vcid) write_unlock(&ctx->ctx_lock); } +/* + * To name the pchan - the pchan has two ends, either FE or BE locally. + * if is_be is true, then this is listener for BE. pchane name use remote + * FF's vmid from the table. + * if is_be is false, then local is FE as opener. pchan name use local FE's + * vmid (self) + */ +static int hab_initialize_pchan_entry(struct hab_device *mmid_device, + int vmid_local, int vmid_remote, int is_be) +{ + char pchan_name[MAX_VMID_NAME_SIZE]; + struct physical_channel *pchan = NULL; + int ret; + int vmid = is_be ? 
vmid_remote : vmid_local; + + if (!mmid_device) { + pr_err("habdev %pK, vmid local %d, remote %d, is be %d\n", + mmid_device, vmid_local, vmid_remote, is_be); + return -EINVAL; + } + + snprintf(pchan_name, MAX_VMID_NAME_SIZE, "vm%d-", vmid); + strlcat(pchan_name, mmid_device->name, MAX_VMID_NAME_SIZE); + + ret = habhyp_commdev_alloc((void **)&pchan, is_be, pchan_name, + vmid_remote, mmid_device); + if (ret == 0) { + pr_debug("pchan %s added, vmid local %d, remote %d, is_be %d, total %d\n", + pchan_name, vmid_local, vmid_remote, is_be, + mmid_device->pchan_cnt); + } else { + pr_err("failed %d to allocate pchan %s, vmid local %d, remote %d, is_be %d, total %d\n", + ret, pchan_name, vmid_local, vmid_remote, + is_be, mmid_device->pchan_cnt); + } + + return ret; +} + +static void hab_generate_pchan(struct local_vmid *settings, int i, int j) +{ + int k, ret = 0; + + pr_debug("%d as mmid %d in vmid %d\n", + HABCFG_GET_MMID(settings, i, j), j, i); + + switch (HABCFG_GET_MMID(settings, i, j)) { + case MM_AUD_START/100: + for (k = MM_AUD_START + 1; k < MM_AUD_END; k++) { + /* + * if this local pchan end is BE, then use + * remote FE's vmid. 
If local end is FE, then + * use self vmid + */ + ret += hab_initialize_pchan_entry( + find_hab_device(k), + settings->self, + HABCFG_GET_VMID(settings, i), + HABCFG_GET_BE(settings, i, j)); + } + break; + + case MM_CAM_START/100: + for (k = MM_CAM_START + 1; k < MM_CAM_END; k++) { + ret += hab_initialize_pchan_entry( + find_hab_device(k), + settings->self, + HABCFG_GET_VMID(settings, i), + HABCFG_GET_BE(settings, i, j)); + } + break; + + case MM_DISP_START/100: + for (k = MM_DISP_START + 1; k < MM_DISP_END; k++) { + ret += hab_initialize_pchan_entry( + find_hab_device(k), + settings->self, + HABCFG_GET_VMID(settings, i), + HABCFG_GET_BE(settings, i, j)); + } + break; + + case MM_GFX_START/100: + for (k = MM_GFX_START + 1; k < MM_GFX_END; k++) { + ret += hab_initialize_pchan_entry( + find_hab_device(k), + settings->self, + HABCFG_GET_VMID(settings, i), + HABCFG_GET_BE(settings, i, j)); + } + break; + + case MM_VID_START/100: + for (k = MM_VID_START + 1; k < MM_VID_END; k++) { + ret += hab_initialize_pchan_entry( + find_hab_device(k), + settings->self, + HABCFG_GET_VMID(settings, i), + HABCFG_GET_BE(settings, i, j)); + } + break; + + case MM_MISC_START/100: + for (k = MM_MISC_START + 1; k < MM_MISC_END; k++) { + ret += hab_initialize_pchan_entry( + find_hab_device(k), + settings->self, + HABCFG_GET_VMID(settings, i), + HABCFG_GET_BE(settings, i, j)); + } + break; + + case MM_QCPE_START/100: + for (k = MM_QCPE_START + 1; k < MM_QCPE_END; k++) { + ret += hab_initialize_pchan_entry( + find_hab_device(k), + settings->self, + HABCFG_GET_VMID(settings, i), + HABCFG_GET_BE(settings, i, j)); + } + break; + + case MM_CLK_START/100: + for (k = MM_CLK_START + 1; k < MM_CLK_END; k++) { + ret += hab_initialize_pchan_entry( + find_hab_device(k), + settings->self, + HABCFG_GET_VMID(settings, i), + HABCFG_GET_BE(settings, i, j)); + } + break; + + default: + pr_err("failed to find mmid %d, i %d, j %d\n", + HABCFG_GET_MMID(settings, i, j), i, j); + + break; + } +} + +/* + * generate 
pchan list based on hab settings table. + * return status 0: success, otherwise failure + */ +static int hab_generate_pchan_list(struct local_vmid *settings) +{ + int i, j; + + /* scan by valid VMs, then mmid */ + pr_debug("self vmid is %d\n", settings->self); + for (i = 0; i < HABCFG_VMID_MAX; i++) { + if (HABCFG_GET_VMID(settings, i) != HABCFG_VMID_INVALID && + HABCFG_GET_VMID(settings, i) != settings->self) { + pr_debug("create pchans for vm %d\n", i); + + for (j = 1; j <= HABCFG_MMID_AREA_MAX; j++) { + if (HABCFG_GET_MMID(settings, i, j) + != HABCFG_VMID_INVALID) + hab_generate_pchan(settings, i, j); + } + } + } + + return 0; +} + +/* + * This function checks hypervisor plug-in readiness, read in hab configs, + * and configure pchans + */ +int do_hab_parse(void) +{ + int result; + int i; + struct hab_device *device; + int pchan_total = 0; + + /* first check if hypervisor plug-in is ready */ + result = hab_hypervisor_register(); + if (result) { + pr_err("register HYP plug-in failed, ret %d\n", result); + return result; + } + + /* Initialize open Q before first pchan starts */ + for (i = 0; i < hab_driver.ndevices; i++) { + device = &hab_driver.devp[i]; + init_waitqueue_head(&device->openq); + } + + /* read in hab config and create pchans*/ + memset(&hab_driver.settings, HABCFG_VMID_INVALID, + sizeof(hab_driver.settings)); + + pr_debug("prepare default gvm 2 settings...\n"); + fill_default_gvm_settings(&hab_driver.settings, 2, + MM_AUD_START, MM_ID_MAX); + + /* now generate hab pchan list */ + result = hab_generate_pchan_list(&hab_driver.settings); + if (result) { + pr_err("generate pchan list failed, ret %d\n", result); + } else { + for (i = 0; i < hab_driver.ndevices; i++) { + device = &hab_driver.devp[i]; + pchan_total += device->pchan_cnt; + } + pr_debug("ret %d, total %d pchans added, ndevices %d\n", + result, pchan_total, hab_driver.ndevices); + } + + return result; +} + static int hab_open(struct inode *inodep, struct file *filep) { int result = 0; @@ 
-468,6 +733,8 @@ static int hab_release(struct inode *inodep, struct file *filep) if (!ctx) return 0; + pr_debug("inode %pK, filep %pK\n", inodep, filep); + write_lock(&ctx->ctx_lock); list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) { @@ -635,9 +902,7 @@ static const struct dma_map_ops hab_dma_ops = { static int __init hab_init(void) { int result; - int i; dev_t dev; - struct hab_device *device; result = alloc_chrdev_region(&hab_driver.major, 0, 1, "hab"); @@ -676,24 +941,22 @@ static int __init hab_init(void) goto err; } - for (i = 0; i < hab_driver.ndevices; i++) { - device = &hab_driver.devp[i]; - init_waitqueue_head(&device->openq); - } - - hab_hypervisor_register(); + /* read in hab config, then configure pchans */ + result = do_hab_parse(); - hab_driver.kctx = hab_ctx_alloc(1); - if (!hab_driver.kctx) { - pr_err("hab_ctx_alloc failed"); - result = -ENOMEM; - hab_hypervisor_unregister(); - goto err; - } + if (!result) { + hab_driver.kctx = hab_ctx_alloc(1); + if (!hab_driver.kctx) { + pr_err("hab_ctx_alloc failed"); + result = -ENOMEM; + hab_hypervisor_unregister(); + goto err; + } - set_dma_ops(hab_driver.dev, &hab_dma_ops); + set_dma_ops(hab_driver.dev, &hab_dma_ops); - return result; + return result; + } err: if (!IS_ERR_OR_NULL(hab_driver.dev)) @@ -703,6 +966,7 @@ err: cdev_del(&hab_driver.cdev); unregister_chrdev_region(dev, 1); + pr_err("Error in hab init, result %d\n", result); return result; } diff --git a/drivers/soc/qcom/hab/hab.h b/drivers/soc/qcom/hab/hab.h index 805e5b4a7008..19a8584edd35 100644 --- a/drivers/soc/qcom/hab/hab.h +++ b/drivers/soc/qcom/hab/hab.h @@ -13,7 +13,7 @@ #ifndef __HAB_H #define __HAB_H -#define pr_fmt(fmt) "hab: " fmt +#define pr_fmt(fmt) "|hab:%s:%d|" fmt, __func__, __LINE__ #include <linux/types.h> @@ -47,6 +47,7 @@ enum hab_payload_type { HAB_PAYLOAD_TYPE_EXPORT_ACK, HAB_PAYLOAD_TYPE_PROFILE, HAB_PAYLOAD_TYPE_CLOSE, + HAB_PAYLOAD_TYPE_MAX, }; #define LOOPBACK_DOM 0xFF @@ -61,7 +62,8 @@ enum hab_payload_type { 
#define DEVICE_AUD2_NAME "hab_aud2" #define DEVICE_AUD3_NAME "hab_aud3" #define DEVICE_AUD4_NAME "hab_aud4" -#define DEVICE_CAM_NAME "hab_cam" +#define DEVICE_CAM1_NAME "hab_cam1" +#define DEVICE_CAM2_NAME "hab_cam2" #define DEVICE_DISP1_NAME "hab_disp1" #define DEVICE_DISP2_NAME "hab_disp2" #define DEVICE_DISP3_NAME "hab_disp3" @@ -74,6 +76,48 @@ enum hab_payload_type { #define DEVICE_QCPE2_NAME "hab_qcpe_vm2" #define DEVICE_QCPE3_NAME "hab_qcpe_vm3" #define DEVICE_QCPE4_NAME "hab_qcpe_vm4" +#define DEVICE_CLK1_NAME "hab_clock_vm1" +#define DEVICE_CLK2_NAME "hab_clock_vm2" + +/* make sure the concatenated name is less than this value */ +#define MAX_VMID_NAME_SIZE 30 + +#define HABCFG_FILE_SIZE_MAX 256 +#define HABCFG_MMID_AREA_MAX (MM_ID_MAX/100) + +#define HABCFG_VMID_MAX 16 +#define HABCFG_VMID_INVALID (-1) +#define HABCFG_VMID_DONT_CARE (-2) + +#define HABCFG_ID_LINE_LIMIT "," +#define HABCFG_ID_VMID "VMID=" +#define HABCFG_ID_BE "BE=" +#define HABCFG_ID_FE "FE=" +#define HABCFG_ID_MMID "MMID=" +#define HABCFG_ID_RANGE "-" +#define HABCFG_ID_DONTCARE "X" + +#define HABCFG_FOUND_VMID 1 +#define HABCFG_FOUND_FE_MMIDS 2 +#define HABCFG_FOUND_BE_MMIDS 3 +#define HABCFG_FOUND_NOTHING (-1) + +#define HABCFG_BE_FALSE 0 +#define HABCFG_BE_TRUE 1 + +#define HABCFG_GET_VMID(_local_cfg_, _vmid_) \ + ((settings)->vmid_mmid_list[_vmid_].vmid) +#define HABCFG_GET_MMID(_local_cfg_, _vmid_, _mmid_) \ + ((settings)->vmid_mmid_list[_vmid_].mmid[_mmid_]) +#define HABCFG_GET_BE(_local_cfg_, _vmid_, _mmid_) \ + ((settings)->vmid_mmid_list[_vmid_].is_listener[_mmid_]) + +struct hab_header { + uint32_t id_type_size; + uint32_t session_id; + uint32_t signature; + uint32_t sequence; +} __packed; /* "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match */ #define HAB_HEADER_SIZE_SHIFT 0 @@ -96,34 +140,44 @@ enum hab_payload_type { #define HAB_VCID_GET_ID(vcid) \ (((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT) + +#define HAB_HEADER_SET_SESSION_ID(header, sid) ((header).session_id =
(sid)) + #define HAB_HEADER_SET_SIZE(header, size) \ - ((header).info = (((header).info) & (~HAB_HEADER_SIZE_MASK)) | \ - (((size) << HAB_HEADER_SIZE_SHIFT) & HAB_HEADER_SIZE_MASK)) + ((header).id_type_size = ((header).id_type_size & \ + (~HAB_HEADER_SIZE_MASK)) | \ + (((size) << HAB_HEADER_SIZE_SHIFT) & \ + HAB_HEADER_SIZE_MASK)) #define HAB_HEADER_SET_TYPE(header, type) \ - ((header).info = (((header).info) & (~HAB_HEADER_TYPE_MASK)) | \ - (((type) << HAB_HEADER_TYPE_SHIFT) & HAB_HEADER_TYPE_MASK)) + ((header).id_type_size = ((header).id_type_size & \ + (~HAB_HEADER_TYPE_MASK)) | \ + (((type) << HAB_HEADER_TYPE_SHIFT) & \ + HAB_HEADER_TYPE_MASK)) #define HAB_HEADER_SET_ID(header, id) \ - ((header).info = (((header).info) & (~HAB_HEADER_ID_MASK)) | \ - ((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) \ - & HAB_HEADER_ID_MASK)) + ((header).id_type_size = ((header).id_type_size & \ + (~HAB_HEADER_ID_MASK)) | \ + ((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) & \ + HAB_HEADER_ID_MASK)) #define HAB_HEADER_GET_SIZE(header) \ - ((((header).info) & HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT) + (((header).id_type_size & \ + HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT) #define HAB_HEADER_GET_TYPE(header) \ - ((((header).info) & HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT) + (((header).id_type_size & \ + HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT) #define HAB_HEADER_GET_ID(header) \ - (((((header).info) & HAB_HEADER_ID_MASK) >> \ + ((((header).id_type_size & HAB_HEADER_ID_MASK) >> \ (HAB_HEADER_ID_SHIFT - HAB_VCID_ID_SHIFT)) & HAB_VCID_ID_MASK) -struct hab_header { - uint32_t info; -}; +#define HAB_HEADER_GET_SESSION_ID(header) ((header).session_id) struct physical_channel { + char name[MAX_VMID_NAME_SIZE]; + int is_be; struct kref refcount; struct hab_device *habdev; struct list_head node; @@ -138,6 +192,10 @@ struct physical_channel { int closed; spinlock_t rxbuf_lock; + + /* vchans over this pchan */ + struct list_head vchannels; + rwlock_t vchans_lock; }; 
struct hab_open_send_data { @@ -179,9 +237,10 @@ struct hab_message { }; struct hab_device { - const char *name; + char name[MAX_VMID_NAME_SIZE]; unsigned int id; struct list_head pchannels; + int pchan_cnt; struct mutex pchan_lock; struct list_head openq_list; spinlock_t openlock; @@ -211,19 +270,37 @@ struct uhab_context { int kernel; }; +/* + * array to describe the VM and its MMID configuration as what is connected to + * so this is describing a pchan's remote side + */ +struct vmid_mmid_desc { + int vmid; /* remote vmid */ + int mmid[HABCFG_MMID_AREA_MAX+1]; /* selected or not */ + int is_listener[HABCFG_MMID_AREA_MAX+1]; /* yes or no */ +}; + +struct local_vmid { + int32_t self; /* only this field is for local */ + struct vmid_mmid_desc vmid_mmid_list[HABCFG_VMID_MAX]; +}; + struct hab_driver { struct device *dev; struct cdev cdev; dev_t major; struct class *class; - int irq; - int ndevices; struct hab_device *devp; struct uhab_context *kctx; + + struct local_vmid settings; /* parser results */ + int b_server_dom; int loopback_num; int b_loopback; + + void *hyp_priv; /* hypervisor plug-in storage */ }; struct virtual_channel { @@ -243,12 +320,14 @@ struct virtual_channel { struct physical_channel *pchan; struct uhab_context *ctx; struct list_head node; + struct list_head pnode; struct list_head rx_list; wait_queue_head_t rx_queue; spinlock_t rx_lock; int id; int otherend_id; int otherend_closed; + uint32_t session_id; }; /* @@ -271,7 +350,7 @@ struct export_desc { void *kva; int payload_count; unsigned char payload[1]; -}; +} __packed; int hab_vchan_open(struct uhab_context *ctx, unsigned int mmid, int32_t *vcid, uint32_t flags); @@ -286,6 +365,7 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx, int vcid, unsigned int flags); void hab_vchan_stop(struct virtual_channel *vchan); +void hab_vchans_stop(struct physical_channel *pchan); void hab_vchan_stop_notify(struct virtual_channel *vchan); int hab_mem_export(struct uhab_context *ctx, @@ -350,7 
+430,7 @@ void hab_open_request_init(struct hab_open_request *request, int open_id); int hab_open_request_send(struct hab_open_request *request); int hab_open_request_add(struct physical_channel *pchan, - struct hab_header *header); + size_t sizebytes, int request_type); void hab_open_request_free(struct hab_open_request *request); int hab_open_listen(struct uhab_context *ctx, struct hab_device *dev, @@ -361,7 +441,7 @@ int hab_open_listen(struct uhab_context *ctx, struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan); struct virtual_channel *hab_vchan_get(struct physical_channel *pchan, - uint32_t vchan_id); + struct hab_header *header); void hab_vchan_put(struct virtual_channel *vchan); struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid, @@ -394,6 +474,9 @@ static inline void hab_ctx_put(struct uhab_context *ctx) void hab_send_close_msg(struct virtual_channel *vchan); int hab_hypervisor_register(void); void hab_hypervisor_unregister(void); +int habhyp_commdev_alloc(void **commdev, int is_be, char *name, + int vmid_remote, struct hab_device *mmid_device); +int habhyp_commdev_dealloc(void *commdev); int physical_channel_read(struct physical_channel *pchan, void *payload, @@ -407,6 +490,13 @@ void physical_channel_rx_dispatch(unsigned long physical_channel); int loopback_pchan_create(char *dev_name); +int hab_parse(struct local_vmid *settings); + +int do_hab_parse(void); + +int fill_default_gvm_settings(struct local_vmid *settings, + int vmid_local, int mmid_start, int mmid_end); + bool hab_is_loopback(void); /* Global singleton HAB instance */ diff --git a/drivers/soc/qcom/hab/hab_mem_linux.c b/drivers/soc/qcom/hab/hab_mem_linux.c index ab4b9d0885cb..ecc3f52a6662 100644 --- a/drivers/soc/qcom/hab/hab_mem_linux.c +++ b/drivers/soc/qcom/hab/hab_mem_linux.c @@ -35,6 +35,7 @@ struct importer_context { int cnt; /* pages allocated for local file */ struct list_head imp_list; struct file *filp; + rwlock_t implist_lock; 
}; void *habmm_hyp_allocate_grantable(int page_count, @@ -73,8 +74,12 @@ static int habmem_get_dma_pages(unsigned long address, int fd; vma = find_vma(current->mm, address); - if (!vma || !vma->vm_file) + if (!vma || !vma->vm_file) { + pr_err("cannot find vma\n"); goto err; + } + + pr_debug("vma flags %lx\n", vma->vm_flags); /* Look for the fd that matches this the vma file */ fd = iterate_fd(current->files, 0, match_file, vma->vm_file); @@ -103,6 +108,7 @@ static int habmem_get_dma_pages(unsigned long address, for_each_sg(sg_table->sgl, s, sg_table->nents, i) { page = sg_page(s); + pr_debug("sgl length %d\n", s->length); for (j = page_offset; j < (s->length >> PAGE_SHIFT); j++) { pages[rc] = nth_page(page, j); @@ -136,6 +142,12 @@ err: return rc; } +/* + * exporter - grant & revoke + * generate a sharable page list based on the CPU friendly virtual "address". + * The result as an array is stored in ppdata to return to the caller. + * page size 4KB is assumed + */ int habmem_hyp_grant_user(unsigned long address, int page_count, int flags, @@ -220,6 +232,7 @@ void *habmem_imp_hyp_open(void) if (!priv) return NULL; + rwlock_init(&priv->implist_lock); INIT_LIST_HEAD(&priv->imp_list); return priv; @@ -261,7 +274,7 @@ long habmem_imp_hyp_map(void *imp_ctx, uint32_t userflags) { struct page **pages; - struct compressed_pfns *pfn_table = impdata; + struct compressed_pfns *pfn_table = (struct compressed_pfns *)impdata; struct pages_list *pglist; struct importer_context *priv = imp_ctx; unsigned long pfn; @@ -310,6 +323,9 @@ long habmem_imp_hyp_map(void *imp_ctx, kfree(pglist); pr_err("%ld pages vmap failed\n", pglist->npages); return -ENOMEM; + } else { + pr_debug("%ld pages vmap pass, return %pK\n", + pglist->npages, pglist->kva); } pglist->uva = NULL; @@ -320,8 +336,11 @@ long habmem_imp_hyp_map(void *imp_ctx, pglist->kva = NULL; } + write_lock(&priv->implist_lock); list_add_tail(&pglist->list, &priv->imp_list); priv->cnt++; + write_unlock(&priv->implist_lock); + pr_debug("index
returned %llx\n", *index); return 0; } @@ -333,11 +352,15 @@ long habmm_imp_hyp_unmap(void *imp_ctx, int kernel) { struct importer_context *priv = imp_ctx; - struct pages_list *pglist; + struct pages_list *pglist, *tmp; int found = 0; uint64_t pg_index = index >> PAGE_SHIFT; - list_for_each_entry(pglist, &priv->imp_list, list) { + write_lock(&priv->implist_lock); + list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) { + pr_debug("node pglist %pK, kernel %d, pg_index %llx\n", + pglist, pglist->kernel, pg_index); + if (kernel) { if (pglist->kva == (void *)((uintptr_t)index)) found = 1; @@ -353,11 +376,15 @@ long habmm_imp_hyp_unmap(void *imp_ctx, } } + write_unlock(&priv->implist_lock); if (!found) { pr_err("failed to find export id on index %llx\n", index); return -EINVAL; } + pr_debug("detach pglist %pK, index %llx, kernel %d, list cnt %d\n", + pglist, pglist->index, pglist->kernel, priv->cnt); + if (kernel) if (pglist->kva) vunmap(pglist->kva); @@ -393,6 +420,8 @@ static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } + pr_debug("Fault page index %d\n", page_idx); + page = pglist->pages[page_idx]; get_page(page); vmf->page = page; @@ -422,15 +451,20 @@ int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma) struct pages_list *pglist; int bfound = 0; + pr_debug("mmap request start %lX, len %ld, index %lX\n", + vma->vm_start, length, vma->vm_pgoff); + + read_lock(&imp_ctx->implist_lock); list_for_each_entry(pglist, &imp_ctx->imp_list, list) { if (pglist->index == vma->vm_pgoff) { bfound = 1; break; } } + read_unlock(&imp_ctx->implist_lock); if (!bfound) { - pr_err("Failed to find pglist vm_pgoff: %d\n", vma->vm_pgoff); + pr_err("Failed to find pglist vm_pgoff: %ld\n", vma->vm_pgoff); return -EINVAL; } diff --git a/drivers/soc/qcom/hab/hab_mimex.c b/drivers/soc/qcom/hab/hab_mimex.c index aaef9aa9f414..67601590908e 100644 --- a/drivers/soc/qcom/hab/hab_mimex.c +++ b/drivers/soc/qcom/hab/hab_mimex.c 
@@ -31,11 +31,11 @@ static int hab_export_ack_find(struct uhab_context *ctx, struct hab_export_ack *expect_ack) { int ret = 0; - struct hab_export_ack_recvd *ack_recvd; + struct hab_export_ack_recvd *ack_recvd, *tmp; spin_lock_bh(&ctx->expq_lock); - list_for_each_entry(ack_recvd, &ctx->exp_rxq, node) { + list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) { if (ack_recvd->ack.export_id == expect_ack->export_id && ack_recvd->ack.vcid_local == expect_ack->vcid_local && ack_recvd->ack.vcid_remote == expect_ack->vcid_remote) { @@ -197,6 +197,7 @@ static int habmem_export_vchan(struct uhab_context *ctx, HAB_HEADER_SET_SIZE(header, sizebytes); HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT); HAB_HEADER_SET_ID(header, vchan->otherend_id); + HAB_HEADER_SET_SESSION_ID(header, vchan->session_id); ret = physical_channel_send(vchan->pchan, &header, exp); if (ret != 0) { @@ -228,6 +229,8 @@ int hab_mem_export(struct uhab_context *ctx, if (!ctx || !param || param->sizebytes > HAB_MAX_EXPORT_SIZE) return -EINVAL; + pr_debug("vc %X, mem size %d\n", param->vcid, param->sizebytes); + vchan = hab_get_vchan_fromvcid(param->vcid, ctx); if (!vchan || !vchan->pchan) { ret = -ENODEV; @@ -303,7 +306,10 @@ int hab_mem_unexport(struct uhab_context *ctx, return -EINVAL; ret = habmem_hyp_revoke(exp->payload, exp->payload_count); - + if (ret) { + pr_err("Error found in revoke grant with ret %d", ret); + return ret; + } habmem_remove_export(exp); return ret; } @@ -335,6 +341,10 @@ int hab_mem_import(struct uhab_context *ctx, return ret; } + pr_debug("call map id: %d pcnt %d remote_dom %d 1st_ref:0x%X\n", + exp->export_id, exp->payload_count, exp->domid_local, + *((uint32_t *)exp->payload)); + ret = habmem_imp_hyp_map(ctx->import_ctx, exp->payload, exp->payload_count, @@ -349,6 +359,8 @@ int hab_mem_import(struct uhab_context *ctx, exp->domid_local, *((uint32_t *)exp->payload)); return ret; } + pr_debug("import index %llx, kva %llx, kernel %d\n", + exp->import_index, param->kva, 
kernel); param->index = exp->import_index; param->kva = (uint64_t)exp->kva; @@ -373,6 +385,9 @@ int hab_mem_unimport(struct uhab_context *ctx, list_del(&exp->node); ctx->import_total--; found = 1; + + pr_debug("found id:%d payload cnt:%d kernel:%d\n", + exp->export_id, exp->payload_count, kernel); break; } } @@ -385,7 +400,10 @@ int hab_mem_unimport(struct uhab_context *ctx, exp->import_index, exp->payload_count, kernel); - + if (ret) { + pr_err("unmap fail id:%d pcnt:%d kernel:%d\n", + exp->export_id, exp->payload_count, kernel); + } param->kva = (uint64_t)exp->kva; kfree(exp); } diff --git a/drivers/soc/qcom/hab/hab_msg.c b/drivers/soc/qcom/hab/hab_msg.c index f08cc83fe9fc..700239a25652 100644 --- a/drivers/soc/qcom/hab/hab_msg.c +++ b/drivers/soc/qcom/hab/hab_msg.c @@ -55,13 +55,12 @@ hab_msg_dequeue(struct virtual_channel *vchan, int wait_flag) vchan->otherend_closed); } - if (!ret && !vchan->otherend_closed) { + /* return all the received messages before the remote close */ + if (!ret && !hab_rx_queue_empty(vchan)) { spin_lock_bh(&vchan->rx_lock); - if (!list_empty(&vchan->rx_list)) { - message = list_first_entry(&vchan->rx_list, + message = list_first_entry(&vchan->rx_list, struct hab_message, node); - list_del(&message->node); - } + list_del(&message->node); spin_unlock_bh(&vchan->rx_lock); } @@ -91,8 +90,9 @@ static int hab_export_enqueue(struct virtual_channel *vchan, return 0; } -static int hab_send_export_ack(struct physical_channel *pchan, - struct export_desc *exp) +static int hab_send_export_ack(struct virtual_channel *vchan, + struct physical_channel *pchan, + struct export_desc *exp) { struct hab_export_ack exp_ack = { .export_id = exp->export_id, @@ -104,11 +104,12 @@ static int hab_send_export_ack(struct physical_channel *pchan, HAB_HEADER_SET_SIZE(header, sizeof(exp_ack)); HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT_ACK); HAB_HEADER_SET_ID(header, exp->vcid_local); + HAB_HEADER_SET_SESSION_ID(header, vchan->session_id); return 
physical_channel_send(pchan, &header, &exp_ack); } static int hab_receive_create_export_ack(struct physical_channel *pchan, - struct uhab_context *ctx) + struct uhab_context *ctx, size_t sizebytes) { struct hab_export_ack_recvd *ack_recvd = kzalloc(sizeof(*ack_recvd), GFP_ATOMIC); @@ -116,11 +117,20 @@ static int hab_receive_create_export_ack(struct physical_channel *pchan, if (!ack_recvd) return -ENOMEM; + if (sizeof(ack_recvd->ack) != sizebytes) + pr_err("exp ack size %lu is not as arrived %zu\n", + sizeof(ack_recvd->ack), sizebytes); + if (physical_channel_read(pchan, &ack_recvd->ack, - sizeof(ack_recvd->ack)) != sizeof(ack_recvd->ack)) + sizebytes) != sizebytes) return -EIO; + pr_debug("receive export id %d, local vc %X, vd remote %X\n", + ack_recvd->ack.export_id, + ack_recvd->ack.vcid_local, + ack_recvd->ack.vcid_remote); + spin_lock_bh(&ctx->expq_lock); list_add_tail(&ack_recvd->node, &ctx->exp_rxq); spin_unlock_bh(&ctx->expq_lock); @@ -137,20 +147,48 @@ void hab_msg_recv(struct physical_channel *pchan, size_t sizebytes = HAB_HEADER_GET_SIZE(*header); uint32_t payload_type = HAB_HEADER_GET_TYPE(*header); uint32_t vchan_id = HAB_HEADER_GET_ID(*header); + uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header); struct virtual_channel *vchan = NULL; struct export_desc *exp_desc; + struct timeval tv; /* get the local virtual channel if it isn't an open message */ if (payload_type != HAB_PAYLOAD_TYPE_INIT && payload_type != HAB_PAYLOAD_TYPE_INIT_ACK && payload_type != HAB_PAYLOAD_TYPE_ACK) { - vchan = hab_vchan_get(pchan, vchan_id); + + /* sanity check the received message */ + if (payload_type >= HAB_PAYLOAD_TYPE_MAX || + vchan_id > (HAB_HEADER_ID_MASK >> HAB_HEADER_ID_SHIFT) + || !vchan_id || !session_id) { + pr_err("Invalid message received, payload type %d, vchan id %x, sizebytes %zx, session %d\n", + payload_type, vchan_id, sizebytes, session_id); + } + + vchan = hab_vchan_get(pchan, header); if (!vchan) { + pr_debug("vchan is not found, payload type %d, 
vchan id %x, sizebytes %zx, session %d\n", + payload_type, vchan_id, sizebytes, session_id); + + if (sizebytes) + pr_err("message is dropped\n"); + return; } else if (vchan->otherend_closed) { hab_vchan_put(vchan); + pr_debug("vchan remote is closed, payload type %d, vchan id %x, sizebytes %zx, session %d\n", + payload_type, vchan_id, sizebytes, session_id); + + if (sizebytes) + pr_err("message is dropped\n"); + return; } + } else { + if (sizebytes != sizeof(struct hab_open_send_data)) { + pr_err("Invalid open request received, payload type %d, vchan id %x, sizebytes %zx, session %d\n", + payload_type, vchan_id, sizebytes, session_id); + } } switch (payload_type) { @@ -165,9 +203,12 @@ void hab_msg_recv(struct physical_channel *pchan, case HAB_PAYLOAD_TYPE_INIT: case HAB_PAYLOAD_TYPE_INIT_ACK: case HAB_PAYLOAD_TYPE_ACK: - ret = hab_open_request_add(pchan, header); - if (ret) + ret = hab_open_request_add(pchan, sizebytes, payload_type); + if (ret) { + pr_err("open request add failed, ret %d, payload type %d, sizebytes %zx\n", + ret, payload_type, sizebytes); break; + } wake_up_interruptible(&dev->openq); break; @@ -185,22 +226,49 @@ void hab_msg_recv(struct physical_channel *pchan, exp_desc->domid_local = pchan->dom_id; hab_export_enqueue(vchan, exp_desc); - hab_send_export_ack(pchan, exp_desc); + hab_send_export_ack(vchan, pchan, exp_desc); break; case HAB_PAYLOAD_TYPE_EXPORT_ACK: - ret = hab_receive_create_export_ack(pchan, vchan->ctx); - if (ret) + ret = hab_receive_create_export_ack(pchan, vchan->ctx, + sizebytes); + if (ret) { + pr_err("failed to handled export ack %d\n", ret); break; - + } wake_up_interruptible(&vchan->ctx->exp_wq); break; case HAB_PAYLOAD_TYPE_CLOSE: + /* remote request close */ + pr_debug("remote side request close\n"); + pr_debug(" vchan id %X, other end %X, session %d\n", + vchan->id, vchan->otherend_id, session_id); hab_vchan_stop(vchan); break; + case HAB_PAYLOAD_TYPE_PROFILE: + do_gettimeofday(&tv); + + /* pull down the incoming data */ 
+ message = hab_msg_alloc(pchan, sizebytes); + if (!message) { + pr_err("msg alloc failed\n"); + break; + } + + ((uint64_t *)message->data)[2] = tv.tv_sec; + ((uint64_t *)message->data)[3] = tv.tv_usec; + hab_msg_queue(vchan, message); + break; + default: + pr_err("unknown msg is received\n"); + pr_err("payload type %d, vchan id %x\n", + payload_type, vchan_id); + pr_err("sizebytes %zx, session %d\n", + sizebytes, session_id); + break; } if (vchan) diff --git a/drivers/soc/qcom/hab/hab_open.c b/drivers/soc/qcom/hab/hab_open.c index 66468aa43afd..35f3281604e2 100644 --- a/drivers/soc/qcom/hab/hab_open.c +++ b/drivers/soc/qcom/hab/hab_open.c @@ -42,7 +42,7 @@ int hab_open_request_send(struct hab_open_request *request) } int hab_open_request_add(struct physical_channel *pchan, - struct hab_header *header) + size_t sizebytes, int request_type) { struct hab_open_node *node; struct hab_device *dev = pchan->habdev; @@ -53,12 +53,11 @@ int hab_open_request_add(struct physical_channel *pchan, if (!node) return -ENOMEM; - if (physical_channel_read(pchan, &data, HAB_HEADER_GET_SIZE(*header)) != - HAB_HEADER_GET_SIZE(*header)) + if (physical_channel_read(pchan, &data, sizebytes) != sizebytes) return -EIO; request = &node->request; - request->type = HAB_HEADER_GET_TYPE(*header); + request->type = request_type; request->pchan = pchan; request->vchan_id = data.vchan_id; request->sub_id = data.sub_id; diff --git a/drivers/soc/qcom/hab/hab_parser.c b/drivers/soc/qcom/hab/hab_parser.c new file mode 100644 index 000000000000..a38d9bcf26b9 --- /dev/null +++ b/drivers/soc/qcom/hab/hab_parser.c @@ -0,0 +1,65 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "hab.h" + +/* + * set valid mmid value in tbl to show this is valid entry. All inputs here are + * normalized to 1 based integer + */ +static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start, + int32_t vm_range, int32_t mmid_start, + int32_t mmid_range, int32_t be) +{ + int ret = 0; + int i, j; + + for (i = vm_start; i < vm_start+vm_range; i++) { + tbl[i].vmid = i; /* set valid vmid value to make it usable */ + for (j = mmid_start; j < mmid_start + mmid_range; j++) { + /* sanity check */ + if (tbl[i].mmid[j] != HABCFG_VMID_INVALID) { + pr_err("overwrite previous setting, i %d, j %d, be %d\n", + i, j, tbl[i].is_listener[j]); + } + tbl[i].mmid[j] = j; + tbl[i].is_listener[j] = be; /* BE IS listen */ + } + } + + return ret; +} + +void dump_settings(struct local_vmid *settings) +{ + int i, j; + + pr_debug("self vmid is %d\n", settings->self); + for (i = 0; i < HABCFG_VMID_MAX; i++) { + pr_debug("remote vmid %d\n", + settings->vmid_mmid_list[i].vmid); + for (j = 0; j <= HABCFG_MMID_AREA_MAX; j++) { + pr_debug("mmid %d, is_be %d\n", + settings->vmid_mmid_list[i].mmid[j], + settings->vmid_mmid_list[i].is_listener[j]); + } + } +} + +int fill_default_gvm_settings(struct local_vmid *settings, int vmid_local, + int mmid_start, int mmid_end) { + settings->self = vmid_local; + /* default gvm always talks to host as vm0 */ + return fill_vmid_mmid_tbl(settings->vmid_mmid_list, 0, 1, + mmid_start/100, (mmid_end-mmid_start)/100+1, HABCFG_BE_FALSE); +} diff --git a/drivers/soc/qcom/hab/hab_pchan.c b/drivers/soc/qcom/hab/hab_pchan.c index 1ad727f7d90f..36bc29b7bd0c 100644 --- a/drivers/soc/qcom/hab/hab_pchan.c +++ b/drivers/soc/qcom/hab/hab_pchan.c @@ -31,10 +31,13 @@ hab_pchan_alloc(struct 
hab_device *habdev, int otherend_id) pchan->closed = 1; pchan->hyp_data = NULL; + INIT_LIST_HEAD(&pchan->vchannels); + rwlock_init(&pchan->vchans_lock); spin_lock_init(&pchan->rxbuf_lock); mutex_lock(&habdev->pchan_lock); list_add_tail(&pchan->node, &habdev->pchannels); + habdev->pchan_cnt++; mutex_unlock(&habdev->pchan_lock); return pchan; @@ -47,6 +50,7 @@ static void hab_pchan_free(struct kref *ref) mutex_lock(&pchan->habdev->pchan_lock); list_del(&pchan->node); + pchan->habdev->pchan_cnt--; mutex_unlock(&pchan->habdev->pchan_lock); kfree(pchan->hyp_data); kfree(pchan); @@ -59,11 +63,14 @@ hab_pchan_find_domid(struct hab_device *dev, int dom_id) mutex_lock(&dev->pchan_lock); list_for_each_entry(pchan, &dev->pchannels, node) - if (pchan->dom_id == dom_id) + if (pchan->dom_id == dom_id || dom_id == HABCFG_VMID_DONT_CARE) break; - if (pchan->dom_id != dom_id) + if (pchan->dom_id != dom_id && dom_id != HABCFG_VMID_DONT_CARE) { + pr_err("dom_id mismatch requested %d, existing %d\n", + dom_id, pchan->dom_id); pchan = NULL; + } if (pchan && !kref_get_unless_zero(&pchan->refcount)) pchan = NULL; diff --git a/drivers/soc/qcom/hab/hab_qvm.c b/drivers/soc/qcom/hab/hab_qvm.c index a37590f23c61..fec06cbbd0c7 100644 --- a/drivers/soc/qcom/hab/hab_qvm.c +++ b/drivers/soc/qcom/hab/hab_qvm.c @@ -21,9 +21,51 @@ #include <linux/of.h> #include <linux/of_platform.h> -#define DEFAULT_HAB_SHMEM_IRQ 7 -#define SHMEM_PHYSICAL_ADDR 0x1c050000 +struct shmem_irq_config { + unsigned long factory_addr; /* from gvm settings when provided */ + int irq; /* from gvm settings when provided */ +}; + +/* + * this is for platform does not provide probe features. 
the size should match + * hab device side (all mmids) + */ +static struct shmem_irq_config pchan_factory_settings[] = { + {0x1b000000, 7}, + {0x1b001000, 8}, + {0x1b002000, 9}, + {0x1b003000, 10}, + {0x1b004000, 11}, + {0x1b005000, 12}, + {0x1b006000, 13}, + {0x1b007000, 14}, + {0x1b008000, 15}, + {0x1b009000, 16}, + {0x1b00a000, 17}, + {0x1b00b000, 18}, + {0x1b00c000, 19}, + {0x1b00d000, 20}, + {0x1b00e000, 21}, + {0x1b00f000, 22}, + {0x1b010000, 23}, + {0x1b011000, 24}, + {0x1b012000, 25}, + {0x1b013000, 26}, + +}; + +static struct qvm_plugin_info { + struct shmem_irq_config *pchan_settings; + int setting_size; + int curr; + int probe_cnt; +} qvm_priv_info = { + pchan_factory_settings, + ARRAY_SIZE(pchan_factory_settings), + 0, + ARRAY_SIZE(pchan_factory_settings) +}; static irqreturn_t shm_irq_handler(int irq, void *_pchan) { @@ -43,22 +85,22 @@ static irqreturn_t shm_irq_handler(int irq, void *_pchan) return rc; } +/* + * this is only for guest + */ static uint64_t get_guest_factory_paddr(struct qvm_channel *dev, - const char *name, uint32_t pages) + unsigned long factory_addr, int irq, const char *name, uint32_t pages) { int i; - dev->guest_factory = ioremap(SHMEM_PHYSICAL_ADDR, PAGE_SIZE); - - if (!dev->guest_factory) { - pr_err("Couldn't map guest_factory\n"); - return 0; - } + pr_debug("name = %s, factory paddr = 0x%lx, irq %d, pages %d\n", + name, factory_addr, irq, pages); + dev->guest_factory = (struct guest_shm_factory *)factory_addr; if (dev->guest_factory->signature != GUEST_SHM_SIGNATURE) { - pr_err("shmem factory signature incorrect: %ld != %lu\n", - GUEST_SHM_SIGNATURE, dev->guest_factory->signature); - iounmap(dev->guest_factory); + pr_err("signature error: %ld != %llu, factory addr %lx\n", + GUEST_SHM_SIGNATURE, dev->guest_factory->signature, + factory_addr); return 0; } @@ -77,16 +119,22 @@ static uint64_t get_guest_factory_paddr(struct qvm_channel *dev, /* See if we successfully created/attached to the region. 
*/ if (dev->guest_factory->status != GSS_OK) { pr_err("create failed: %d\n", dev->guest_factory->status); - iounmap(dev->guest_factory); return 0; } - pr_debug("shm creation size %x\n", dev->guest_factory->size); + pr_debug("shm creation size %x, paddr=%llx, vector %d, dev %pK\n", + dev->guest_factory->size, + dev->guest_factory->shmem, + dev->guest_intr, + dev); + + dev->factory_addr = factory_addr; + dev->irq = irq; return dev->guest_factory->shmem; } -static int create_dispatcher(struct physical_channel *pchan, int id) +static int create_dispatcher(struct physical_channel *pchan) { struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data; int ret; @@ -94,21 +142,45 @@ static int create_dispatcher(struct physical_channel *pchan, int id) tasklet_init(&dev->task, physical_channel_rx_dispatch, (unsigned long) pchan); - ret = request_irq(hab_driver.irq, shm_irq_handler, IRQF_SHARED, - hab_driver.devp[id].name, pchan); + pr_debug("request_irq: irq = %d, pchan name = %s", + dev->irq, pchan->name); + ret = request_irq(dev->irq, shm_irq_handler, IRQF_SHARED, + pchan->name, pchan); if (ret) pr_err("request_irq for %s failed: %d\n", - hab_driver.devp[id].name, ret); + pchan->name, ret); return ret; } -static struct physical_channel *habhyp_commdev_alloc(int id) +void hab_pipe_reset(struct physical_channel *pchan) { - struct qvm_channel *dev; - struct physical_channel *pchan = NULL; - int ret = 0, channel = 0; + struct hab_pipe_endpoint *pipe_ep; + struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data; + + pipe_ep = hab_pipe_init(dev->pipe, PIPE_SHMEM_SIZE, + pchan->is_be ? 0 : 1); + if (dev->pipe_ep != pipe_ep) + pr_warn("The pipe endpoint must not change\n"); +} + +/* + * allocate hypervisor plug-in specific resource for pchan, and call hab pchan + * alloc common function. hab driver struct is directly accessed. 
+ * commdev: pointer to store the pchan address + * id: index to hab_device (mmids) + * is_be: pchan local endpoint role + * name: pchan name + * return: status 0: success, otherwise: failures + */ +int habhyp_commdev_alloc(void **commdev, int is_be, char *name, + int vmid_remote, struct hab_device *mmid_device) +{ + struct qvm_channel *dev = NULL; + struct qvm_plugin_info *qvm_priv = hab_driver.hyp_priv; + struct physical_channel **pchan = (struct physical_channel **)commdev; + int ret = 0, coid = 0, channel = 0; char *shmdata; uint32_t pipe_alloc_size = hab_pipe_calc_required_bytes(PIPE_SHMEM_SIZE); @@ -119,15 +191,27 @@ static struct physical_channel *habhyp_commdev_alloc(int id) int total_pages; struct page **pages; + pr_debug("habhyp_commdev_alloc: pipe_alloc_size is %d\n", + pipe_alloc_size); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) - return ERR_PTR(-ENOMEM); + return -ENOMEM; spin_lock_init(&dev->io_lock); paddr = get_guest_factory_paddr(dev, - hab_driver.devp[id].name, + qvm_priv->pchan_settings[qvm_priv->curr].factory_addr, + qvm_priv->pchan_settings[qvm_priv->curr].irq, + name, pipe_alloc_pages); + qvm_priv->curr++; + if (qvm_priv->curr > qvm_priv->probe_cnt) { + pr_err("factory setting %d overflow probed cnt %d\n", + qvm_priv->curr, qvm_priv->probe_cnt); + ret = -1; + goto err; + } total_pages = dev->guest_factory->size + 1; pages = kmalloc_array(total_pages, sizeof(struct page *), GFP_KERNEL); @@ -147,72 +231,138 @@ static struct physical_channel *habhyp_commdev_alloc(int id) } shmdata = (char *)dev->guest_ctrl + PAGE_SIZE; + + pr_debug("ctrl page 0x%llx mapped at 0x%pK, idx %d\n", + paddr, dev->guest_ctrl, dev->guest_ctrl->idx); + pr_debug("data buffer mapped at 0x%pK\n", shmdata); dev->idx = dev->guest_ctrl->idx; kfree(pages); dev->pipe = (struct hab_pipe *) shmdata; + pr_debug("\"%s\": pipesize %d, addr 0x%pK, be %d\n", name, + pipe_alloc_size, dev->pipe, is_be); dev->pipe_ep = hab_pipe_init(dev->pipe, PIPE_SHMEM_SIZE, - dev->be ? 
0 : 1); - - pchan = hab_pchan_alloc(&hab_driver.devp[id], dev->be); - if (!pchan) { + is_be ? 0 : 1); + /* newly created pchan is added to mmid device list */ + *pchan = hab_pchan_alloc(mmid_device, vmid_remote); + if (!(*pchan)) { ret = -ENOMEM; goto err; } - pchan->closed = 0; - pchan->hyp_data = (void *)dev; + (*pchan)->closed = 0; + (*pchan)->hyp_data = (void *)dev; + strlcpy((*pchan)->name, name, MAX_VMID_NAME_SIZE); + (*pchan)->is_be = is_be; dev->channel = channel; + dev->coid = coid; - ret = create_dispatcher(pchan, id); - if (ret < 0) + ret = create_dispatcher(*pchan); + if (ret) goto err; - return pchan; + return ret; err: kfree(dev); - if (pchan) - hab_pchan_put(pchan); + if (*pchan) + hab_pchan_put(*pchan); pr_err("habhyp_commdev_alloc failed: %d\n", ret); - return ERR_PTR(ret); + return ret; +} + +int habhyp_commdev_dealloc(void *commdev) +{ + struct physical_channel *pchan = (struct physical_channel *)commdev; + struct qvm_channel *dev = pchan->hyp_data; + + + kfree(dev); + hab_pchan_put(pchan); + return 0; } int hab_hypervisor_register(void) { - int ret = 0, i; + int ret = 0; hab_driver.b_server_dom = 0; - /* - * Can still attempt to instantiate more channels if one fails. - * Others can be retried later. - */ - for (i = 0; i < hab_driver.ndevices; i++) { - if (IS_ERR(habhyp_commdev_alloc(i))) - ret = -EAGAIN; - } + pr_info("initializing for %s VM\n", hab_driver.b_server_dom ? 
+ "host" : "guest"); + + hab_driver.hyp_priv = &qvm_priv_info; return ret; } void hab_hypervisor_unregister(void) { + int status, i; + + for (i = 0; i < hab_driver.ndevices; i++) { + struct hab_device *dev = &hab_driver.devp[i]; + struct physical_channel *pchan; + + list_for_each_entry(pchan, &dev->pchannels, node) { + status = habhyp_commdev_dealloc(pchan); + if (status) { + pr_err("failed to free pchan %pK, i %d, ret %d\n", + pchan, i, status); + } + } + } + + qvm_priv_info.probe_cnt = 0; + qvm_priv_info.curr = 0; } static int hab_shmem_probe(struct platform_device *pdev) { - int irq = platform_get_irq(pdev, 0); + int irq = 0; + struct resource *mem; + void *shmem_base = NULL; + int ret = 0; + + /* hab in one GVM will not have pchans more than one VM could allowed */ + if (qvm_priv_info.probe_cnt >= hab_driver.ndevices) { + pr_err("no more channel, current %d, maximum %d\n", + qvm_priv_info.probe_cnt, hab_driver.ndevices); + return -ENODEV; + } - if (irq > 0) - hab_driver.irq = irq; - else - hab_driver.irq = DEFAULT_HAB_SHMEM_IRQ; + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + pr_err("no interrupt for the channel %d, error %d\n", + qvm_priv_info.probe_cnt, irq); + return irq; + } + qvm_priv_info.pchan_settings[qvm_priv_info.probe_cnt].irq = irq; - return 0; + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + pr_err("can not get io mem resource for channel %d\n", + qvm_priv_info.probe_cnt); + return -EINVAL; + } + shmem_base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(shmem_base)) { + pr_err("ioremap failed for channel %d, mem %pK\n", + qvm_priv_info.probe_cnt, mem); + return -EINVAL; + } + qvm_priv_info.pchan_settings[qvm_priv_info.probe_cnt].factory_addr + = (unsigned long)((uintptr_t)shmem_base); + + pr_debug("pchan idx %d, hab irq=%d shmem_base=%pK, mem %pK\n", + qvm_priv_info.probe_cnt, irq, shmem_base, mem); + + qvm_priv_info.probe_cnt++; + + return ret; } static int hab_shmem_remove(struct platform_device *pdev) @@ 
-220,6 +370,23 @@ static int hab_shmem_remove(struct platform_device *pdev) return 0; } +static void hab_shmem_shutdown(struct platform_device *pdev) +{ + int i; + struct qvm_channel *dev; + struct physical_channel *pchan; + struct hab_device hab_dev; + + for (i = 0; i < hab_driver.ndevices; i++) { + hab_dev = hab_driver.devp[i]; + pr_debug("detaching %s\n", hab_dev.name); + list_for_each_entry(pchan, &hab_dev.pchannels, node) { + dev = (struct qvm_channel *)pchan->hyp_data; + dev->guest_ctrl->detach = 0; + } + } +} + static const struct of_device_id hab_shmem_match_table[] = { {.compatible = "qvm,guest_shm"}, {}, @@ -228,6 +395,7 @@ static const struct of_device_id hab_shmem_match_table[] = { static struct platform_driver hab_shmem_driver = { .probe = hab_shmem_probe, .remove = hab_shmem_remove, + .shutdown = hab_shmem_shutdown, .driver = { .name = "hab_shmem", .of_match_table = of_match_ptr(hab_shmem_match_table), @@ -236,12 +404,14 @@ static struct platform_driver hab_shmem_driver = { static int __init hab_shmem_init(void) { + qvm_priv_info.probe_cnt = 0; return platform_driver_register(&hab_shmem_driver); } static void __exit hab_shmem_exit(void) { platform_driver_unregister(&hab_shmem_driver); + qvm_priv_info.probe_cnt = 0; } core_initcall(hab_shmem_init); diff --git a/drivers/soc/qcom/hab/hab_qvm.h b/drivers/soc/qcom/hab/hab_qvm.h index e94b82f87942..b483f4c21331 100644 --- a/drivers/soc/qcom/hab/hab_qvm.h +++ b/drivers/soc/qcom/hab/hab_qvm.h @@ -30,6 +30,7 @@ struct qvm_channel { struct tasklet_struct task; struct guest_shm_factory *guest_factory; struct guest_shm_control *guest_ctrl; + /* cached guest ctrl idx value to prevent trap when accessed */ uint32_t idx; int channel; @@ -37,11 +38,15 @@ struct qvm_channel { unsigned int guest_intr; unsigned int guest_iid; + unsigned int factory_addr; + unsigned int irq; + }; /* Shared mem size in each direction for communication pipe */ #define PIPE_SHMEM_SIZE (128 * 1024) void *qnx_hyp_rx_dispatch(void *data); 
+void hab_pipe_reset(struct physical_channel *pchan); #endif /* __HAB_QNX_H */ diff --git a/drivers/soc/qcom/hab/hab_vchan.c b/drivers/soc/qcom/hab/hab_vchan.c index 75a3fad68ab5..91ae173f7e83 100644 --- a/drivers/soc/qcom/hab/hab_vchan.c +++ b/drivers/soc/qcom/hab/hab_vchan.c @@ -40,6 +40,9 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan) hab_pchan_get(pchan); vchan->pchan = pchan; + write_lock(&pchan->vchans_lock); + list_add_tail(&vchan->pnode, &pchan->vchannels); + write_unlock(&pchan->vchans_lock); vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) | ((pchan->habdev->id << HAB_VCID_MMID_SHIFT) & HAB_VCID_MMID_MASK) | @@ -66,19 +69,22 @@ hab_vchan_free(struct kref *ref) struct virtual_channel *vchan = container_of(ref, struct virtual_channel, refcount); struct hab_message *message, *msg_tmp; - struct export_desc *exp; + struct export_desc *exp, *exp_tmp; struct physical_channel *pchan = vchan->pchan; struct uhab_context *ctx = vchan->ctx; + struct virtual_channel *vc, *vc_tmp; + spin_lock_bh(&vchan->rx_lock); list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) { list_del(&message->node); hab_msg_free(message); } + spin_unlock_bh(&vchan->rx_lock); do { found = 0; write_lock(&ctx->exp_lock); - list_for_each_entry(exp, &ctx->exp_whse, node) { + list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) { if (exp->vcid_local == vchan->id) { list_del(&exp->node); found = 1; @@ -95,7 +101,7 @@ hab_vchan_free(struct kref *ref) do { found = 0; spin_lock_bh(&ctx->imp_lock); - list_for_each_entry(exp, &ctx->imp_whse, node) { + list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) { if (exp->vcid_remote == vchan->id) { list_del(&exp->node); found = 1; @@ -117,6 +123,15 @@ hab_vchan_free(struct kref *ref) idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id)); spin_unlock_bh(&pchan->vid_lock); + write_lock(&pchan->vchans_lock); + list_for_each_entry_safe(vc, vc_tmp, &pchan->vchannels, pnode) { + if (vchan == vc) 
{ + list_del(&vc->pnode); + break; + } + } + write_unlock(&pchan->vchans_lock); + hab_pchan_put(pchan); hab_ctx_put(ctx); @@ -124,14 +139,17 @@ hab_vchan_free(struct kref *ref) } struct virtual_channel* -hab_vchan_get(struct physical_channel *pchan, uint32_t vchan_id) +hab_vchan_get(struct physical_channel *pchan, struct hab_header *header) { struct virtual_channel *vchan; + uint32_t vchan_id = HAB_HEADER_GET_ID(*header); + uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header); spin_lock_bh(&pchan->vid_lock); vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id)); if (vchan) - if (!kref_get_unless_zero(&vchan->refcount)) + if ((vchan->session_id != session_id) || + (!kref_get_unless_zero(&vchan->refcount))) vchan = NULL; spin_unlock_bh(&pchan->vid_lock); @@ -146,6 +164,17 @@ void hab_vchan_stop(struct virtual_channel *vchan) } } +void hab_vchans_stop(struct physical_channel *pchan) +{ + struct virtual_channel *vchan, *tmp; + + read_lock(&pchan->vchans_lock); + list_for_each_entry_safe(vchan, tmp, &pchan->vchannels, pnode) { + hab_vchan_stop(vchan); + } + read_unlock(&pchan->vchans_lock); +} + void hab_vchan_stop_notify(struct virtual_channel *vchan) { hab_send_close_msg(vchan); diff --git a/drivers/soc/qcom/hab/khab.c b/drivers/soc/qcom/hab/khab.c index f7499773ae42..05e6aa2fa7ca 100644 --- a/drivers/soc/qcom/hab/khab.c +++ b/drivers/soc/qcom/hab/khab.c @@ -117,7 +117,7 @@ int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes, param.flags = flags; ret = hab_mem_import(hab_driver.kctx, ¶m, 1); - if (!IS_ERR(ret)) + if (!ret) *buff_shared = (void *)(uintptr_t)param.kva; return ret; diff --git a/drivers/soc/qcom/hab/qvm_comm.c b/drivers/soc/qcom/hab/qvm_comm.c index 20a631e13794..41e34be9ac21 100644 --- a/drivers/soc/qcom/hab/qvm_comm.c +++ b/drivers/soc/qcom/hab/qvm_comm.c @@ -21,6 +21,7 @@ static inline void habhyp_notify(void *commdev) dev->guest_ctrl->notify = ~0; } +/* this is only used to read payload, never the head! 
*/ int physical_channel_read(struct physical_channel *pchan, void *payload, size_t read_size) @@ -33,6 +34,8 @@ int physical_channel_read(struct physical_channel *pchan, return 0; } +#define HAB_HEAD_SIGNATURE 0xBEE1BEE1 + int physical_channel_send(struct physical_channel *pchan, struct hab_header *header, void *payload) @@ -40,6 +43,7 @@ int physical_channel_send(struct physical_channel *pchan, int sizebytes = HAB_HEADER_GET_SIZE(*header); struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data; int total_size = sizeof(*header) + sizebytes; + struct timeval tv; if (total_size > dev->pipe_ep->tx_info.sh_buf->size) return -EINVAL; /* too much data for ring */ @@ -53,6 +57,8 @@ int physical_channel_send(struct physical_channel *pchan, return -EAGAIN; /* not enough free space */ } + header->signature = HAB_HEAD_SIGNATURE; + if (hab_pipe_write(dev->pipe_ep, (unsigned char *)header, sizeof(*header)) != sizeof(*header)) { @@ -60,6 +66,12 @@ int physical_channel_send(struct physical_channel *pchan, return -EIO; } + if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) { + do_gettimeofday(&tv); + ((uint64_t *)payload)[0] = tv.tv_sec; + ((uint64_t *)payload)[1] = tv.tv_usec; + } + if (sizebytes) { if (hab_pipe_write(dev->pipe_ep, (unsigned char *)payload, @@ -89,6 +101,14 @@ void physical_channel_rx_dispatch(unsigned long data) sizeof(header)) != sizeof(header)) break; /* no data available */ + if (header.signature != HAB_HEAD_SIGNATURE) { + pr_err("HAB signature mismatch, expect %X, received %X, id_type_size %X, session %X, sequence %X\n", + HAB_HEAD_SIGNATURE, header.signature, + header.id_type_size, + header.session_id, + header.sequence); + } + hab_msg_recv(pchan, &header); } spin_unlock_bh(&pchan->rxbuf_lock); diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 4ec3b6762cfd..7f71824d9548 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -563,6 +563,12 @@ static int icnss_assign_msa_perm_all(struct icnss_priv 
*priv, int i; enum icnss_msa_perm old_perm; + if (priv->nr_mem_region > QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01) { + icnss_pr_err("Invalid memory region len %d\n", + priv->nr_mem_region); + return -EINVAL; + } + for (i = 0; i < priv->nr_mem_region; i++) { old_perm = priv->mem_region[i].perm; ret = icnss_assign_msa_perm(&priv->mem_region[i], new_perm); diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c index 2a2d213f8ca0..ecc633749204 100644 --- a/drivers/soc/qcom/msm_glink_pkt.c +++ b/drivers/soc/qcom/msm_glink_pkt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -572,8 +572,10 @@ static void glink_pkt_notify_state_worker(struct work_struct *work) mutex_lock(&devp->ch_lock); devp->ch_state = event; if (event == GLINK_CONNECTED) { - if (!devp->handle) - devp->handle = handle; + if (!devp->handle) { + GLINK_PKT_ERR("%s: Invalid device handle\n", __func__); + goto exit; + } devp->in_reset = 0; wake_up_interruptible(&devp->ch_opened_wait_queue); } else if (event == GLINK_REMOTE_DISCONNECTED) { @@ -585,6 +587,7 @@ static void glink_pkt_notify_state_worker(struct work_struct *work) devp->handle = NULL; wake_up_interruptible(&devp->ch_closed_wait_queue); } +exit: mutex_unlock(&devp->ch_lock); kfree(work_item); } diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c index afc40461e8e8..7ef16ad5575b 100644 --- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c +++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c @@ -137,7 +137,7 @@ static int msm_audio_ion_smmu_map(struct ion_client *client, mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex)); if (cmd_rsp_size != sizeof(cmd_rsp)) { - pr_err("%s: invalid size for cmd rsp %lu, expected %lu\n", + pr_err("%s: 
invalid size for cmd rsp %u, expected %zu\n", __func__, cmd_rsp_size, sizeof(cmd_rsp)); rc = -EIO; goto err; @@ -218,7 +218,7 @@ static int msm_audio_ion_smmu_unmap(struct ion_client *client, } if (cmd_rsp_size != sizeof(cmd_rsp)) { - pr_err("%s: invalid size for cmd rsp %lu\n", + pr_err("%s: invalid size for cmd rsp %u\n", __func__, cmd_rsp_size); rc = -EIO; goto err; diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c index 6ef90b23aed5..2b66d6d5434d 100644 --- a/drivers/soc/qcom/rpm-smd-debug.c +++ b/drivers/soc/qcom/rpm-smd-debug.c @@ -90,23 +90,23 @@ static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer, cmp += pos; if (sscanf(cmp, "%5s %n", key_str, &pos) != 1) { pr_err("Invalid number of arguments passed\n"); - goto err; + goto err_request; } if (strlen(key_str) > 4) { pr_err("Key value cannot be more than 4 charecters"); - goto err; + goto err_request; } key = string_to_uint(key_str); if (!key) { pr_err("Key values entered incorrectly\n"); - goto err; + goto err_request; } cmp += pos; if (sscanf(cmp, "%u %n", &data, &pos) != 1) { pr_err("Invalid number of arguments passed\n"); - goto err; + goto err_request; } if (msm_rpm_add_kvp_data(req, key, diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 1f4a1f02a2cd..fec1ef2b1748 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -315,6 +315,7 @@ config SPI_FSL_SPI config SPI_FSL_DSPI tristate "Freescale DSPI controller" select REGMAP_MMIO + depends on HAS_DMA depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST help This enables support for the Freescale DSPI controller in master diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index d22de4c8c399..3de39bd794b6 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -863,7 +863,7 @@ static int sh_msiof_transfer_one(struct spi_master *master, break; copy32 = copy_bswap32; } else if (bits <= 16) { - if (l & 1) + if (l & 3) break; copy32 = 
copy_wswap32; } else { diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c index e8d0ff2d5c9b..808d6ebf6c94 100644 --- a/drivers/staging/iio/cdc/ad7150.c +++ b/drivers/staging/iio/cdc/ad7150.c @@ -272,7 +272,7 @@ static int ad7150_write_event_config(struct iio_dev *indio_dev, error_ret: mutex_unlock(&chip->state_lock); - return 0; + return ret; } static int ad7150_read_event_value(struct iio_dev *indio_dev, diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c index 035dd456d7d6..737747354db6 100644 --- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c +++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c @@ -259,7 +259,7 @@ out_free_irq: out1: iio_trigger_unregister(st->trig); out: - iio_trigger_put(st->trig); + iio_trigger_free(st->trig); return ret; } @@ -272,7 +272,7 @@ static int iio_bfin_tmr_trigger_remove(struct platform_device *pdev) peripheral_free(st->t->pin); free_irq(st->irq, st); iio_trigger_unregister(st->trig); - iio_trigger_put(st->trig); + iio_trigger_free(st->trig); return 0; } diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c index 7df978371c9a..44fffbd1bc74 100644 --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c +++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c @@ -402,15 +402,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) result = VM_FAULT_LOCKED; break; case -ENODATA: + case -EAGAIN: case -EFAULT: result = VM_FAULT_NOPAGE; break; case -ENOMEM: result = VM_FAULT_OOM; break; - case -EAGAIN: - result = VM_FAULT_RETRY; - break; default: result = VM_FAULT_SIGBUS; break; diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c index 70b8f4fabfad..e658e11e1829 100644 --- a/drivers/staging/panel/panel.c +++ b/drivers/staging/panel/panel.c @@ -1431,17 +1431,25 @@ static ssize_t lcd_write(struct file *file, static int lcd_open(struct 
inode *inode, struct file *file) { + int ret; + + ret = -EBUSY; if (!atomic_dec_and_test(&lcd_available)) - return -EBUSY; /* open only once at a time */ + goto fail; /* open only once at a time */ + ret = -EPERM; if (file->f_mode & FMODE_READ) /* device is write-only */ - return -EPERM; + goto fail; if (lcd.must_clear) { lcd_clear_display(); lcd.must_clear = false; } return nonseekable_open(inode, file); + + fail: + atomic_inc(&lcd_available); + return ret; } static int lcd_release(struct inode *inode, struct file *file) @@ -1704,14 +1712,21 @@ static ssize_t keypad_read(struct file *file, static int keypad_open(struct inode *inode, struct file *file) { + int ret; + + ret = -EBUSY; if (!atomic_dec_and_test(&keypad_available)) - return -EBUSY; /* open only once at a time */ + goto fail; /* open only once at a time */ + ret = -EPERM; if (file->f_mode & FMODE_WRITE) /* device is read-only */ - return -EPERM; + goto fail; keypad_buflen = 0; /* flush the buffer on opening */ return 0; + fail: + atomic_inc(&keypad_available); + return ret; } static int keypad_release(struct inode *inode, struct file *file) diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h index 971bf457f32d..e75a386344e4 100644 --- a/drivers/staging/rtl8188eu/include/rtw_debug.h +++ b/drivers/staging/rtl8188eu/include/rtw_debug.h @@ -75,7 +75,7 @@ extern u32 GlobalDebugLevel; #define DBG_88E_LEVEL(_level, fmt, arg...) \ do { \ if (_level <= GlobalDebugLevel) \ - pr_info(DRIVER_PREFIX"ERROR " fmt, ##arg); \ + pr_info(DRIVER_PREFIX fmt, ##arg); \ } while (0) #define DBG_88E(...) 
\ diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c index edfc6805e012..2b348439242f 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c @@ -199,7 +199,7 @@ static inline char *translate_scan(struct _adapter *padapter, iwe.cmd = SIOCGIWMODE; memcpy((u8 *)&cap, r8712_get_capability_from_ie(pnetwork->network.IEs), 2); - cap = le16_to_cpu(cap); + le16_to_cpus(&cap); if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_BSS)) { if (cap & WLAN_CAPABILITY_BSS) iwe.u.mode = (u32)IW_MODE_MASTER; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 1ff1c83e2df5..bb73401f5761 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1759,7 +1759,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct iscsi_tm *hdr; int out_of_order_cmdsn = 0, ret; bool sess_ref = false; - u8 function; + u8 function, tcm_function = TMR_UNKNOWN; hdr = (struct iscsi_tm *) buf; hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; @@ -1805,10 +1805,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * LIO-Target $FABRIC_MOD */ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { - - u8 tcm_function; - int ret; - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, conn->sess->se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG, cmd->sense_buffer + 2); @@ -1844,15 +1840,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } - - ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, - tcm_function, GFP_KERNEL); - if (ret < 0) - return iscsit_add_reject_cmd(cmd, + } + ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function, + GFP_KERNEL); + if (ret < 0) + return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); - cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; - } + 
cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; cmd->i_state = ISTATE_SEND_TASKMGTRSP; @@ -1928,12 +1923,14 @@ attach: if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); - if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) + if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) { out_of_order_cmdsn = 1; - else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) + } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(&cmd->se_cmd); return 0; - else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { return -1; + } } iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index f69f4902dc07..ee16a45f1607 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -350,7 +350,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) if (acl->dynamic_node_acl) { acl->dynamic_node_acl = 0; } - list_del(&acl->acl_list); + list_del_init(&acl->acl_list); tpg->num_node_acls--; mutex_unlock(&tpg->acl_node_mutex); @@ -572,7 +572,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) * in transport_deregister_session(). 
*/ list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { - list_del(&nacl->acl_list); + list_del_init(&nacl->acl_list); se_tpg->num_node_acls--; core_tpg_wait_for_nacl_pr_ref(nacl); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index f71bedea973a..37abf881ca75 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -431,7 +431,7 @@ static void target_complete_nacl(struct kref *kref) } mutex_lock(&se_tpg->acl_node_mutex); - list_del(&nacl->acl_list); + list_del_init(&nacl->acl_list); mutex_unlock(&se_tpg->acl_node_mutex); core_tpg_wait_for_nacl_pr_ref(nacl); @@ -503,7 +503,7 @@ void transport_free_session(struct se_session *se_sess) spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); if (se_nacl->dynamic_stop) - list_del(&se_nacl->acl_list); + list_del_init(&se_nacl->acl_list); } mutex_unlock(&se_tpg->acl_node_mutex); @@ -1970,6 +1970,8 @@ static void target_restart_delayed_cmds(struct se_device *dev) list_del(&cmd->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); + cmd->transport_state |= CMD_T_SENT; + __target_execute_cmd(cmd, true); if (cmd->sam_task_attr == TCM_ORDERED_TAG) @@ -2007,6 +2009,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd) pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", dev->dev_cur_ordered_id); } + cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; + restart: target_restart_delayed_cmds(dev); } diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig new file mode 100644 index 000000000000..2330a4eb4e8b --- /dev/null +++ b/drivers/tee/Kconfig @@ -0,0 +1,18 @@ +# Generic Trusted Execution Environment Configuration +config TEE + tristate "Trusted Execution Environment support" + select DMA_SHARED_BUFFER + select GENERIC_ALLOCATOR + help + This implements a generic interface towards a Trusted Execution + Environment (TEE). 
+ +if TEE + +menu "TEE drivers" + +source "drivers/tee/optee/Kconfig" + +endmenu + +endif diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile new file mode 100644 index 000000000000..7a4e4a1ac39c --- /dev/null +++ b/drivers/tee/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_TEE) += tee.o +tee-objs += tee_core.o +tee-objs += tee_shm.o +tee-objs += tee_shm_pool.o +obj-$(CONFIG_OPTEE) += optee/ diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig new file mode 100644 index 000000000000..0126de898036 --- /dev/null +++ b/drivers/tee/optee/Kconfig @@ -0,0 +1,7 @@ +# OP-TEE Trusted Execution Environment Configuration +config OPTEE + tristate "OP-TEE" + depends on HAVE_ARM_SMCCC + help + This implements the OP-TEE Trusted Execution Environment (TEE) + driver. diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile new file mode 100644 index 000000000000..92fe5789bcce --- /dev/null +++ b/drivers/tee/optee/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_OPTEE) += optee.o +optee-objs += core.o +optee-objs += call.o +optee-objs += rpc.o +optee-objs += supp.o diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c new file mode 100644 index 000000000000..f7b7b404c990 --- /dev/null +++ b/drivers/tee/optee/call.c @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/arm-smccc.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include "optee_private.h" +#include "optee_smc.h" + +struct optee_call_waiter { + struct list_head list_node; + struct completion c; +}; + +static void optee_cq_wait_init(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're preparing to make a call to secure world. In case we can't + * allocate a thread in secure world we'll end up waiting in + * optee_cq_wait_for_completion(). + * + * Normally if there's no contention in secure world the call will + * complete and we can cleanup directly with optee_cq_wait_final(). + */ + mutex_lock(&cq->mutex); + + /* + * We add ourselves to the queue, but we don't wait. This + * guarantees that we don't lose a completion if secure world + * returns busy and another thread just exited and try to complete + * someone. + */ + init_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +static void optee_cq_wait_for_completion(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + wait_for_completion(&w->c); + + mutex_lock(&cq->mutex); + + /* Move to end of list to get out of the way for other waiters */ + list_del(&w->list_node); + reinit_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +static void optee_cq_complete_one(struct optee_call_queue *cq) +{ + struct optee_call_waiter *w; + + list_for_each_entry(w, &cq->waiters, list_node) { + if (!completion_done(&w->c)) { + complete(&w->c); + break; + } + } +} + +static void optee_cq_wait_final(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're done with the call to secure world. The thread in secure + * world that was used for this call is now available for some + * other task to use. 
+ */ + mutex_lock(&cq->mutex); + + /* Get out of the list */ + list_del(&w->list_node); + + /* Wake up one eventual waiting task */ + optee_cq_complete_one(cq); + + /* + * If we're completed we've got a completion from another task that + * was just done with its call to secure world. Since yet another + * thread now is available in secure world wake up another eventual + * waiting task. + */ + if (completion_done(&w->c)) + optee_cq_complete_one(cq); + + mutex_unlock(&cq->mutex); +} + +/* Requires the filpstate mutex to be held */ +static struct optee_session *find_session(struct optee_context_data *ctxdata, + u32 session_id) +{ + struct optee_session *sess; + + list_for_each_entry(sess, &ctxdata->sess_list, list_node) + if (sess->session_id == session_id) + return sess; + + return NULL; +} + +/** + * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world + * @ctx: calling context + * @parg: physical address of message to pass to secure world + * + * Does and SMC to OP-TEE in secure world and handles eventual resulting + * Remote Procedure Calls (RPC) from OP-TEE. + * + * Returns return code from secure world, 0 is OK + */ +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) +{ + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_call_waiter w; + struct optee_rpc_param param = { }; + u32 ret; + + param.a0 = OPTEE_SMC_CALL_WITH_ARG; + reg_pair_from_64(¶m.a1, ¶m.a2, parg); + /* Initialize waiter */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + struct arm_smccc_res res; + + optee->invoke_fn(param.a0, param.a1, param.a2, param.a3, + param.a4, param.a5, param.a6, param.a7, + &res); + + if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) { + /* + * Out of threads in secure world, wait for a thread + * become available. 
+ */ + optee_cq_wait_for_completion(&optee->call_queue, &w); + } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { + param.a0 = res.a0; + param.a1 = res.a1; + param.a2 = res.a2; + param.a3 = res.a3; + optee_handle_rpc(ctx, ¶m); + } else { + ret = res.a0; + break; + } + } + + /* + * We're done with our thread in secure world, if there's any + * thread waiters wake up one. + */ + optee_cq_wait_final(&optee->call_queue, &w); + + return ret; +} + +static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, + struct optee_msg_arg **msg_arg, + phys_addr_t *msg_parg) +{ + int rc; + struct tee_shm *shm; + struct optee_msg_arg *ma; + + shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params), + TEE_SHM_MAPPED); + if (IS_ERR(shm)) + return shm; + + ma = tee_shm_get_va(shm, 0); + if (IS_ERR(ma)) { + rc = PTR_ERR(ma); + goto out; + } + + rc = tee_shm_get_pa(shm, 0, msg_parg); + if (rc) + goto out; + + memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params)); + ma->num_params = num_params; + *msg_arg = ma; +out: + if (rc) { + tee_shm_free(shm); + return ERR_PTR(rc); + } + + return shm; +} + +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param) +{ + struct optee_context_data *ctxdata = ctx->data; + int rc; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess = NULL; + + /* +2 for the meta parameters added below */ + shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION; + msg_arg->cancel_id = arg->cancel_id; + + /* + * Initialize and add the meta parameters needed when opening a + * session. 
+ */ + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | + OPTEE_MSG_ATTR_META; + msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | + OPTEE_MSG_ATTR_META; + memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid)); + memcpy(&msg_arg->params[1].u.value, arg->clnt_uuid, sizeof(arg->clnt_uuid)); + msg_arg->params[1].u.value.c = arg->clnt_login; + + rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param); + if (rc) + goto out; + + sess = kzalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) { + rc = -ENOMEM; + goto out; + } + + if (optee_do_call_with_arg(ctx, msg_parg)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (msg_arg->ret == TEEC_SUCCESS) { + /* A new session has been created, add it to the list. */ + sess->session_id = msg_arg->session; + mutex_lock(&ctxdata->mutex); + list_add(&sess->list_node, &ctxdata->sess_list); + mutex_unlock(&ctxdata->mutex); + } else { + kfree(sess); + } + + if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) { + arg->ret = TEEC_ERROR_COMMUNICATION; + arg->ret_origin = TEEC_ORIGIN_COMMS; + /* Close session again to avoid leakage */ + optee_close_session(ctx, msg_arg->session); + } else { + arg->session = msg_arg->session; + arg->ret = msg_arg->ret; + arg->ret_origin = msg_arg->ret_origin; + } +out: + tee_shm_free(shm); + + return rc; +} + +int optee_close_session(struct tee_context *ctx, u32 session) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid and remove it from the list */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, session); + if (sess) + list_del(&sess->list_node); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + kfree(sess); + + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = 
OPTEE_MSG_CMD_CLOSE_SESSION; + msg_arg->session = session; + optee_do_call_with_arg(ctx, msg_parg); + + tee_shm_free(shm); + return 0; +} + +int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + int rc; + + /* Check that the session is valid */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, arg->session); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND; + msg_arg->func = arg->func; + msg_arg->session = arg->session; + msg_arg->cancel_id = arg->cancel_id; + + rc = optee_to_msg_param(msg_arg->params, arg->num_params, param); + if (rc) + goto out; + + if (optee_do_call_with_arg(ctx, msg_parg)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + arg->ret = msg_arg->ret; + arg->ret_origin = msg_arg->ret_origin; +out: + tee_shm_free(shm); + return rc; +} + +int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, session); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CANCEL; + msg_arg->session = session; + msg_arg->cancel_id = cancel_id; + 
optee_do_call_with_arg(ctx, msg_parg); + + tee_shm_free(shm); + return 0; +} + +/** + * optee_enable_shm_cache() - Enables caching of some shared memory allocation + * in OP-TEE + * @optee: main service struct + */ +void optee_enable_shm_cache(struct optee *optee) +{ + struct optee_call_waiter w; + + /* We need to retry until secure world isn't busy. */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + struct arm_smccc_res res; + + optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, + 0, &res); + if (res.a0 == OPTEE_SMC_RETURN_OK) + break; + optee_cq_wait_for_completion(&optee->call_queue, &w); + } + optee_cq_wait_final(&optee->call_queue, &w); +} + +/** + * optee_disable_shm_cache() - Disables caching of some shared memory allocation + * in OP-TEE + * @optee: main service struct + */ +void optee_disable_shm_cache(struct optee *optee) +{ + struct optee_call_waiter w; + + /* We need to retry until secure world isn't busy. */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + union { + struct arm_smccc_res smccc; + struct optee_smc_disable_shm_cache_result result; + } res; + + optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, + 0, &res.smccc); + if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL) + break; /* All shm's freed */ + if (res.result.status == OPTEE_SMC_RETURN_OK) { + struct tee_shm *shm; + + shm = reg_pair_to_ptr(res.result.shm_upper32, + res.result.shm_lower32); + tee_shm_free(shm); + } else { + optee_cq_wait_for_completion(&optee->call_queue, &w); + } + } + optee_cq_wait_final(&optee->call_queue, &w); +} diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c new file mode 100644 index 000000000000..58169e519422 --- /dev/null +++ b/drivers/tee/optee/core.c @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, 
distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/arm-smccc.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include "optee_private.h" +#include "optee_smc.h" + +#define DRIVER_NAME "optee" + +#define OPTEE_SHM_NUM_PRIV_PAGES 1 + +/** + * optee_from_msg_param() - convert from OPTEE_MSG parameters to + * struct tee_param + * @params: subsystem internal parameter representation + * @num_params: number of elements in the parameter arrays + * @msg_params: OPTEE_MSG parameters + * Returns 0 on success or <0 on failure + */ +int optee_from_msg_param(struct tee_param *params, size_t num_params, + const struct optee_msg_param *msg_params) +{ + int rc; + size_t n; + struct tee_shm *shm; + phys_addr_t pa; + + for (n = 0; n < num_params; n++) { + struct tee_param *p = params + n; + const struct optee_msg_param *mp = msg_params + n; + u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK; + + switch (attr) { + case OPTEE_MSG_ATTR_TYPE_NONE: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; + memset(&p->u, 0, sizeof(p->u)); + break; + case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; + p->u.value.a = mp->u.value.a; + p->u.value.b = mp->u.value.b; + p->u.value.c = mp->u.value.c; + break; + case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: + case 
 OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; + p->u.memref.size = mp->u.tmem.size; + shm = (struct tee_shm *)(unsigned long) + mp->u.tmem.shm_ref; + if (!shm) { + p->u.memref.shm_offs = 0; + p->u.memref.shm = NULL; + break; + } + rc = tee_shm_get_pa(shm, 0, &pa); + if (rc) + return rc; + p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; + p->u.memref.shm = shm; + + /* Check that the memref is covered by the shm object */ + if (p->u.memref.size) { + size_t o = p->u.memref.shm_offs + + p->u.memref.size - 1; + + rc = tee_shm_get_pa(shm, o, NULL); + if (rc) + return rc; + } + break; + default: + return -EINVAL; + } + } + return 0; +} + +/** + * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters + * @msg_params: OPTEE_MSG parameters + * @num_params: number of elements in the parameter arrays + * @params: subsystem internal parameter representation + * Returns 0 on success or <0 on failure + */ +int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + const struct tee_param *params) +{ + int rc; + size_t n; + phys_addr_t pa; + + for (n = 0; n < num_params; n++) { + const struct tee_param *p = params + n; + struct optee_msg_param *mp = msg_params + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: + mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; + memset(&mp->u, 0, sizeof(mp->u)); + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + mp->u.value.a = p->u.value.a; + mp->u.value.b = p->u.value.b; + mp->u.value.c = p->u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + mp->attr = 
OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm; + mp->u.tmem.size = p->u.memref.size; + if (!p->u.memref.shm) { + mp->u.tmem.buf_ptr = 0; + break; + } + rc = tee_shm_get_pa(p->u.memref.shm, + p->u.memref.shm_offs, &pa); + if (rc) + return rc; + mp->u.tmem.buf_ptr = pa; + mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED << + OPTEE_MSG_ATTR_CACHE_SHIFT; + break; + default: + return -EINVAL; + } + } + return 0; +} + +static void optee_get_version(struct tee_device *teedev, + struct tee_ioctl_version_data *vers) +{ + struct tee_ioctl_version_data v = { + .impl_id = TEE_IMPL_ID_OPTEE, + .impl_caps = TEE_OPTEE_CAP_TZ, + .gen_caps = TEE_GEN_CAP_GP, + }; + *vers = v; +} + +static int optee_open(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata; + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + + ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL); + if (!ctxdata) + return -ENOMEM; + + if (teedev == optee->supp_teedev) { + bool busy = true; + + mutex_lock(&optee->supp.ctx_mutex); + if (!optee->supp.ctx) { + busy = false; + optee->supp.ctx = ctx; + } + mutex_unlock(&optee->supp.ctx_mutex); + if (busy) { + kfree(ctxdata); + return -EBUSY; + } + } + + mutex_init(&ctxdata->mutex); + INIT_LIST_HEAD(&ctxdata->sess_list); + + ctx->data = ctxdata; + return 0; +} + +static void optee_release(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct tee_shm *shm; + struct optee_msg_arg *arg = NULL; + phys_addr_t parg; + struct optee_session *sess; + struct optee_session *sess_tmp; + + if (!ctxdata) + return; + + shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED); + if (!IS_ERR(shm)) { + arg = tee_shm_get_va(shm, 0); + /* + * If va2pa fails for some reason, we can't call + * optee_close_session(), only 
free the memory. Secure OS + * will leak sessions and finally refuse more sessions, but + * we will at least let normal world reclaim its memory. + */ + if (!IS_ERR(arg)) + tee_shm_va2pa(shm, arg, &parg); + } + + list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list, + list_node) { + list_del(&sess->list_node); + if (!IS_ERR_OR_NULL(arg)) { + memset(arg, 0, sizeof(*arg)); + arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + arg->session = sess->session_id; + optee_do_call_with_arg(ctx, parg); + } + kfree(sess); + } + kfree(ctxdata); + + if (!IS_ERR(shm)) + tee_shm_free(shm); + + ctx->data = NULL; + + if (teedev == optee->supp_teedev) { + mutex_lock(&optee->supp.ctx_mutex); + optee->supp.ctx = NULL; + mutex_unlock(&optee->supp.ctx_mutex); + } +} + +static struct tee_driver_ops optee_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, + .open_session = optee_open_session, + .close_session = optee_close_session, + .invoke_func = optee_invoke_func, + .cancel_req = optee_cancel_req, +}; + +static struct tee_desc optee_desc = { + .name = DRIVER_NAME "-clnt", + .ops = &optee_ops, + .owner = THIS_MODULE, +}; + +static struct tee_driver_ops optee_supp_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, + .supp_recv = optee_supp_recv, + .supp_send = optee_supp_send, +}; + +static struct tee_desc optee_supp_desc = { + .name = DRIVER_NAME "-supp", + .ops = &optee_supp_ops, + .owner = THIS_MODULE, + .flags = TEE_DESC_PRIVILEGED, +}; + +static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn) +{ + struct arm_smccc_res res; + + invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res); + + if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 && + res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3) + return true; + return false; +} + +static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn) +{ + union { + struct arm_smccc_res smccc; + struct 
 optee_smc_calls_revision_result result; + } res; + + invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc); + + if (res.result.major == OPTEE_MSG_REVISION_MAJOR && + (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR) + return true; + return false; +} + +static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, + u32 *sec_caps) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_exchange_capabilities_result result; + } res; + u32 a1 = 0; + + /* + * TODO This isn't enough to tell if it's UP system (from kernel + * point of view) or not, is_smp() returns the information + * needed, but can't be called directly from here. + */ + if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1) + a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR; + + invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0, + &res.smccc); + + if (res.result.status != OPTEE_SMC_RETURN_OK) + return false; + + *sec_caps = res.result.capabilities; + return true; +} + +static struct tee_shm_pool * +optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_get_shm_config_result result; + } res; + struct tee_shm_pool *pool; + unsigned long vaddr; + phys_addr_t paddr; + size_t size; + phys_addr_t begin; + phys_addr_t end; + void *va; + struct tee_shm_pool_mem_info priv_info; + struct tee_shm_pool_mem_info dmabuf_info; + + invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc); + if (res.result.status != OPTEE_SMC_RETURN_OK) { + pr_info("shm service not available\n"); + return ERR_PTR(-ENOENT); + } + + if (res.result.settings != OPTEE_SMC_SHM_CACHED) { + pr_err("only normal cached shared memory supported\n"); + return ERR_PTR(-EINVAL); + } + + begin = roundup(res.result.start, PAGE_SIZE); + end = rounddown(res.result.start + res.result.size, PAGE_SIZE); + paddr = begin; + size = end - begin; + + if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) { + pr_err("too small shared memory 
area\n"); + return ERR_PTR(-EINVAL); + } + + va = memremap(paddr, size, MEMREMAP_WB); + if (!va) { + pr_err("shared memory ioremap failed\n"); + return ERR_PTR(-EINVAL); + } + vaddr = (unsigned long)va; + + priv_info.vaddr = vaddr; + priv_info.paddr = paddr; + priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + + pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info); + if (IS_ERR(pool)) { + memunmap(va); + goto out; + } + + *memremaped_shm = va; +out: + return pool; +} + +/* Simple wrapper functions to be able to use a function pointer */ +static void optee_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +static void optee_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +static optee_invoke_fn *get_invoke_func(struct device_node *np) +{ + const char *method; + + pr_info("probing for conduit method from DT.\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warn("missing \"method\" property\n"); + return ERR_PTR(-ENXIO); + } + + if (!strcmp("hvc", method)) + return optee_smccc_hvc; + else if (!strcmp("smc", method)) + return optee_smccc_smc; + + pr_warn("invalid \"method\" property: %s\n", method); + return ERR_PTR(-EINVAL); +} + +static struct optee *optee_probe(struct device_node *np) +{ + optee_invoke_fn *invoke_fn; + struct tee_shm_pool *pool; + struct optee *optee = NULL; + void *memremaped_shm = NULL; + struct tee_device *teedev; + u32 
sec_caps; + int rc; + + invoke_fn = get_invoke_func(np); + if (IS_ERR(invoke_fn)) + return (void *)invoke_fn; + + if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { + pr_warn("api uid mismatch\n"); + return ERR_PTR(-EINVAL); + } + + if (!optee_msg_api_revision_is_compatible(invoke_fn)) { + pr_warn("api revision mismatch\n"); + return ERR_PTR(-EINVAL); + } + + if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { + pr_warn("capabilities mismatch\n"); + return ERR_PTR(-EINVAL); + } + + /* + * We have no other option for shared memory, if secure world + * doesn't have any reserved memory we can use we can't continue. + */ + if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) + return ERR_PTR(-EINVAL); + + pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm); + if (IS_ERR(pool)) + return (void *)pool; + + optee = kzalloc(sizeof(*optee), GFP_KERNEL); + if (!optee) { + rc = -ENOMEM; + goto err; + } + + optee->invoke_fn = invoke_fn; + + teedev = tee_device_alloc(&optee_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err; + } + optee->teedev = teedev; + + teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err; + } + optee->supp_teedev = teedev; + + rc = tee_device_register(optee->teedev); + if (rc) + goto err; + + rc = tee_device_register(optee->supp_teedev); + if (rc) + goto err; + + mutex_init(&optee->call_queue.mutex); + INIT_LIST_HEAD(&optee->call_queue.waiters); + optee_wait_queue_init(&optee->wait_queue); + optee_supp_init(&optee->supp); + optee->memremaped_shm = memremaped_shm; + optee->pool = pool; + + optee_enable_shm_cache(optee); + + pr_info("initialized driver\n"); + return optee; +err: + if (optee) { + /* + * tee_device_unregister() is safe to call even if the + * devices hasn't been registered with + * tee_device_register() yet. 
+ */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + kfree(optee); + } + if (pool) + tee_shm_pool_free(pool); + if (memremaped_shm) + memunmap(memremaped_shm); + return ERR_PTR(rc); +} + +static void optee_remove(struct optee *optee) +{ + /* + * Ask OP-TEE to free all cached shared memory objects to decrease + * reference counters and also avoid wild pointers in secure world + * into the old shared memory range. + */ + optee_disable_shm_cache(optee); + + /* + * The two devices has to be unregistered before we can free the + * other resources. + */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + + tee_shm_pool_free(optee->pool); + if (optee->memremaped_shm) + memunmap(optee->memremaped_shm); + optee_wait_queue_exit(&optee->wait_queue); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); + + kfree(optee); +} + +static const struct of_device_id optee_match[] = { + { .compatible = "linaro,optee-tz" }, + {}, +}; + +static struct optee *optee_svc; + +static int __init optee_driver_init(void) +{ + struct device_node *fw_np; + struct device_node *np; + struct optee *optee; + + /* Node is supposed to be below /firmware */ + fw_np = of_find_node_by_name(NULL, "firmware"); + if (!fw_np) + return -ENODEV; + + np = of_find_matching_node(fw_np, optee_match); + of_node_put(fw_np); + if (!np) + return -ENODEV; + + optee = optee_probe(np); + of_node_put(np); + + if (IS_ERR(optee)) + return PTR_ERR(optee); + + optee_svc = optee; + + return 0; +} +module_init(optee_driver_init); + +static void __exit optee_driver_exit(void) +{ + struct optee *optee = optee_svc; + + optee_svc = NULL; + if (optee) + optee_remove(optee); +} +module_exit(optee_driver_exit); + +MODULE_AUTHOR("Linaro"); +MODULE_DESCRIPTION("OP-TEE driver"); +MODULE_SUPPORTED_DEVICE(""); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h new file 
mode 100644 index 000000000000..dd7a06ee0462 --- /dev/null +++ b/drivers/tee/optee/optee_msg.h @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _OPTEE_MSG_H +#define _OPTEE_MSG_H + +#include <linux/bitops.h> +#include <linux/types.h> + +/* + * This file defines the OP-TEE message protocol used to communicate + * with an instance of OP-TEE running in secure world. + * + * This file is divided into three sections. + * 1. Formatting of messages. + * 2. Requests from normal world + * 3. Requests from secure world, Remote Procedure Call (RPC), handled by + * tee-supplicant. 
+ */ + +/***************************************************************************** + * Part 1 - formatting of messages + *****************************************************************************/ + +#define OPTEE_MSG_ATTR_TYPE_NONE 0x0 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1 +#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5 +#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7 +#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9 +#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa +#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb + +#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0) + +/* + * Meta parameter to be absorbed by the Secure OS and not passed + * to the Trusted Application. + * + * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION. + */ +#define OPTEE_MSG_ATTR_META BIT(8) + +/* + * The temporary shared memory object is not physically contiguous and this + * temp memref is followed by another fragment until the last temp memref + * that doesn't have this bit set. + */ +#define OPTEE_MSG_ATTR_FRAGMENT BIT(9) + +/* + * Memory attributes for caching passed with temp memrefs. The actual value + * used is defined outside the message protocol with the exception of + * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already + * defined for the memory range should be used. If optee_smc.h is used as + * bearer of this protocol OPTEE_SMC_SHM_* is used for values. 
+ */ +#define OPTEE_MSG_ATTR_CACHE_SHIFT 16 +#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0) +#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0 + +/* + * Same values as TEE_LOGIN_* from TEE Internal API + */ +#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000 +#define OPTEE_MSG_LOGIN_USER 0x00000001 +#define OPTEE_MSG_LOGIN_GROUP 0x00000002 +#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004 +#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005 +#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006 + +/** + * struct optee_msg_param_tmem - temporary memory reference parameter + * @buf_ptr: Address of the buffer + * @size: Size of the buffer + * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm + * + * Secure and normal world communicates pointers as physical address + * instead of the virtual address. This is because secure and normal world + * have completely independent memory mapping. Normal world can even have a + * hypervisor which need to translate the guest physical address (AKA IPA + * in ARM documentation) to a real physical address before passing the + * structure to secure world. + */ +struct optee_msg_param_tmem { + u64 buf_ptr; + u64 size; + u64 shm_ref; +}; + +/** + * struct optee_msg_param_rmem - registered memory reference parameter + * @offs: Offset into shared memory reference + * @size: Size of the buffer + * @shm_ref: Shared memory reference, pointer to a struct tee_shm + */ +struct optee_msg_param_rmem { + u64 offs; + u64 size; + u64 shm_ref; +}; + +/** + * struct optee_msg_param_value - opaque value parameter + * + * Value parameters are passed unchecked between normal and secure world. 
+ */ +struct optee_msg_param_value { + u64 a; + u64 b; + u64 c; +}; + +/** + * struct optee_msg_param - parameter used together with struct optee_msg_arg + * @attr: attributes + * @tmem: parameter by temporary memory reference + * @rmem: parameter by registered memory reference + * @value: parameter by opaque value + * + * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in + * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value, + * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and + * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem. + * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. + */ +struct optee_msg_param { + u64 attr; + union { + struct optee_msg_param_tmem tmem; + struct optee_msg_param_rmem rmem; + struct optee_msg_param_value value; + } u; +}; + +/** + * struct optee_msg_arg - call argument + * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_* + * @func: Trusted Application function, specific to the Trusted Application, + * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND + * @session: In parameter for all OPTEE_MSG_CMD_* except + * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead + * @cancel_id: Cancellation id, a unique value to identify this request + * @ret: return value + * @ret_origin: origin of the return value + * @num_params: number of parameters supplied to the OS Command + * @params: the parameters supplied to the OS Command + * + * All normal calls to Trusted OS uses this struct. If cmd requires further + * information than what these field holds it can be passed as a parameter + * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding + * attrs field). All parameters tagged as meta has to come first. + * + * Temp memref parameters can be fragmented if supported by the Trusted OS + * (when optee_smc.h is bearer of this protocol this is indicated with + * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). 
 If a logical memref parameter is + fragmented then all but the last fragment have the + * OPTEE_MSG_ATTR_FRAGMENT bit set in attrs. Even if a memref is fragmented + * it will still be presented as a single logical memref to the Trusted + * Application. + */ +struct optee_msg_arg { + u32 cmd; + u32 func; + u32 session; + u32 cancel_id; + u32 pad; + u32 ret; + u32 ret_origin; + u32 num_params; + + /* num_params tells the actual number of elements in params */ + struct optee_msg_param params[0]; +}; + +/** + * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg + * + * @num_params: Number of parameters embedded in the struct optee_msg_arg + * + * Returns the size of the struct optee_msg_arg together with the number + * of embedded parameters. + */ +#define OPTEE_MSG_GET_ARG_SIZE(num_params) \ + (sizeof(struct optee_msg_arg) + \ + sizeof(struct optee_msg_param) * (num_params)) + +/***************************************************************************** + * Part 2 - requests from normal world + *****************************************************************************/ + +/* + * Return the following UID if using API specified in this file without + * further extensions: + * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b. + * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1, + * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3. + */ +#define OPTEE_MSG_UID_0 0x384fb3e0 +#define OPTEE_MSG_UID_1 0xe7f811e3 +#define OPTEE_MSG_UID_2 0xaf630002 +#define OPTEE_MSG_UID_3 0xa5d5c51b +#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01 + +/* + * Returns 2.0 if using API specified in this file without further + * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR + * and OPTEE_MSG_REVISION_MINOR + */ +#define OPTEE_MSG_REVISION_MAJOR 2 +#define OPTEE_MSG_REVISION_MINOR 0 +#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03 + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. 
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in 4 32-bit words in the same way as + * OPTEE_MSG_FUNCID_CALLS_UID described above. + */ +#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0 +#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3 +#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002 +#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b +#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000 + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in 2 32-bit words in the same way as + * OPTEE_MSG_CALLS_REVISION described above. + */ +#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001 + +/* + * Do a secure call with struct optee_msg_arg as argument + * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd + * + * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application. + * The first two parameters are tagged as meta, holding two value + * parameters to pass the following information: + * param[0].u.value.a-b uuid of Trusted Application + * param[1].u.value.a-b uuid of Client + * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_* + * + * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command a previously opened + * session to a Trusted Application. struct optee_msg_arg::func is Trusted + * Application function, specific to the Trusted Application. + * + * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to + * Trusted Application. + * + * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command. + * + * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. 
The
 * information is passed as:
 * [in] param[0].attr			OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
 *					[| OPTEE_MSG_ATTR_FRAGMENT]
 * [in] param[0].u.tmem.buf_ptr		physical address (of first fragment)
 * [in] param[0].u.tmem.size		size (of first fragment)
 * [in] param[0].u.tmem.shm_ref		holds shared memory reference
 * ...
 * The shared memory can optionally be fragmented, temp memrefs can follow
 * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set.
 *
 * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared
 * memory reference. The information is passed as:
 * [in] param[0].attr			OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
 * [in] param[0].u.rmem.shm_ref		holds shared memory reference
 * [in] param[0].u.rmem.offs		0
 * [in] param[0].u.rmem.size		0
 */
#define OPTEE_MSG_CMD_OPEN_SESSION	0
#define OPTEE_MSG_CMD_INVOKE_COMMAND	1
#define OPTEE_MSG_CMD_CLOSE_SESSION	2
#define OPTEE_MSG_CMD_CANCEL		3
#define OPTEE_MSG_CMD_REGISTER_SHM	4
#define OPTEE_MSG_CMD_UNREGISTER_SHM	5
#define OPTEE_MSG_FUNCID_CALL_WITH_ARG	0x0004

/*****************************************************************************
 * Part 3 - Requests from secure world, RPC
 *****************************************************************************/

/*
 * All RPC is done with a struct optee_msg_arg as bearer of information,
 * struct optee_msg_arg::arg holds values defined by OPTEE_MSG_RPC_CMD_* below
 *
 * RPC communication with tee-supplicant is reversed compared to normal
 * client communication described above. The supplicant receives requests
 * and sends responses.
 */

/*
 * Load a TA into memory, defined in tee-supplicant
 */
#define OPTEE_MSG_RPC_CMD_LOAD_TA	0

/*
 * Reserved
 */
#define OPTEE_MSG_RPC_CMD_RPMB		1

/*
 * File system access, defined in tee-supplicant
 */
#define OPTEE_MSG_RPC_CMD_FS		2

/*
 * Get time
 *
 * Returns number of seconds and nano seconds since the Epoch,
 * 1970-01-01 00:00:00 +0000 (UTC).
+ * + * [out] param[0].u.value.a Number of seconds + * [out] param[0].u.value.b Number of nano seconds. + */ +#define OPTEE_MSG_RPC_CMD_GET_TIME 3 + +/* + * Wait queue primitive, helper for secure world to implement a wait queue. + * + * If secure world need to wait for a secure world mutex it issues a sleep + * request instead of spinning in secure world. Conversely is a wakeup + * request issued when a secure world mutex with a thread waiting thread is + * unlocked. + * + * Waiting on a key + * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP + * [in] param[0].u.value.b wait key + * + * Waking up a key + * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP + * [in] param[0].u.value.b wakeup key + */ +#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4 +#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0 +#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1 + +/* + * Suspend execution + * + * [in] param[0].value .a number of milliseconds to suspend + */ +#define OPTEE_MSG_RPC_CMD_SUSPEND 5 + +/* + * Allocate a piece of shared memory + * + * Shared memory can optionally be fragmented, to support that additional + * spare param entries are allocated to make room for eventual fragments. + * The spare param entries has .attr = OPTEE_MSG_ATTR_TYPE_NONE when + * unused. All returned temp memrefs except the last should have the + * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field. + * + * [in] param[0].u.value.a type of memory one of + * OPTEE_MSG_RPC_SHM_TYPE_* below + * [in] param[0].u.value.b requested size + * [in] param[0].u.value.c required alignment + * + * [out] param[0].u.tmem.buf_ptr physical address (of first fragment) + * [out] param[0].u.tmem.size size (of first fragment) + * [out] param[0].u.tmem.shm_ref shared memory reference + * ... 
+ * [out] param[n].u.tmem.buf_ptr physical address + * [out] param[n].u.tmem.size size + * [out] param[n].u.tmem.shm_ref shared memory reference (same value + * as in param[n-1].u.tmem.shm_ref) + */ +#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6 +/* Memory that can be shared with a non-secure user space application */ +#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0 +/* Memory only shared with non-secure kernel */ +#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1 + +/* + * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC + * + * [in] param[0].u.value.a type of memory one of + * OPTEE_MSG_RPC_SHM_TYPE_* above + * [in] param[0].u.value.b value of shared memory reference + * returned in param[0].u.tmem.shm_ref + * above + */ +#define OPTEE_MSG_RPC_CMD_SHM_FREE 7 + +#endif /* _OPTEE_MSG_H */ diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h new file mode 100644 index 000000000000..c374cd594314 --- /dev/null +++ b/drivers/tee/optee/optee_private.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef OPTEE_PRIVATE_H +#define OPTEE_PRIVATE_H + +#include <linux/arm-smccc.h> +#include <linux/semaphore.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include "optee_msg.h" + +#define OPTEE_MAX_ARG_SIZE 1024 + +/* Some Global Platform error codes used in this driver */ +#define TEEC_SUCCESS 0x00000000 +#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006 +#define TEEC_ERROR_COMMUNICATION 0xFFFF000E +#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C + +#define TEEC_ORIGIN_COMMS 0x00000002 + +typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, + struct arm_smccc_res *); + +struct optee_call_queue { + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head waiters; +}; + +struct optee_wait_queue { + /* Serializes access to this struct */ + struct mutex mu; + struct list_head db; +}; + +/** + * struct optee_supp - supplicant synchronization struct + * @ctx the context of current connected supplicant. 
+ * if !NULL the supplicant device is available for use, + * else busy + * @ctx_mutex: held while accessing @ctx + * @func: supplicant function id to call + * @ret: call return value + * @num_params: number of elements in @param + * @param: parameters for @func + * @req_posted: if true, a request has been posted to the supplicant + * @supp_next_send: if true, next step is for supplicant to send response + * @thrd_mutex: held by the thread doing a request to supplicant + * @supp_mutex: held by supplicant while operating on this struct + * @data_to_supp: supplicant is waiting on this for next request + * @data_from_supp: requesting thread is waiting on this to get the result + */ +struct optee_supp { + struct tee_context *ctx; + /* Serializes access of ctx */ + struct mutex ctx_mutex; + + u32 func; + u32 ret; + size_t num_params; + struct tee_param *param; + + bool req_posted; + bool supp_next_send; + /* Serializes access to this struct for requesting thread */ + struct mutex thrd_mutex; + /* Serializes access to this struct for supplicant threads */ + struct mutex supp_mutex; + struct completion data_to_supp; + struct completion data_from_supp; +}; + +/** + * struct optee - main service struct + * @supp_teedev: supplicant device + * @teedev: client device + * @invoke_fn: function to issue smc or hvc + * @call_queue: queue of threads waiting to call @invoke_fn + * @wait_queue: queue of threads from secure world waiting for a + * secure world sync object + * @supp: supplicant synchronization struct for RPC to supplicant + * @pool: shared memory pool + * @memremaped_shm virtual address of memory in shared memory pool + */ +struct optee { + struct tee_device *supp_teedev; + struct tee_device *teedev; + optee_invoke_fn *invoke_fn; + struct optee_call_queue call_queue; + struct optee_wait_queue wait_queue; + struct optee_supp supp; + struct tee_shm_pool *pool; + void *memremaped_shm; +}; + +struct optee_session { + struct list_head list_node; + u32 session_id; +}; + 
+struct optee_context_data { + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head sess_list; +}; + +struct optee_rpc_param { + u32 a0; + u32 a1; + u32 a2; + u32 a3; + u32 a4; + u32 a5; + u32 a6; + u32 a7; +}; + +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param); + +void optee_wait_queue_init(struct optee_wait_queue *wq); +void optee_wait_queue_exit(struct optee_wait_queue *wq); + +u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, + struct tee_param *param); + +int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len); +int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len); +void optee_supp_init(struct optee_supp *supp); +void optee_supp_uninit(struct optee_supp *supp); + +int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param); +int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param); + +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg); +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); +int optee_close_session(struct tee_context *ctx, u32 session); +int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); +int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); + +void optee_enable_shm_cache(struct optee *optee); +void optee_disable_shm_cache(struct optee *optee); + +int optee_from_msg_param(struct tee_param *params, size_t num_params, + const struct optee_msg_param *msg_params); +int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + const struct tee_param *params); + +/* + * Small helpers + */ + +static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1) +{ + return (void *)(unsigned long)(((u64)reg0 << 32) | reg1); +} + +static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, 
u64 val) +{ + *reg0 = val >> 32; + *reg1 = val; +} + +#endif /*OPTEE_PRIVATE_H*/ diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h new file mode 100644 index 000000000000..13b7c98cdf25 --- /dev/null +++ b/drivers/tee/optee/optee_smc.h @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef OPTEE_SMC_H +#define OPTEE_SMC_H + +#include <linux/arm-smccc.h> +#include <linux/bitops.h> + +#define OPTEE_SMC_STD_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) +#define OPTEE_SMC_FAST_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) + +/* + * Function specified by SMC Calling convention. + */ +#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00 +#define OPTEE_SMC_CALLS_COUNT \ + ARM_SMCCC_CALL_VAL(OPTEE_SMC_FAST_CALL, SMCCC_SMC_32, \ + SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_COUNT) + +/* + * Normal cached memory (write-back), shareable for SMP systems and not + * shareable for UP systems. + */ +#define OPTEE_SMC_SHM_CACHED 1 + +/* + * a0..a7 is used as register names in the descriptions below, on arm32 + * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's + * 32-bit registers. + */ + +/* + * Function specified by SMC Calling convention + * + * Return one of the following UIDs if using API specified in this file + * without further extentions: + * 65cb6b93-af0c-4617-8ed6-644a8d1140f8 + * see also OPTEE_SMC_UID_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID +#define OPTEE_SMC_CALLS_UID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_UID) + +/* + * Function specified by SMC Calling convention + * + * Returns 2.0 if using API specified in this file without further extentions. 
+ * see also OPTEE_MSG_REVISION_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION +#define OPTEE_SMC_CALLS_REVISION \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_REVISION) + +struct optee_smc_calls_revision_result { + unsigned long major; + unsigned long minor; + unsigned long reserved0; + unsigned long reserved1; +}; + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. + * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID + * described above. + */ +#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID +#define OPTEE_SMC_CALL_GET_OS_UUID \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID) + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION + * described above. 
 */
#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
#define OPTEE_SMC_CALL_GET_OS_REVISION \
	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)

/*
 * Call with struct optee_msg_arg as argument
 *
 * Call register usage:
 * a0	SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
 * a1	Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg
 * a2	Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg
 * a3	Cache settings, not used if physical pointer is in a predefined shared
 *	memory area else per OPTEE_SMC_SHM_*
 * a4-6	Not used
 * a7	Hypervisor Client ID register
 *
 * Normal return register usage:
 * a0	Return value, OPTEE_SMC_RETURN_*
 * a1-3	Not used
 * a4-7	Preserved
 *
 * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
 * a0	Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
 * a1-3	Preserved
 * a4-7	Preserved
 *
 * RPC return register usage:
 * a0	Return value, OPTEE_SMC_RETURN_IS_RPC(val)
 * a1-2	RPC parameters
 * a3-7	Resume information, must be preserved
 *
 * Possible return values:
 * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION	Trusted OS does not recognize this
 *					function.
 * OPTEE_SMC_RETURN_OK			Call completed, result updated in
 *					the previously supplied struct
 *					optee_msg_arg.
 * OPTEE_SMC_RETURN_ETHREAD_LIMIT	Number of Trusted OS threads exceeded,
 *					try again later.
 * OPTEE_SMC_RETURN_EBADADDR		Bad physical pointer to struct
 *					optee_msg_arg.
 * OPTEE_SMC_RETURN_EBADCMD		Bad/unknown cmd in struct optee_msg_arg
 * OPTEE_SMC_RETURN_IS_RPC()		Call suspended by RPC call to normal
 *					world.
 */
#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
#define OPTEE_SMC_CALL_WITH_ARG \
	OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)

/*
 * Get Shared Memory Config
 *
 * Returns the Secure/Non-secure shared memory config.
 *
 * Call register usage:
 * a0	SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
 * a1-6	Not used
 * a7	Hypervisor Client ID register
 *
 * Have config return register usage:
 * a0	OPTEE_SMC_RETURN_OK
 * a1	Physical address of start of SHM
 * a2	Size of SHM
 * a3	Cache settings of memory, as defined by the
 *	OPTEE_SMC_SHM_* values above
 * a4-7	Preserved
 *
 * Not available register usage:
 * a0	OPTEE_SMC_RETURN_ENOTAVAIL
 * a1-3	Not used
 * a4-7	Preserved
 */
#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7
#define OPTEE_SMC_GET_SHM_CONFIG \
	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)

struct optee_smc_get_shm_config_result {
	unsigned long status;
	unsigned long start;
	unsigned long size;
	unsigned long settings;
};

/*
 * Exchanges capabilities between normal world and secure world
 *
 * Call register usage:
 * a0	SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
 * a1	bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
 * a2-6	Not used
 * a7	Hypervisor Client ID register
 *
 * Normal return register usage:
 * a0	OPTEE_SMC_RETURN_OK
 * a1	bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
 * a2-7	Preserved
 *
 * Error return register usage:
 * a0	OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
 * a1	bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
 * a2-7	Preserved
 */
/* Normal world works as a uniprocessor system */
#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR		BIT(0)
/* Secure world has reserved shared memory for normal world to use */
#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM	BIT(0)
/* Secure world can communicate via previously unregistered shared memory */
#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM	BIT(1)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)

struct optee_smc_exchange_capabilities_result {
	unsigned long status;
	unsigned long
capabilities; + unsigned long reserved0; + unsigned long reserved1; +}; + +/* + * Disable and empties cache of shared memory objects + * + * Secure world can cache frequently used shared memory objects, for + * example objects used as RPC arguments. When secure world is idle this + * function returns one shared memory reference to free. To disable the + * cache and free all cached objects this function has to be called until + * it returns OPTEE_SMC_RETURN_ENOTAVAIL. + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Upper 32bit of a 64bit Shared memory cookie + * a2 Lower 32bit of a 64bit Shared memory cookie + * a3-7 Preserved + * + * Cache empty return register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-7 Preserved + * + * Not idle return register usage: + * a0 OPTEE_SMC_RETURN_EBUSY + * a1-7 Preserved + */ +#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10 +#define OPTEE_SMC_DISABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE) + +struct optee_smc_disable_shm_cache_result { + unsigned long status; + unsigned long shm_upper32; + unsigned long shm_lower32; + unsigned long reserved0; +}; + +/* + * Enable cache of shared memory objects + * + * Secure world can cache frequently used shared memory objects, for + * example objects used as RPC arguments. When secure world is idle this + * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If + * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned. 
+ * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1-7 Preserved + * + * Not idle return register usage: + * a0 OPTEE_SMC_RETURN_EBUSY + * a1-7 Preserved + */ +#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11 +#define OPTEE_SMC_ENABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE) + +/* + * Resume from RPC (for example after processing an IRQ) + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC + * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned + * OPTEE_SMC_RETURN_RPC in a0 + * + * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above. + * + * Possible return values + * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this + * function. + * OPTEE_SMC_RETURN_OK Original call completed, result + * updated in the previously supplied. + * struct optee_msg_arg + * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal + * world. + * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume + * information was corrupt. + */ +#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3 +#define OPTEE_SMC_CALL_RETURN_FROM_RPC \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC) + +#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000 +#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000 +#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF + +#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \ + ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK) + +#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX) + +/* + * Allocate memory for RPC parameter passing. The memory is used to hold a + * struct optee_msg_arg. 
+ * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC + * a1 Size in bytes of required argument memory + * a2 Not used + * a3 Resume information, must be preserved + * a4-5 Not used + * a6-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1 Upper 32bits of 64bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated. + * a2 Lower 32bits of 64bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated + * a3 Preserved + * a4 Upper 32bits of 64bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a5 Lower 32bits of 64bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a6-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_ALLOC 0 +#define OPTEE_SMC_RETURN_RPC_ALLOC \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC) + +/* + * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_FREE + * a1 Upper 32bits of 64bit shared memory cookie belonging to this + * argument memory + * a2 Lower 32bits of 64bit shared memory cookie belonging to this + * argument memory + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_FREE 2 +#define OPTEE_SMC_RETURN_RPC_FREE \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE) + +/* + * Deliver an IRQ in normal world. + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_IRQ + * a1-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. 
+ * a1-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_IRQ 4 +#define OPTEE_SMC_RETURN_RPC_IRQ \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ) + +/* + * Do an RPC request. The supplied struct optee_msg_arg tells which + * request to do and the parameters for the request. The following fields + * are used (the rest are unused): + * - cmd the Request ID + * - ret return value of the request, filled in by normal world + * - num_params number of parameters for the request + * - params the parameters + * - param_attrs attributes of the parameters + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_CMD + * a1 Upper 32bit of a 64bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a2 Lower 32bit of a 64bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. 
+ * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_CMD 5 +#define OPTEE_SMC_RETURN_RPC_CMD \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD) + +/* Returned in a0 */ +#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF + +/* Returned in a0 only from Trusted OS functions */ +#define OPTEE_SMC_RETURN_OK 0x0 +#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1 +#define OPTEE_SMC_RETURN_EBUSY 0x2 +#define OPTEE_SMC_RETURN_ERESUME 0x3 +#define OPTEE_SMC_RETURN_EBADADDR 0x4 +#define OPTEE_SMC_RETURN_EBADCMD 0x5 +#define OPTEE_SMC_RETURN_ENOMEM 0x6 +#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7 +#define OPTEE_SMC_RETURN_IS_RPC(ret) __optee_smc_return_is_rpc((ret)) + +static inline bool __optee_smc_return_is_rpc(u32 ret) +{ + return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION && + (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) == + OPTEE_SMC_RETURN_RPC_PREFIX; +} + +#endif /* OPTEE_SMC_H */ diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c new file mode 100644 index 000000000000..8814eca06021 --- /dev/null +++ b/drivers/tee/optee/rpc.c @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "optee_private.h" +#include "optee_smc.h" + +struct wq_entry { + struct list_head link; + struct completion c; + u32 key; +}; + +void optee_wait_queue_init(struct optee_wait_queue *priv) +{ + mutex_init(&priv->mu); + INIT_LIST_HEAD(&priv->db); +} + +void optee_wait_queue_exit(struct optee_wait_queue *priv) +{ + mutex_destroy(&priv->mu); +} + +static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg) +{ + struct timespec64 ts; + + if (arg->num_params != 1) + goto bad; + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT) + goto bad; + + getnstimeofday64(&ts); + arg->params[0].u.value.a = ts.tv_sec; + arg->params[0].u.value.b = ts.tv_nsec; + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w; + + mutex_lock(&wq->mu); + + list_for_each_entry(w, &wq->db, link) + if (w->key == key) + goto out; + + w = kmalloc(sizeof(*w), GFP_KERNEL); + if (w) { + init_completion(&w->c); + w->key = key; + list_add_tail(&w->link, &wq->db); + } +out: + mutex_unlock(&wq->mu); + return w; +} + +static void wq_sleep(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w = wq_entry_get(wq, key); + + if (w) { + wait_for_completion(&w->c); + mutex_lock(&wq->mu); + list_del(&w->link); + mutex_unlock(&wq->mu); + kfree(w); + } +} + +static void wq_wakeup(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w = wq_entry_get(wq, key); + + if (w) + complete(&w->c); +} + +static void handle_rpc_func_cmd_wq(struct optee *optee, + struct optee_msg_arg *arg) +{ + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + switch 
(arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP: + wq_sleep(&optee->wait_queue, arg->params[0].u.value.b); + break; + case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP: + wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b); + break; + default: + goto bad; + } + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg) +{ + u32 msec_to_wait; + + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + msec_to_wait = arg->params[0].u.value.a; + + /* set task's state to interruptible sleep */ + set_current_state(TASK_INTERRUPTIBLE); + + /* take a nap */ + msleep(msec_to_wait); + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static void handle_rpc_supp_cmd(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + struct tee_param *params; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + params = kmalloc_array(arg->num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + return; + } + + if (optee_from_msg_param(params, arg->num_params, arg->params)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto out; + } + + arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params); + + if (optee_to_msg_param(arg->params, arg->num_params, params)) + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +out: + kfree(params); +} + +static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) +{ + u32 ret; + struct tee_param param; + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct tee_shm *shm; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; + param.u.value.b = sz; + param.u.value.c = 0; + + ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, ¶m); + if (ret) + return ERR_PTR(-ENOMEM); + + 
mutex_lock(&optee->supp.ctx_mutex); + /* Increases count as secure world doesn't have a reference */ + shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c); + mutex_unlock(&optee->supp.ctx_mutex); + return shm; +} + +static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + phys_addr_t pa; + struct tee_shm *shm; + size_t sz; + size_t n; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (!arg->num_params || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + for (n = 1; n < arg->num_params; n++) { + if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + } + + sz = arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_SHM_TYPE_APPL: + shm = cmd_alloc_suppl(ctx, sz); + break; + case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED); + break; + default: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + if (IS_ERR(shm)) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + return; + } + + if (tee_shm_get_pa(shm, 0, &pa)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto bad; + } + + arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT; + arg->params[0].u.tmem.buf_ptr = pa; + arg->params[0].u.tmem.size = sz; + arg->params[0].u.tmem.shm_ref = (unsigned long)shm; + arg->ret = TEEC_SUCCESS; + return; +bad: + tee_shm_free(shm); +} + +static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm) +{ + struct tee_param param; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; + param.u.value.b = tee_shm_get_id(shm); + param.u.value.c = 0; + + /* + * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure + * world has released its reference. 
+ * + * It's better to do this before sending the request to supplicant + * as we'd like to let the process doing the initial allocation to + * do release the last reference too in order to avoid stacking + * many pending fput() on the client process. This could otherwise + * happen if secure world does many allocate and free in a single + * invoke. + */ + tee_shm_put(shm); + + optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, ¶m); +} + +static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + struct tee_shm *shm; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (arg->num_params != 1 || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_SHM_TYPE_APPL: + cmd_free_suppl(ctx, shm); + break; + case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + tee_shm_free(shm); + break; + default: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + } + arg->ret = TEEC_SUCCESS; +} + +static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, + struct tee_shm *shm) +{ + struct optee_msg_arg *arg; + + arg = tee_shm_get_va(shm, 0); + if (IS_ERR(arg)) { + pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm); + return; + } + + switch (arg->cmd) { + case OPTEE_MSG_RPC_CMD_GET_TIME: + handle_rpc_func_cmd_get_time(arg); + break; + case OPTEE_MSG_RPC_CMD_WAIT_QUEUE: + handle_rpc_func_cmd_wq(optee, arg); + break; + case OPTEE_MSG_RPC_CMD_SUSPEND: + handle_rpc_func_cmd_wait(arg); + break; + case OPTEE_MSG_RPC_CMD_SHM_ALLOC: + handle_rpc_func_cmd_shm_alloc(ctx, arg); + break; + case OPTEE_MSG_RPC_CMD_SHM_FREE: + handle_rpc_func_cmd_shm_free(ctx, arg); + break; + default: + handle_rpc_supp_cmd(ctx, arg); + } +} + +/** + * optee_handle_rpc() - handle RPC from secure world + * @ctx: context doing the RPC + * @param: value of registers for the RPC + * + * Result of 
RPC is written back into @param. + */ +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct tee_shm *shm; + phys_addr_t pa; + + switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { + case OPTEE_SMC_RPC_FUNC_ALLOC: + shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED); + if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { + reg_pair_from_64(¶m->a1, ¶m->a2, pa); + reg_pair_from_64(¶m->a4, ¶m->a5, + (unsigned long)shm); + } else { + param->a1 = 0; + param->a2 = 0; + param->a4 = 0; + param->a5 = 0; + } + break; + case OPTEE_SMC_RPC_FUNC_FREE: + shm = reg_pair_to_ptr(param->a1, param->a2); + tee_shm_free(shm); + break; + case OPTEE_SMC_RPC_FUNC_IRQ: + /* + * An IRQ was raised while secure world was executing, + * since all IRQs are handled in Linux a dummy RPC is + * performed to let Linux take the IRQ through the normal + * vector. + */ + break; + case OPTEE_SMC_RPC_FUNC_CMD: + shm = reg_pair_to_ptr(param->a1, param->a2); + handle_rpc_func_cmd(ctx, optee, shm); + break; + default: + pr_warn("Unknown RPC func 0x%x\n", + (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)); + break; + } + + param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC; +} diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c new file mode 100644 index 000000000000..b4ea0678a436 --- /dev/null +++ b/drivers/tee/optee/supp.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include "optee_private.h" + +void optee_supp_init(struct optee_supp *supp) +{ + memset(supp, 0, sizeof(*supp)); + mutex_init(&supp->ctx_mutex); + mutex_init(&supp->thrd_mutex); + mutex_init(&supp->supp_mutex); + init_completion(&supp->data_to_supp); + init_completion(&supp->data_from_supp); +} + +void optee_supp_uninit(struct optee_supp *supp) +{ + mutex_destroy(&supp->ctx_mutex); + mutex_destroy(&supp->thrd_mutex); + mutex_destroy(&supp->supp_mutex); +} + +/** + * optee_supp_thrd_req() - request service from supplicant + * @ctx: context doing the request + * @func: function requested + * @num_params: number of elements in @param array + * @param: parameters for function + * + * Returns result of operation to be passed to secure world + */ +u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, + struct tee_param *param) +{ + bool interruptable; + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_supp *supp = &optee->supp; + u32 ret; + + /* + * Other threads blocks here until we've copied our answer from + * supplicant. + */ + while (mutex_lock_interruptible(&supp->thrd_mutex)) { + /* See comment below on when the RPC can be interrupted. */ + mutex_lock(&supp->ctx_mutex); + interruptable = !supp->ctx; + mutex_unlock(&supp->ctx_mutex); + if (interruptable) + return TEEC_ERROR_COMMUNICATION; + } + + /* + * We have exclusive access now since the supplicant at this + * point is either doing a + * wait_for_completion_interruptible(&supp->data_to_supp) or is in + * userspace still about to do the ioctl() to enter + * optee_supp_recv() below. 
+ */ + + supp->func = func; + supp->num_params = num_params; + supp->param = param; + supp->req_posted = true; + + /* Let supplicant get the data */ + complete(&supp->data_to_supp); + + /* + * Wait for supplicant to process and return result, once we've + * returned from wait_for_completion(data_from_supp) we have + * exclusive access again. + */ + while (wait_for_completion_interruptible(&supp->data_from_supp)) { + mutex_lock(&supp->ctx_mutex); + interruptable = !supp->ctx; + if (interruptable) { + /* + * There's no supplicant available and since the + * supp->ctx_mutex currently is held none can + * become available until the mutex released + * again. + * + * Interrupting an RPC to supplicant is only + * allowed as a way of slightly improving the user + * experience in case the supplicant hasn't been + * started yet. During normal operation the supplicant + * will serve all requests in a timely manner and + * interrupting then wouldn't make sense. + */ + supp->ret = TEEC_ERROR_COMMUNICATION; + init_completion(&supp->data_to_supp); + } + mutex_unlock(&supp->ctx_mutex); + if (interruptable) + break; + } + + ret = supp->ret; + supp->param = NULL; + supp->req_posted = false; + + /* We're done, let someone else talk to the supplicant now. 
*/ + mutex_unlock(&supp->thrd_mutex); + + return ret; +} + +/** + * optee_supp_recv() - receive request for supplicant + * @ctx: context receiving the request + * @func: requested function in supplicant + * @num_params: number of elements allocated in @param, updated with number + * used elements + * @param: space for parameters for @func + * + * Returns 0 on success or <0 on failure + */ +int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct optee_supp *supp = &optee->supp; + int rc; + + /* + * In case two threads in one supplicant is calling this function + * simultaneously we need to protect the data with a mutex which + * we'll release before returning. + */ + mutex_lock(&supp->supp_mutex); + + if (supp->supp_next_send) { + /* + * optee_supp_recv() has been called again without + * a optee_supp_send() in between. Supplicant has + * probably been restarted before it was able to + * write back last result. Abort last request and + * wait for a new. + */ + if (supp->req_posted) { + supp->ret = TEEC_ERROR_COMMUNICATION; + supp->supp_next_send = false; + complete(&supp->data_from_supp); + } + } + + /* + * This is where supplicant will be hanging most of the + * time, let's make this interruptable so we can easily + * restart supplicant if needed. + */ + if (wait_for_completion_interruptible(&supp->data_to_supp)) { + rc = -ERESTARTSYS; + goto out; + } + + /* We have exlusive access to the data */ + + if (*num_params < supp->num_params) { + /* + * Not enough room for parameters, tell supplicant + * it failed and abort last request. 
+ */ + supp->ret = TEEC_ERROR_COMMUNICATION; + rc = -EINVAL; + complete(&supp->data_from_supp); + goto out; + } + + *func = supp->func; + *num_params = supp->num_params; + memcpy(param, supp->param, + sizeof(struct tee_param) * supp->num_params); + + /* Allow optee_supp_send() below to do its work */ + supp->supp_next_send = true; + + rc = 0; +out: + mutex_unlock(&supp->supp_mutex); + return rc; +} + +/** + * optee_supp_send() - send result of request from supplicant + * @ctx: context sending result + * @ret: return value of request + * @num_params: number of parameters returned + * @param: returned parameters + * + * Returns 0 on success or <0 on failure. + */ +int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct optee_supp *supp = &optee->supp; + size_t n; + int rc = 0; + + /* + * We still have exclusive access to the data since that's how we + * left it when returning from optee_supp_read(). + */ + + /* See comment on mutex in optee_supp_read() above */ + mutex_lock(&supp->supp_mutex); + + if (!supp->supp_next_send) { + /* + * Something strange is going on, supplicant shouldn't + * enter optee_supp_send() in this state + */ + rc = -ENOENT; + goto out; + } + + if (num_params != supp->num_params) { + /* + * Something is wrong, let supplicant restart. Next call to + * optee_supp_recv() will give an error to the requesting + * thread and release it. 
+ */ + rc = -EINVAL; + goto out; + } + + /* Update out and in/out parameters */ + for (n = 0; n < num_params; n++) { + struct tee_param *p = supp->param + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + p->u.value.a = param[n].u.value.a; + p->u.value.b = param[n].u.value.b; + p->u.value.c = param[n].u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + p->u.memref.size = param[n].u.memref.size; + break; + default: + break; + } + } + supp->ret = ret; + + /* Allow optee_supp_recv() above to do its work */ + supp->supp_next_send = false; + + /* Let the requesting thread continue */ + complete(&supp->data_from_supp); +out: + mutex_unlock(&supp->supp_mutex); + return rc; +} diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c new file mode 100644 index 000000000000..5c60bf4423e6 --- /dev/null +++ b/drivers/tee/tee_core.c @@ -0,0 +1,893 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include <linux/uaccess.h> +#include "tee_private.h" + +#define TEE_NUM_DEVICES 32 + +#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x)) + +/* + * Unprivileged devices in the lower half range and privileged devices in + * the upper half range. 
+ */ +static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES); +static DEFINE_SPINLOCK(driver_lock); + +static struct class *tee_class; +static dev_t tee_devt; + +static int tee_open(struct inode *inode, struct file *filp) +{ + int rc; + struct tee_device *teedev; + struct tee_context *ctx; + + teedev = container_of(inode->i_cdev, struct tee_device, cdev); + if (!tee_device_get(teedev)) + return -EINVAL; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + rc = -ENOMEM; + goto err; + } + + ctx->teedev = teedev; + INIT_LIST_HEAD(&ctx->list_shm); + filp->private_data = ctx; + rc = teedev->desc->ops->open(ctx); + if (rc) + goto err; + + return 0; +err: + kfree(ctx); + tee_device_put(teedev); + return rc; +} + +static int tee_release(struct inode *inode, struct file *filp) +{ + struct tee_context *ctx = filp->private_data; + struct tee_device *teedev = ctx->teedev; + struct tee_shm *shm; + + ctx->teedev->desc->ops->release(ctx); + mutex_lock(&ctx->teedev->mutex); + list_for_each_entry(shm, &ctx->list_shm, link) + shm->ctx = NULL; + mutex_unlock(&ctx->teedev->mutex); + kfree(ctx); + tee_device_put(teedev); + return 0; +} + +static int tee_ioctl_version(struct tee_context *ctx, + struct tee_ioctl_version_data __user *uvers) +{ + struct tee_ioctl_version_data vers; + + ctx->teedev->desc->ops->get_version(ctx->teedev, &vers); + if (copy_to_user(uvers, &vers, sizeof(vers))) + return -EFAULT; + return 0; +} + +static int tee_ioctl_shm_alloc(struct tee_context *ctx, + struct tee_ioctl_shm_alloc_data __user *udata) +{ + long ret; + struct tee_ioctl_shm_alloc_data data; + struct tee_shm *shm; + + if (copy_from_user(&data, udata, sizeof(data))) + return -EFAULT; + + /* Currently no input flags are supported */ + if (data.flags) + return -EINVAL; + + data.id = -1; + + shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + data.id = shm->id; + data.flags = shm->flags; + data.size = shm->size; + + if 
(copy_to_user(udata, &data, sizeof(data))) + ret = -EFAULT; + else + ret = tee_shm_get_fd(shm); + + /* + * When user space closes the file descriptor the shared memory + * should be freed or if tee_shm_get_fd() failed then it will + * be freed immediately. + */ + tee_shm_put(shm); + return ret; +} + +static int params_from_user(struct tee_context *ctx, struct tee_param *params, + size_t num_params, + struct tee_ioctl_param __user *uparams) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_shm *shm; + struct tee_ioctl_param ip; + + if (copy_from_user(&ip, uparams + n, sizeof(ip))) + return -EFAULT; + + /* All unused attribute bits has to be zero */ + if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) + return -EINVAL; + + params[n].attr = ip.attr; + switch (ip.attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + params[n].u.value.a = ip.a; + params[n].u.value.b = ip.b; + params[n].u.value.c = ip.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + /* + * If we fail to get a pointer to a shared memory + * object (and increase the ref count) from an + * identifier we return an error. All pointers that + * has been added in params have an increased ref + * count. It's the callers responibility to do + * tee_shm_put() on all resolved pointers. 
+ */ + shm = tee_shm_get_from_id(ctx, ip.c); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + params[n].u.memref.shm_offs = ip.a; + params[n].u.memref.size = ip.b; + params[n].u.memref.shm = shm; + break; + default: + /* Unknown attribute */ + return -EINVAL; + } + } + return 0; +} + +static int params_to_user(struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_ioctl_param __user *up = uparams + n; + struct tee_param *p = params + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + if (put_user(p->u.value.a, &up->a) || + put_user(p->u.value.b, &up->b) || + put_user(p->u.value.c, &up->c)) + return -EFAULT; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + if (put_user((u64)p->u.memref.size, &up->b)) + return -EFAULT; + default: + break; + } + } + return 0; +} + +static bool param_is_memref(struct tee_param *param) +{ + switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + return true; + default: + return false; + } +} + +static int tee_ioctl_open_session(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + size_t n; + struct tee_ioctl_buf_data buf; + struct tee_ioctl_open_session_arg __user *uarg; + struct tee_ioctl_open_session_arg arg; + struct tee_ioctl_param __user *uparams = NULL; + struct tee_param *params = NULL; + bool have_session = false; + + if (!ctx->teedev->desc->ops->open_session) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_ioctl_open_session_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (copy_from_user(&arg, uarg, 
sizeof(arg))) + return -EFAULT; + + if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + return -EINVAL; + + if (arg.num_params) { + params = kcalloc(arg.num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) + return -ENOMEM; + uparams = uarg->params; + rc = params_from_user(ctx, params, arg.num_params, uparams); + if (rc) + goto out; + } + + rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params); + if (rc) + goto out; + have_session = true; + + if (put_user(arg.session, &uarg->session) || + put_user(arg.ret, &uarg->ret) || + put_user(arg.ret_origin, &uarg->ret_origin)) { + rc = -EFAULT; + goto out; + } + rc = params_to_user(uparams, arg.num_params, params); +out: + /* + * If we've succeeded to open the session but failed to communicate + * it back to user space, close the session again to avoid leakage. + */ + if (rc && have_session && ctx->teedev->desc->ops->close_session) + ctx->teedev->desc->ops->close_session(ctx, arg.session); + + if (params) { + /* Decrease ref count for all valid shared memory pointers */ + for (n = 0; n < arg.num_params; n++) + if (param_is_memref(params + n) && + params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); + kfree(params); + } + + return rc; +} + +static int tee_ioctl_invoke(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + size_t n; + struct tee_ioctl_buf_data buf; + struct tee_ioctl_invoke_arg __user *uarg; + struct tee_ioctl_invoke_arg arg; + struct tee_ioctl_param __user *uparams = NULL; + struct tee_param *params = NULL; + + if (!ctx->teedev->desc->ops->invoke_func) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_ioctl_invoke_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + 
return -EINVAL; + + if (arg.num_params) { + params = kcalloc(arg.num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) + return -ENOMEM; + uparams = uarg->params; + rc = params_from_user(ctx, params, arg.num_params, uparams); + if (rc) + goto out; + } + + rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params); + if (rc) + goto out; + + if (put_user(arg.ret, &uarg->ret) || + put_user(arg.ret_origin, &uarg->ret_origin)) { + rc = -EFAULT; + goto out; + } + rc = params_to_user(uparams, arg.num_params, params); +out: + if (params) { + /* Decrease ref count for all valid shared memory pointers */ + for (n = 0; n < arg.num_params; n++) + if (param_is_memref(params + n) && + params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); + kfree(params); + } + return rc; +} + +static int tee_ioctl_cancel(struct tee_context *ctx, + struct tee_ioctl_cancel_arg __user *uarg) +{ + struct tee_ioctl_cancel_arg arg; + + if (!ctx->teedev->desc->ops->cancel_req) + return -EINVAL; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id, + arg.session); +} + +static int +tee_ioctl_close_session(struct tee_context *ctx, + struct tee_ioctl_close_session_arg __user *uarg) +{ + struct tee_ioctl_close_session_arg arg; + + if (!ctx->teedev->desc->ops->close_session) + return -EINVAL; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + return ctx->teedev->desc->ops->close_session(ctx, arg.session); +} + +static int params_to_supp(struct tee_context *ctx, + struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_ioctl_param ip; + struct tee_param *p = params + n; + + ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK; + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + ip.a = p->u.value.a; + ip.b = p->u.value.b; + ip.c = 
p->u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + ip.b = p->u.memref.size; + if (!p->u.memref.shm) { + ip.a = 0; + ip.c = (u64)-1; /* invalid shm id */ + break; + } + ip.a = p->u.memref.shm_offs; + ip.c = p->u.memref.shm->id; + break; + default: + ip.a = 0; + ip.b = 0; + ip.c = 0; + break; + } + + if (copy_to_user(uparams + n, &ip, sizeof(ip))) + return -EFAULT; + } + + return 0; +} + +static int tee_ioctl_supp_recv(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + struct tee_ioctl_buf_data buf; + struct tee_iocl_supp_recv_arg __user *uarg; + struct tee_param *params; + u32 num_params; + u32 func; + + if (!ctx->teedev->desc->ops->supp_recv) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (get_user(num_params, &uarg->num_params)) + return -EFAULT; + + if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len) + return -EINVAL; + + params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); + if (!params) + return -ENOMEM; + + rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params); + if (rc) + goto out; + + if (put_user(func, &uarg->func) || + put_user(num_params, &uarg->num_params)) { + rc = -EFAULT; + goto out; + } + + rc = params_to_supp(ctx, uarg->params, num_params, params); +out: + kfree(params); + return rc; +} + +static int params_from_supp(struct tee_param *params, size_t num_params, + struct tee_ioctl_param __user *uparams) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_param *p = params + n; + struct tee_ioctl_param ip; + + if (copy_from_user(&ip, uparams + n, sizeof(ip))) + return -EFAULT; + + /* All unused attribute bits has to be zero */ + if (ip.attr & 
~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) + return -EINVAL; + + p->attr = ip.attr; + switch (ip.attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + /* Only out and in/out values can be updated */ + p->u.value.a = ip.a; + p->u.value.b = ip.b; + p->u.value.c = ip.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + /* + * Only the size of the memref can be updated. + * Since we don't have access to the original + * parameters here, only store the supplied size. + * The driver will copy the updated size into the + * original parameters. + */ + p->u.memref.shm = NULL; + p->u.memref.shm_offs = 0; + p->u.memref.size = ip.b; + break; + default: + memset(&p->u, 0, sizeof(p->u)); + break; + } + } + return 0; +} + +static int tee_ioctl_supp_send(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + long rc; + struct tee_ioctl_buf_data buf; + struct tee_iocl_supp_send_arg __user *uarg; + struct tee_param *params; + u32 num_params; + u32 ret; + + /* Not valid for this driver */ + if (!ctx->teedev->desc->ops->supp_send) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_iocl_supp_send_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (get_user(ret, &uarg->ret) || + get_user(num_params, &uarg->num_params)) + return -EFAULT; + + if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len) + return -EINVAL; + + params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); + if (!params) + return -ENOMEM; + + rc = params_from_supp(params, num_params, uarg->params); + if (rc) + goto out; + + rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params); +out: + kfree(params); + return rc; +} + +static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct tee_context *ctx = filp->private_data; 
+ void __user *uarg = (void __user *)arg; + + switch (cmd) { + case TEE_IOC_VERSION: + return tee_ioctl_version(ctx, uarg); + case TEE_IOC_SHM_ALLOC: + return tee_ioctl_shm_alloc(ctx, uarg); + case TEE_IOC_OPEN_SESSION: + return tee_ioctl_open_session(ctx, uarg); + case TEE_IOC_INVOKE: + return tee_ioctl_invoke(ctx, uarg); + case TEE_IOC_CANCEL: + return tee_ioctl_cancel(ctx, uarg); + case TEE_IOC_CLOSE_SESSION: + return tee_ioctl_close_session(ctx, uarg); + case TEE_IOC_SUPPL_RECV: + return tee_ioctl_supp_recv(ctx, uarg); + case TEE_IOC_SUPPL_SEND: + return tee_ioctl_supp_send(ctx, uarg); + default: + return -EINVAL; + } +} + +static const struct file_operations tee_fops = { + .owner = THIS_MODULE, + .open = tee_open, + .release = tee_release, + .unlocked_ioctl = tee_ioctl, + .compat_ioctl = tee_ioctl, +}; + +static void tee_release_device(struct device *dev) +{ + struct tee_device *teedev = container_of(dev, struct tee_device, dev); + + spin_lock(&driver_lock); + clear_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + mutex_destroy(&teedev->mutex); + idr_destroy(&teedev->idr); + kfree(teedev); +} + +/** + * tee_device_alloc() - Allocate a new struct tee_device instance + * @teedesc: Descriptor for this driver + * @dev: Parent device for this device + * @pool: Shared memory pool, NULL if not used + * @driver_data: Private driver data for this device + * + * Allocates a new struct tee_device instance. The device is + * removed by tee_device_unregister(). 
+ * + * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure + */ +struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, + struct device *dev, + struct tee_shm_pool *pool, + void *driver_data) +{ + struct tee_device *teedev; + void *ret; + int rc; + int offs = 0; + + if (!teedesc || !teedesc->name || !teedesc->ops || + !teedesc->ops->get_version || !teedesc->ops->open || + !teedesc->ops->release || !pool) + return ERR_PTR(-EINVAL); + + teedev = kzalloc(sizeof(*teedev), GFP_KERNEL); + if (!teedev) { + ret = ERR_PTR(-ENOMEM); + goto err; + } + + if (teedesc->flags & TEE_DESC_PRIVILEGED) + offs = TEE_NUM_DEVICES / 2; + + spin_lock(&driver_lock); + teedev->id = find_next_zero_bit(dev_mask, TEE_NUM_DEVICES, offs); + if (teedev->id < TEE_NUM_DEVICES) + set_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + + if (teedev->id >= TEE_NUM_DEVICES) { + ret = ERR_PTR(-ENOMEM); + goto err; + } + + snprintf(teedev->name, sizeof(teedev->name), "tee%s%d", + teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "", + teedev->id - offs); + + teedev->dev.class = tee_class; + teedev->dev.release = tee_release_device; + teedev->dev.parent = dev; + + teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id); + + rc = dev_set_name(&teedev->dev, "%s", teedev->name); + if (rc) { + ret = ERR_PTR(rc); + goto err_devt; + } + + cdev_init(&teedev->cdev, &tee_fops); + teedev->cdev.owner = teedesc->owner; + teedev->cdev.kobj.parent = &teedev->dev.kobj; + + dev_set_drvdata(&teedev->dev, driver_data); + device_initialize(&teedev->dev); + + /* 1 as tee_device_unregister() does one final tee_device_put() */ + teedev->num_users = 1; + init_completion(&teedev->c_no_users); + mutex_init(&teedev->mutex); + idr_init(&teedev->idr); + + teedev->desc = teedesc; + teedev->pool = pool; + + return teedev; +err_devt: + unregister_chrdev_region(teedev->dev.devt, 1); +err: + pr_err("could not register %s driver\n", + teedesc->flags & TEE_DESC_PRIVILEGED ? 
"privileged" : "client"); + if (teedev && teedev->id < TEE_NUM_DEVICES) { + spin_lock(&driver_lock); + clear_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + } + kfree(teedev); + return ret; +} +EXPORT_SYMBOL_GPL(tee_device_alloc); + +static ssize_t implementation_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tee_device *teedev = container_of(dev, struct tee_device, dev); + struct tee_ioctl_version_data vers; + + teedev->desc->ops->get_version(teedev, &vers); + return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id); +} +static DEVICE_ATTR_RO(implementation_id); + +static struct attribute *tee_dev_attrs[] = { + &dev_attr_implementation_id.attr, + NULL +}; + +static const struct attribute_group tee_dev_group = { + .attrs = tee_dev_attrs, +}; + +/** + * tee_device_register() - Registers a TEE device + * @teedev: Device to register + * + * tee_device_unregister() need to be called to remove the @teedev if + * this function fails. + * + * @returns < 0 on failure + */ +int tee_device_register(struct tee_device *teedev) +{ + int rc; + + if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) { + dev_err(&teedev->dev, "attempt to register twice\n"); + return -EINVAL; + } + + rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1); + if (rc) { + dev_err(&teedev->dev, + "unable to cdev_add() %s, major %d, minor %d, err=%d\n", + teedev->name, MAJOR(teedev->dev.devt), + MINOR(teedev->dev.devt), rc); + return rc; + } + + rc = device_add(&teedev->dev); + if (rc) { + dev_err(&teedev->dev, + "unable to device_add() %s, major %d, minor %d, err=%d\n", + teedev->name, MAJOR(teedev->dev.devt), + MINOR(teedev->dev.devt), rc); + goto err_device_add; + } + + rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group); + if (rc) { + dev_err(&teedev->dev, + "failed to create sysfs attributes, err=%d\n", rc); + goto err_sysfs_create_group; + } + + teedev->flags |= TEE_DEVICE_FLAG_REGISTERED; + return 0; + +err_sysfs_create_group: + device_del(&teedev->dev); 
+err_device_add: + cdev_del(&teedev->cdev); + return rc; +} +EXPORT_SYMBOL_GPL(tee_device_register); + +void tee_device_put(struct tee_device *teedev) +{ + mutex_lock(&teedev->mutex); + /* Shouldn't put in this state */ + if (!WARN_ON(!teedev->desc)) { + teedev->num_users--; + if (!teedev->num_users) { + teedev->desc = NULL; + complete(&teedev->c_no_users); + } + } + mutex_unlock(&teedev->mutex); +} + +bool tee_device_get(struct tee_device *teedev) +{ + mutex_lock(&teedev->mutex); + if (!teedev->desc) { + mutex_unlock(&teedev->mutex); + return false; + } + teedev->num_users++; + mutex_unlock(&teedev->mutex); + return true; +} + +/** + * tee_device_unregister() - Removes a TEE device + * @teedev: Device to unregister + * + * This function should be called to remove the @teedev even if + * tee_device_register() hasn't been called yet. Does nothing if + * @teedev is NULL. + */ +void tee_device_unregister(struct tee_device *teedev) +{ + if (!teedev) + return; + + if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) { + sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group); + cdev_del(&teedev->cdev); + device_del(&teedev->dev); + } + + tee_device_put(teedev); + wait_for_completion(&teedev->c_no_users); + + /* + * No need to take a mutex any longer now since teedev->desc was + * set to NULL before teedev->c_no_users was completed. + */ + + teedev->pool = NULL; + + put_device(&teedev->dev); +} +EXPORT_SYMBOL_GPL(tee_device_unregister); + +/** + * tee_get_drvdata() - Return driver_data pointer + * @teedev: Device containing the driver_data pointer + * @returns the driver_data pointer supplied to tee_register(). 
+ */ +void *tee_get_drvdata(struct tee_device *teedev) +{ + return dev_get_drvdata(&teedev->dev); +} +EXPORT_SYMBOL_GPL(tee_get_drvdata); + +static int __init tee_init(void) +{ + int rc; + + tee_class = class_create(THIS_MODULE, "tee"); + if (IS_ERR(tee_class)) { + pr_err("couldn't create class\n"); + return PTR_ERR(tee_class); + } + + rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee"); + if (rc) { + pr_err("failed to allocate char dev region\n"); + class_destroy(tee_class); + tee_class = NULL; + } + + return rc; +} + +static void __exit tee_exit(void) +{ + class_destroy(tee_class); + tee_class = NULL; + unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES); +} + +subsys_initcall(tee_init); +module_exit(tee_exit); + +MODULE_AUTHOR("Linaro"); +MODULE_DESCRIPTION("TEE Driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h new file mode 100644 index 000000000000..21cb6be8bce9 --- /dev/null +++ b/drivers/tee/tee_private.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef TEE_PRIVATE_H +#define TEE_PRIVATE_H + +#include <linux/cdev.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/types.h> + +struct tee_device; + +/** + * struct tee_shm - shared memory object + * @teedev: device used to allocate the object + * @ctx: context using the object, if NULL the context is gone + * @link link element + * @paddr: physical address of the shared memory + * @kaddr: virtual address of the shared memory + * @size: size of shared memory + * @dmabuf: dmabuf used to for exporting to user space + * @flags: defined by TEE_SHM_* in tee_drv.h + * @id: unique id of a shared memory object on this device + */ +struct tee_shm { + struct tee_device *teedev; + struct tee_context *ctx; + struct list_head link; + phys_addr_t paddr; + void *kaddr; + size_t size; + struct dma_buf *dmabuf; + u32 flags; + int id; +}; + +struct tee_shm_pool_mgr; + +/** + * struct tee_shm_pool_mgr_ops - shared memory pool manager operations + * @alloc: called when allocating shared memory + * @free: called when freeing shared memory + */ +struct tee_shm_pool_mgr_ops { + int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, + size_t size); + void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); +}; + +/** + * struct tee_shm_pool_mgr - shared memory manager + * @ops: operations + * @private_data: private data for the shared memory manager + */ +struct tee_shm_pool_mgr { + const struct tee_shm_pool_mgr_ops *ops; + void *private_data; +}; + +/** + * struct tee_shm_pool - shared memory pool + * @private_mgr: pool manager for shared memory only between kernel + * and secure world + * @dma_buf_mgr: pool manager for shared memory exported to user space + * @destroy: called when destroying the pool + * @private_data: private data for the pool + */ +struct tee_shm_pool { + struct tee_shm_pool_mgr private_mgr; + struct tee_shm_pool_mgr dma_buf_mgr; + void 
(*destroy)(struct tee_shm_pool *pool); + void *private_data; +}; + +#define TEE_DEVICE_FLAG_REGISTERED 0x1 +#define TEE_MAX_DEV_NAME_LEN 32 + +/** + * struct tee_device - TEE Device representation + * @name: name of device + * @desc: description of device + * @id: unique id of device + * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above + * @dev: embedded basic device structure + * @cdev: embedded cdev + * @num_users: number of active users of this device + * @c_no_user: completion used when unregistering the device + * @mutex: mutex protecting @num_users and @idr + * @idr: register of shared memory object allocated on this device + * @pool: shared memory pool + */ +struct tee_device { + char name[TEE_MAX_DEV_NAME_LEN]; + const struct tee_desc *desc; + int id; + unsigned int flags; + + struct device dev; + struct cdev cdev; + + size_t num_users; + struct completion c_no_users; + struct mutex mutex; /* protects num_users and idr */ + + struct idr idr; + struct tee_shm_pool *pool; +}; + +int tee_shm_init(void); + +int tee_shm_get_fd(struct tee_shm *shm); + +bool tee_device_get(struct tee_device *teedev); +void tee_device_put(struct tee_device *teedev); + +#endif /*TEE_PRIVATE_H*/ diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c new file mode 100644 index 000000000000..0be1e3e93bee --- /dev/null +++ b/drivers/tee/tee_shm.c @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/device.h> +#include <linux/dma-buf.h> +#include <linux/fdtable.h> +#include <linux/idr.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "tee_private.h" + +static void tee_shm_release(struct tee_shm *shm) +{ + struct tee_device *teedev = shm->teedev; + struct tee_shm_pool_mgr *poolm; + + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + if (shm->ctx) + list_del(&shm->link); + mutex_unlock(&teedev->mutex); + + if (shm->flags & TEE_SHM_DMA_BUF) + poolm = &teedev->pool->dma_buf_mgr; + else + poolm = &teedev->pool->private_mgr; + + poolm->ops->free(poolm, shm); + kfree(shm); + + tee_device_put(teedev); +} + +static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment + *attach, enum dma_data_direction dir) +{ + return NULL; +} + +static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *table, + enum dma_data_direction dir) +{ +} + +static void tee_shm_op_release(struct dma_buf *dmabuf) +{ + struct tee_shm *shm = dmabuf->priv; + + tee_shm_release(shm); +} + +static void *tee_shm_op_kmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum) +{ + return NULL; +} + +static void *tee_shm_op_kmap(struct dma_buf *dmabuf, unsigned long pgnum) +{ + return NULL; +} + +static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct tee_shm *shm = dmabuf->priv; + size_t size = vma->vm_end - vma->vm_start; + + return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, + size, vma->vm_page_prot); +} + +static struct dma_buf_ops tee_shm_dma_buf_ops = { + .map_dma_buf = tee_shm_op_map_dma_buf, + .unmap_dma_buf = tee_shm_op_unmap_dma_buf, + .release = tee_shm_op_release, + .kmap_atomic = tee_shm_op_kmap_atomic, + .kmap = tee_shm_op_kmap, + .mmap = tee_shm_op_mmap, +}; + +/** + * tee_shm_alloc() - Allocate shared memory + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * 
@flags: Flags setting properties for the requested shared memory. + * + * Memory allocated as global shared memory is automatically freed when the + * TEE file pointer is closed. The @flags field uses the bits defined by + * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be + * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and + * associated with a dma-buf handle, else driver private memory. + */ +struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) +{ + struct tee_device *teedev = ctx->teedev; + struct tee_shm_pool_mgr *poolm = NULL; + struct tee_shm *shm; + void *ret; + int rc; + + if (!(flags & TEE_SHM_MAPPED)) { + dev_err(teedev->dev.parent, + "only mapped allocations supported\n"); + return ERR_PTR(-EINVAL); + } + + if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) { + dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags); + return ERR_PTR(-EINVAL); + } + + if (!tee_device_get(teedev)) + return ERR_PTR(-EINVAL); + + if (!teedev->pool) { + /* teedev has been detached from driver */ + ret = ERR_PTR(-EINVAL); + goto err_dev_put; + } + + shm = kzalloc(sizeof(*shm), GFP_KERNEL); + if (!shm) { + ret = ERR_PTR(-ENOMEM); + goto err_dev_put; + } + + shm->flags = flags; + shm->teedev = teedev; + shm->ctx = ctx; + if (flags & TEE_SHM_DMA_BUF) + poolm = &teedev->pool->dma_buf_mgr; + else + poolm = &teedev->pool->private_mgr; + + rc = poolm->ops->alloc(poolm, shm, size); + if (rc) { + ret = ERR_PTR(rc); + goto err_kfree; + } + + mutex_lock(&teedev->mutex); + shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + if (shm->id < 0) { + ret = ERR_PTR(shm->id); + goto err_pool_free; + } + + if (flags & TEE_SHM_DMA_BUF) { + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &tee_shm_dma_buf_ops; + exp_info.size = shm->size; + exp_info.flags = O_RDWR; + exp_info.priv = shm; + + shm->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(shm->dmabuf)) { + ret = 
ERR_CAST(shm->dmabuf); + goto err_rem; + } + } + mutex_lock(&teedev->mutex); + list_add_tail(&shm->link, &ctx->list_shm); + mutex_unlock(&teedev->mutex); + + return shm; +err_rem: + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + mutex_unlock(&teedev->mutex); +err_pool_free: + poolm->ops->free(poolm, shm); +err_kfree: + kfree(shm); +err_dev_put: + tee_device_put(teedev); + return ret; +} +EXPORT_SYMBOL_GPL(tee_shm_alloc); + +/** + * tee_shm_get_fd() - Increase reference count and return file descriptor + * @shm: Shared memory handle + * @returns user space file descriptor to shared memory + */ +int tee_shm_get_fd(struct tee_shm *shm) +{ + u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF; + int fd; + + if ((shm->flags & req_flags) != req_flags) + return -EINVAL; + + fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); + if (fd >= 0) + get_dma_buf(shm->dmabuf); + return fd; +} + +/** + * tee_shm_free() - Free shared memory + * @shm: Handle to shared memory to free + */ +void tee_shm_free(struct tee_shm *shm) +{ + /* + * dma_buf_put() decreases the dmabuf reference counter and will + * call tee_shm_release() when the last reference is gone. + * + * In the case of driver private memory we call tee_shm_release + * directly instead as it doesn't have a reference counter. 
+ */ + if (shm->flags & TEE_SHM_DMA_BUF) + dma_buf_put(shm->dmabuf); + else + tee_shm_release(shm); +} +EXPORT_SYMBOL_GPL(tee_shm_free); + +/** + * tee_shm_va2pa() - Get physical address of a virtual address + * @shm: Shared memory handle + * @va: Virtual address to tranlsate + * @pa: Returned physical address + * @returns 0 on success and < 0 on failure + */ +int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa) +{ + /* Check that we're in the range of the shm */ + if ((char *)va < (char *)shm->kaddr) + return -EINVAL; + if ((char *)va >= ((char *)shm->kaddr + shm->size)) + return -EINVAL; + + return tee_shm_get_pa( + shm, (unsigned long)va - (unsigned long)shm->kaddr, pa); +} +EXPORT_SYMBOL_GPL(tee_shm_va2pa); + +/** + * tee_shm_pa2va() - Get virtual address of a physical address + * @shm: Shared memory handle + * @pa: Physical address to tranlsate + * @va: Returned virtual address + * @returns 0 on success and < 0 on failure + */ +int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va) +{ + /* Check that we're in the range of the shm */ + if (pa < shm->paddr) + return -EINVAL; + if (pa >= (shm->paddr + shm->size)) + return -EINVAL; + + if (va) { + void *v = tee_shm_get_va(shm, pa - shm->paddr); + + if (IS_ERR(v)) + return PTR_ERR(v); + *va = v; + } + return 0; +} +EXPORT_SYMBOL_GPL(tee_shm_pa2va); + +/** + * tee_shm_get_va() - Get virtual address of a shared memory plus an offset + * @shm: Shared memory handle + * @offs: Offset from start of this shared memory + * @returns virtual address of the shared memory + offs if offs is within + * the bounds of this shared memory, else an ERR_PTR + */ +void *tee_shm_get_va(struct tee_shm *shm, size_t offs) +{ + if (offs >= shm->size) + return ERR_PTR(-EINVAL); + return (char *)shm->kaddr + offs; +} +EXPORT_SYMBOL_GPL(tee_shm_get_va); + +/** + * tee_shm_get_pa() - Get physical address of a shared memory plus an offset + * @shm: Shared memory handle + * @offs: Offset from start of this shared memory + 
* @pa: Physical address to return + * @returns 0 if offs is within the bounds of this shared memory, else an + * error code. + */ +int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa) +{ + if (offs >= shm->size) + return -EINVAL; + if (pa) + *pa = shm->paddr + offs; + return 0; +} +EXPORT_SYMBOL_GPL(tee_shm_get_pa); + +/** + * tee_shm_get_from_id() - Find shared memory object and increase reference + * count + * @ctx: Context owning the shared memory + * @id: Id of shared memory object + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure + */ +struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id) +{ + struct tee_device *teedev; + struct tee_shm *shm; + + if (!ctx) + return ERR_PTR(-EINVAL); + + teedev = ctx->teedev; + mutex_lock(&teedev->mutex); + shm = idr_find(&teedev->idr, id); + if (!shm || shm->ctx != ctx) + shm = ERR_PTR(-EINVAL); + else if (shm->flags & TEE_SHM_DMA_BUF) + get_dma_buf(shm->dmabuf); + mutex_unlock(&teedev->mutex); + return shm; +} +EXPORT_SYMBOL_GPL(tee_shm_get_from_id); + +/** + * tee_shm_get_id() - Get id of a shared memory object + * @shm: Shared memory handle + * @returns id + */ +int tee_shm_get_id(struct tee_shm *shm) +{ + return shm->id; +} +EXPORT_SYMBOL_GPL(tee_shm_get_id); + +/** + * tee_shm_put() - Decrease reference count on a shared memory handle + * @shm: Shared memory handle + */ +void tee_shm_put(struct tee_shm *shm) +{ + if (shm->flags & TEE_SHM_DMA_BUF) + dma_buf_put(shm->dmabuf); +} +EXPORT_SYMBOL_GPL(tee_shm_put); diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c new file mode 100644 index 000000000000..fb4f8522a526 --- /dev/null +++ b/drivers/tee/tee_shm_pool.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/device.h> +#include <linux/dma-buf.h> +#include <linux/genalloc.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "tee_private.h" + +static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm, size_t size) +{ + unsigned long va; + struct gen_pool *genpool = poolm->private_data; + size_t s = roundup(size, 1 << genpool->min_alloc_order); + + va = gen_pool_alloc(genpool, s); + if (!va) + return -ENOMEM; + + memset((void *)va, 0, s); + shm->kaddr = (void *)va; + shm->paddr = gen_pool_virt_to_phys(genpool, va); + shm->size = s; + return 0; +} + +static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm) +{ + gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr, + shm->size); + shm->kaddr = NULL; +} + +static const struct tee_shm_pool_mgr_ops pool_ops_generic = { + .alloc = pool_op_gen_alloc, + .free = pool_op_gen_free, +}; + +static void pool_res_mem_destroy(struct tee_shm_pool *pool) +{ + gen_pool_destroy(pool->private_mgr.private_data); + gen_pool_destroy(pool->dma_buf_mgr.private_data); +} + +static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr, + struct tee_shm_pool_mem_info *info, + int min_alloc_order) +{ + size_t page_mask = PAGE_SIZE - 1; + struct gen_pool *genpool = NULL; + int rc; + + /* + * Start and end must be page aligned + */ + if ((info->vaddr & page_mask) || (info->paddr & page_mask) || + (info->size & page_mask)) + return -EINVAL; + + genpool = gen_pool_create(min_alloc_order, -1); + if (!genpool) + return -ENOMEM; + + gen_pool_set_algo(genpool, gen_pool_best_fit, NULL); + rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size, + -1); + if (rc) { + gen_pool_destroy(genpool); + return 
rc; + } + + mgr->private_data = genpool; + mgr->ops = &pool_ops_generic; + return 0; +} + +/** + * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved + * memory range + * @priv_info: Information for driver private shared memory pool + * @dmabuf_info: Information for dma-buf shared memory pool + * + * Start and end of pools will must be page aligned. + * + * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied + * in @dmabuf, others will use the range provided by @priv. + * + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. + */ +struct tee_shm_pool * +tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, + struct tee_shm_pool_mem_info *dmabuf_info) +{ + struct tee_shm_pool *pool = NULL; + int ret; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) { + ret = -ENOMEM; + goto err; + } + + /* + * Create the pool for driver private shared memory + */ + ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info, + 3 /* 8 byte aligned */); + if (ret) + goto err; + + /* + * Create the pool for dma_buf shared memory + */ + ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info, + PAGE_SHIFT); + if (ret) + goto err; + + pool->destroy = pool_res_mem_destroy; + return pool; +err: + if (ret == -ENOMEM) + pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__); + if (pool && pool->private_mgr.private_data) + gen_pool_destroy(pool->private_mgr.private_data); + kfree(pool); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); + +/** + * tee_shm_pool_free() - Free a shared memory pool + * @pool: The shared memory pool to free + * + * There must be no remaining shared memory allocated from this pool when + * this function is called. 
+ */ +void tee_shm_pool_free(struct tee_shm_pool *pool) +{ + pool->destroy(pool); + kfree(pool); +} +EXPORT_SYMBOL_GPL(tee_shm_pool_free); diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index 89474399ab89..1d5a9e5fb069 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -117,7 +117,7 @@ static int fintek_8250_rs485_config(struct uart_port *port, if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) == (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND))) - rs485->flags &= SER_RS485_ENABLED; + rs485->flags &= ~SER_RS485_ENABLED; else config |= RS485_URA; diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index cf3da51a3536..7025f47fa284 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5797,6 +5797,9 @@ static struct pci_device_id serial_pci_tbl[] = { { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 }, { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 }, + /* Amazon PCI serial device */ + { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 }, + /* * These entries match devices with class COMMUNICATION_SERIAL, * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 56ccbcefdd85..d42d66b72d5a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2223,8 +2223,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud, serial_dl_write(up, quot); /* XR17V35x UARTs have an extra fractional divisor register (DLD) */ - if (up->port.type == PORT_XR17V35X) + if (up->port.type == PORT_XR17V35X) { + /* Preserve bits not related to baudrate; DLD[7:4]. 
*/ + quot_frac |= serial_port_in(port, 0x2) & 0xf0; serial_port_out(port, 0x2, quot_frac); + } } static unsigned int diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index de1c143b475f..21fc9b3a27cf 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -693,7 +693,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl) if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) up->efr |= UART_EFR_RTS; else - up->efr &= UART_EFR_RTS; + up->efr &= ~UART_EFR_RTS; serial_out(up, UART_EFR, up->efr); serial_out(up, UART_LCR, lcr); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 235e150d7b81..80d0ffe7abc1 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -163,18 +163,17 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { }, /* - * Common definitions for legacy IrDA ports, dependent on - * regshift value. + * Common definitions for legacy IrDA ports. 
*/ [SCIx_IRDA_REGTYPE] = { [SCSMR] = { 0x00, 8 }, - [SCBRR] = { 0x01, 8 }, - [SCSCR] = { 0x02, 8 }, - [SCxTDR] = { 0x03, 8 }, - [SCxSR] = { 0x04, 8 }, - [SCxRDR] = { 0x05, 8 }, - [SCFCR] = { 0x06, 8 }, - [SCFDR] = { 0x07, 16 }, + [SCBRR] = { 0x02, 8 }, + [SCSCR] = { 0x04, 8 }, + [SCxTDR] = { 0x06, 8 }, + [SCxSR] = { 0x08, 16 }, + [SCxRDR] = { 0x0a, 8 }, + [SCFCR] = { 0x0c, 8 }, + [SCFDR] = { 0x0e, 16 }, [SCTFDR] = sci_reg_invalid, [SCRFDR] = sci_reg_invalid, [SCSPTR] = sci_reg_invalid, diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 1ca9cea2eaf8..64dc549276af 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -244,8 +244,10 @@ static void sysrq_handle_showallcpus(int key) * architecture has no support for it: */ if (!trigger_all_cpu_backtrace()) { - struct pt_regs *regs = get_irq_regs(); + struct pt_regs *regs = NULL; + if (in_irq()) + regs = get_irq_regs(); if (regs) { pr_info("CPU%d:\n", smp_processor_id()); show_regs(regs); @@ -264,7 +266,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = { static void sysrq_handle_showregs(int key) { - struct pt_regs *regs = get_irq_regs(); + struct pt_regs *regs = NULL; + + if (in_irq()) + regs = get_irq_regs(); if (regs) show_regs(regs); perf_event_print_debug(); diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index e8846c91ca71..7fab79f9eb33 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -871,14 +871,25 @@ void usb_release_bos_descriptor(struct usb_device *dev) } } +static const __u8 bos_desc_len[256] = { + [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE, + [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE, + [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE, + [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1), + [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE, + [USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE, +}; + /* Get BOS descriptor set */ int usb_get_bos_descriptor(struct usb_device *dev) { struct device *ddev = &dev->dev; struct usb_bos_descriptor 
*bos; struct usb_dev_cap_header *cap; + struct usb_ssp_cap_descriptor *ssp_cap; unsigned char *buffer; - int length, total_len, num, i; + int length, total_len, num, i, ssac; + __u8 cap_type; int ret; bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL); @@ -931,7 +942,13 @@ int usb_get_bos_descriptor(struct usb_device *dev) dev->bos->desc->bNumDeviceCaps = i; break; } + cap_type = cap->bDevCapabilityType; length = cap->bLength; + if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) { + dev->bos->desc->bNumDeviceCaps = i; + break; + } + total_len -= length; if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { @@ -939,7 +956,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) continue; } - switch (cap->bDevCapabilityType) { + switch (cap_type) { case USB_CAP_TYPE_WIRELESS_USB: /* Wireless USB cap descriptor is handled by wusb */ break; @@ -952,13 +969,20 @@ int usb_get_bos_descriptor(struct usb_device *dev) (struct usb_ss_cap_descriptor *)buffer; break; case USB_SSP_CAP_TYPE: - dev->bos->ssp_cap = - (struct usb_ssp_cap_descriptor *)buffer; + ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; + ssac = (le32_to_cpu(ssp_cap->bmAttributes) & + USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1; + if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac)) + dev->bos->ssp_cap = ssp_cap; break; case CONTAINER_ID_TYPE: dev->bos->ss_id = (struct usb_ss_container_id_descriptor *)buffer; break; + case USB_PTM_CAP_TYPE: + dev->bos->ptm_cap = + (struct usb_ptm_cap_descriptor *)buffer; + break; case USB_CAP_TYPE_CONFIG_SUMMARY: /* one such desc per configuration */ if (!dev->bos->num_config_summary_desc) diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 873ba02d59e6..ad2e6d235c30 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -113,42 +113,38 @@ enum snoop_when { #define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0) /* Limit on the total amount of memory we can allocate for transfers */ -static unsigned usbfs_memory_mb = 16; +static u32 
usbfs_memory_mb = 16; module_param(usbfs_memory_mb, uint, 0644); MODULE_PARM_DESC(usbfs_memory_mb, "maximum MB allowed for usbfs buffers (0 = no limit)"); /* Hard limit, necessary to avoid arithmetic overflow */ -#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) +#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) -static atomic_t usbfs_memory_usage; /* Total memory currently allocated */ +static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */ /* Check whether it's okay to allocate more memory for a transfer */ -static int usbfs_increase_memory_usage(unsigned amount) +static int usbfs_increase_memory_usage(u64 amount) { - unsigned lim; + u64 lim; - /* - * Convert usbfs_memory_mb to bytes, avoiding overflows. - * 0 means use the hard limit (effectively unlimited). - */ lim = ACCESS_ONCE(usbfs_memory_mb); - if (lim == 0 || lim > (USBFS_XFER_MAX >> 20)) - lim = USBFS_XFER_MAX; - else - lim <<= 20; + lim <<= 20; - atomic_add(amount, &usbfs_memory_usage); - if (atomic_read(&usbfs_memory_usage) <= lim) - return 0; - atomic_sub(amount, &usbfs_memory_usage); - return -ENOMEM; + atomic64_add(amount, &usbfs_memory_usage); + + if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) { + atomic64_sub(amount, &usbfs_memory_usage); + return -ENOMEM; + } + + return 0; } /* Memory for a transfer is being deallocated */ -static void usbfs_decrease_memory_usage(unsigned amount) +static void usbfs_decrease_memory_usage(u64 amount) { - atomic_sub(amount, &usbfs_memory_usage); + atomic64_sub(amount, &usbfs_memory_usage); } static int connected(struct usb_dev_state *ps) @@ -1077,7 +1073,7 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg) if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN))) return -EINVAL; len1 = bulk.len; - if (len1 >= USBFS_XFER_MAX) + if (len1 >= (INT_MAX - sizeof(struct urb))) return -EINVAL; ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb)); if (ret) @@ -1297,13 +1293,19 @@ static int proc_do_submiturb(struct 
usb_dev_state *ps, struct usbdevfs_urb *uurb int number_of_packets = 0; unsigned int stream_id = 0; void *buf; - - if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP | - USBDEVFS_URB_SHORT_NOT_OK | + unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | USBDEVFS_URB_ZERO_PACKET | - USBDEVFS_URB_NO_INTERRUPT)) + USBDEVFS_URB_NO_INTERRUPT; + /* USBDEVFS_URB_ISO_ASAP is a special case */ + if (uurb->type == USBDEVFS_URB_TYPE_ISO) + mask |= USBDEVFS_URB_ISO_ASAP; + + if (uurb->flags & ~mask) + return -EINVAL; + + if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) return -EINVAL; if (uurb->buffer_length > 0 && !uurb->buffer) return -EINVAL; @@ -1424,10 +1426,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb return -EINVAL; } - if (uurb->buffer_length >= USBFS_XFER_MAX) { - ret = -EINVAL; - goto error; - } if (uurb->buffer_length > 0 && !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ, uurb->buffer, uurb->buffer_length)) { @@ -1653,6 +1651,18 @@ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg) return 0; } +static void compute_isochronous_actual_length(struct urb *urb) +{ + unsigned int i; + + if (urb->number_of_packets > 0) { + urb->actual_length = 0; + for (i = 0; i < urb->number_of_packets; i++) + urb->actual_length += + urb->iso_frame_desc[i].actual_length; + } +} + static int processcompl(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; @@ -1660,6 +1670,7 @@ static int processcompl(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; + compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) goto err_out; @@ -1829,6 +1840,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; + compute_isochronous_actual_length(urb); if (as->userbuffer && 
urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) return -EFAULT; diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 1fb9191b8542..592f45e6dbac 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -3057,6 +3057,7 @@ void usb_remove_hcd(struct usb_hcd *hcd) } usb_put_invalidate_rhdev(hcd); + hcd->flags = 0; } EXPORT_SYMBOL_GPL(usb_remove_hcd); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 5644051b4010..5df314dd5f3c 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4877,6 +4877,15 @@ loop: usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; + + /* When halfway through our retry count, power-cycle the port */ + if (i == (SET_CONFIG_TRIES / 2) - 1) { + dev_info(&port_dev->dev, "attempt power cycle\n"); + usb_hub_set_port_power(hdev, hub, port1, false); + msleep(2 * hub_power_on_good_delay(hub)); + usb_hub_set_port_power(hdev, hub, port1, true); + msleep(hub_power_on_good_delay(hub)); + } } if (hub->hdev->parent || !hcd->driver->port_handed_over || diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a6aaf2f193a4..50010282c010 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */ + { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM }, + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, @@ -221,6 +224,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Corsair Strafe RGB */ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair K70 LUX */ + { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, + /* MIDI keyboard WORLDE MINI */ { 
USB_DEVICE(0x1c75, 0x0204), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c index a28bcd084dc3..8a2346ce6b42 100644 --- a/drivers/usb/gadget/function/f_qc_rndis.c +++ b/drivers/usb/gadget/function/f_qc_rndis.c @@ -106,6 +106,7 @@ struct f_rndis_qc { u8 port_num; u16 cdc_filter; bool net_ready_trigger; + bool use_wceis; }; static struct ipa_usb_init_params rndis_ipa_params; @@ -161,9 +162,9 @@ static struct usb_interface_descriptor rndis_qc_control_intf = { /* .bInterfaceNumber = DYNAMIC */ /* status endpoint is optional; this could be patched later */ .bNumEndpoints = 1, - .bInterfaceClass = USB_CLASS_WIRELESS_CONTROLLER, - .bInterfaceSubClass = 0x01, - .bInterfaceProtocol = 0x03, + .bInterfaceClass = USB_CLASS_MISC, + .bInterfaceSubClass = 0x04, + .bInterfaceProtocol = 0x01, /* RNDIS over ethernet */ /* .iInterface = DYNAMIC */ }; @@ -222,9 +223,9 @@ rndis_qc_iad_descriptor = { .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, .bFirstInterface = 0, /* XXX, hardcoded */ .bInterfaceCount = 2, /* control + data */ - .bFunctionClass = USB_CLASS_WIRELESS_CONTROLLER, - .bFunctionSubClass = 0x01, - .bFunctionProtocol = 0x03, + .bFunctionClass = USB_CLASS_MISC, + .bFunctionSubClass = 0x04, + .bFunctionProtocol = 0x01, /* RNDIS over ethernet */ /* .iFunction = DYNAMIC */ }; @@ -935,6 +936,17 @@ rndis_qc_bind(struct usb_configuration *c, struct usb_function *f) rndis_qc_iad_descriptor.iFunction = status; } + if (rndis->use_wceis) { + rndis_qc_iad_descriptor.bFunctionClass = + USB_CLASS_WIRELESS_CONTROLLER; + rndis_qc_iad_descriptor.bFunctionSubClass = 0x01; + rndis_qc_iad_descriptor.bFunctionProtocol = 0x03; + rndis_qc_control_intf.bInterfaceClass = + USB_CLASS_WIRELESS_CONTROLLER; + rndis_qc_control_intf.bInterfaceSubClass = 0x1; + rndis_qc_control_intf.bInterfaceProtocol = 0x03; + } + /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 
0) @@ -1470,8 +1482,38 @@ static struct configfs_item_operations qcrndis_item_ops = { .release = qcrndis_attr_release, }; + +static ssize_t qcrndis_wceis_show(struct config_item *item, char *page) +{ + struct f_rndis_qc *rndis = to_f_qc_rndis_opts(item)->rndis; + + return snprintf(page, PAGE_SIZE, "%d\n", rndis->use_wceis); +} + +static ssize_t qcrndis_wceis_store(struct config_item *item, + const char *page, size_t len) +{ + struct f_rndis_qc *rndis = to_f_qc_rndis_opts(item)->rndis; + bool val; + + if (kstrtobool(page, &val)) + return -EINVAL; + + rndis->use_wceis = val; + + return len; +} + +CONFIGFS_ATTR(qcrndis_, wceis); + +static struct configfs_attribute *qcrndis_attrs[] = { + &qcrndis_attr_wceis, + NULL, +}; + static struct config_item_type qcrndis_func_type = { .ct_item_ops = &qcrndis_item_ops, + .ct_attrs = qcrndis_attrs, .ct_owner = THIS_MODULE, }; diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index 8d22fda48618..c1c14d818b5c 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c @@ -851,7 +851,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf) default: /* unknown */ break; } - temp = (cap >> 8) & 0xff; + offset = (cap >> 8) & 0xff; } } #endif diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 35e0c046fdcc..9daa5b196bc7 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -981,6 +981,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) if (!vdev) return; + if (vdev->real_port == 0 || + vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { + xhci_dbg(xhci, "Bad vdev->real_port.\n"); + goto out; + } + tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { /* is this a hub device that added a tt_info to the tts list */ @@ -994,6 +1000,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) } } } +out: /* we are now at a leaf 
device */ xhci_free_virt_device(xhci, slot_id); } diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 2e947dc94e32..bc92a498ec03 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -185,12 +185,13 @@ found: return tmp; } - if (in) { + if (in) dev->in_pipe = usb_rcvbulkpipe(udev, in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + if (out) dev->out_pipe = usb_sndbulkpipe(udev, out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); - } + if (iso_in) { dev->iso_in = &iso_in->desc; dev->in_iso_pipe = usb_rcvisocpipe(udev, diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c index ab5d364f6e8c..335a1ef35224 100644 --- a/drivers/usb/phy/phy-tahvo.c +++ b/drivers/usb/phy/phy-tahvo.c @@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev) tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable); if (IS_ERR(tu->extcon)) { dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); - return -ENOMEM; + ret = PTR_ERR(tu->extcon); + goto err_disable_clk; } ret = devm_extcon_dev_register(&pdev->dev, tu->extcon); diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 37d0e8cc7af6..2220c1b9df10 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -138,6 +138,7 @@ struct garmin_data { __u8 privpkt[4*6]; spinlock_t lock; struct list_head pktlist; + struct usb_anchor write_urbs; }; @@ -906,7 +907,7 @@ static int garmin_init_session(struct usb_serial_port *port) sizeof(GARMIN_START_SESSION_REQ), 0); if (status < 0) - break; + goto err_kill_urbs; } if (status > 0) @@ -914,6 +915,12 @@ static int garmin_init_session(struct usb_serial_port *port) } return status; + +err_kill_urbs: + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); + usb_kill_urb(port->interrupt_in_urb); + + return status; } @@ -931,7 +938,6 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port) 
spin_unlock_irqrestore(&garmin_data_p->lock, flags); /* shutdown any bulk reads that might be going on */ - usb_kill_urb(port->write_urb); usb_kill_urb(port->read_urb); if (garmin_data_p->state == STATE_RESET) @@ -954,7 +960,7 @@ static void garmin_close(struct usb_serial_port *port) /* shutdown our urbs */ usb_kill_urb(port->read_urb); - usb_kill_urb(port->write_urb); + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); /* keep reset state so we know that we must start a new session */ if (garmin_data_p->state != STATE_RESET) @@ -1038,12 +1044,14 @@ static int garmin_write_bulk(struct usb_serial_port *port, } /* send it down the pipe */ + usb_anchor_urb(urb, &garmin_data_p->write_urbs); status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed with status = %d\n", __func__, status); count = status; + usb_unanchor_urb(urb); kfree(buffer); } @@ -1402,9 +1410,16 @@ static int garmin_port_probe(struct usb_serial_port *port) garmin_data_p->state = 0; garmin_data_p->flags = 0; garmin_data_p->count = 0; + init_usb_anchor(&garmin_data_p->write_urbs); usb_set_serial_port_data(port, garmin_data_p); status = garmin_init_session(port); + if (status) + goto err_free; + + return 0; +err_free: + kfree(garmin_data_p); return status; } @@ -1414,6 +1429,7 @@ static int garmin_port_remove(struct usb_serial_port *port) { struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); usb_kill_urb(port->interrupt_in_urb); del_timer_sync(&garmin_data_p->timer); kfree(garmin_data_p); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index db3d34c2c82e..ffa8ec917ff5 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC25 0x0125 +#define 
QUECTEL_PRODUCT_BG96 0x0296 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -1185,6 +1186,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index e1c1e329c877..4516291df1b8 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -148,6 +148,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC7304/MC7354 */ {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ + {DEVICE_SWI(0x1199, 0x901e)}, /* Sierra Wireless EM7355 QDL */ {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */ diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index a155cd02bce2..ecc83c405a8b 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -111,6 +111,10 @@ static int uas_use_uas_driver(struct usb_interface *intf, } } + /* All Seagate disk enclosures have broken ATA pass-through support */ + if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2) + flags |= US_FL_NO_ATA_1X; + usb_stor_adjust_quirks(udev, &flags); if (flags & US_FL_IGNORE_UAS) { diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index e4110d6de0b5..da6cc25baaef 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -703,6 +703,7 @@ 
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, struct scatterlist *sg, int sg_count) { size_t off = iter->iov_offset; + struct scatterlist *p = sg; int i, ret; for (i = 0; i < iter->nr_segs; i++) { @@ -711,8 +712,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write); if (ret < 0) { - for (i = 0; i < sg_count; i++) { - struct page *page = sg_page(&sg[i]); + while (p < sg) { + struct page *page = sg_page(p++); if (page) put_page(page); } diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c index dd88ba1d71ce..35373e2065b2 100644 --- a/drivers/video/backlight/adp5520_bl.c +++ b/drivers/video/backlight/adp5520_bl.c @@ -332,10 +332,18 @@ static int adp5520_bl_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, bl); - ret |= adp5520_bl_setup(bl); + ret = adp5520_bl_setup(bl); + if (ret) { + dev_err(&pdev->dev, "failed to setup\n"); + if (data->pdata->en_ambl_sens) + sysfs_remove_group(&bl->dev.kobj, + &adp5520_bl_attr_group); + return ret; + } + backlight_update_status(bl); - return ret; + return 0; } static int adp5520_bl_remove(struct platform_device *pdev) diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index 7de847df224f..4b40c6a4d441 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c @@ -226,6 +226,8 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent, dev_set_name(&new_ld->dev, "%s", name); dev_set_drvdata(&new_ld->dev, devdata); + new_ld->ops = ops; + rc = device_register(&new_ld->dev); if (rc) { put_device(&new_ld->dev); @@ -238,8 +240,6 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent, return ERR_PTR(rc); } - new_ld->ops = ops; - return new_ld; } EXPORT_SYMBOL(lcd_device_register); diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c index 914a52ba8477..77837665ce89 100644 --- 
a/drivers/video/fbdev/pmag-ba-fb.c +++ b/drivers/video/fbdev/pmag-ba-fb.c @@ -129,7 +129,7 @@ static struct fb_ops pmagbafb_ops = { /* * Turn the hardware cursor off. */ -static void __init pmagbafb_erase_cursor(struct fb_info *info) +static void pmagbafb_erase_cursor(struct fb_info *info) { struct pmagbafb_par *par = info->par; diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 0e0eb10f82a0..816a0e08ef10 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type, rc = -ENOMEM; goto out; } - } else if (msg_type == XS_TRANSACTION_END) { + } else if (u->u.msg.tx_id != 0) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 511078586fa1..73f1d1b3a51c 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data) if (v9inode->qid.type != st->qid.type) return 0; + + if (v9inode->qid.path != st->qid.path) + return 0; return 1; } diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index cb899af1babc..0b88744c6446 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data) if (v9inode->qid.type != st->qid.type) return 0; + + if (v9inode->qid.path != st->qid.path) + return 0; return 1; } diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 35b755e79c2d..fe6e7050fe50 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -87,7 +87,8 @@ static int autofs4_write(struct autofs_sb_info *sbi, spin_unlock_irqrestore(¤t->sighand->siglock, flags); } - return (bytes > 0); + /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */ + return bytes == 0 ? 0 : wr < 0 ? 
wr : -EIO; } static void autofs4_notify_daemon(struct autofs_sb_info *sbi, @@ -101,6 +102,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, } pkt; struct file *pipe = NULL; size_t pktsz; + int ret; DPRINTK("wait id = 0x%08lx, name = %.*s, type=%d", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type); @@ -173,7 +175,18 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, mutex_unlock(&sbi->wq_mutex); if (autofs4_write(sbi, pipe, &pkt, pktsz)) + switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) { + case 0: + break; + case -ENOMEM: + case -ERESTARTSYS: + /* Just fail this one */ + autofs4_wait_release(sbi, wq->wait_queue_token, ret); + break; + default: autofs4_catatonic_mode(sbi); + break; + } fput(pipe); } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c36a03fa7678..260f94b019c9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3361,13 +3361,6 @@ again: goto again; } - /* We've already setup this transaction, go ahead and exit */ - if (block_group->cache_generation == trans->transid && - i_size_read(inode)) { - dcs = BTRFS_DC_SETUP; - goto out_put; - } - /* * We want to set the generation to 0, that way if anything goes wrong * from here on out we know not to trust this cache when we load up next @@ -3391,6 +3384,13 @@ again: } WARN_ON(ret); + /* We've already setup this transaction, go ahead and exit */ + if (block_group->cache_generation == trans->transid && + i_size_read(inode)) { + dcs = BTRFS_DC_SETUP; + goto out_put; + } + if (i_size_read(inode) > 0) { ret = btrfs_check_trunc_cache_free_space(root, &root->fs_info->global_block_rsv); diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c index 778282944530..837a9a8d579e 100644 --- a/fs/btrfs/uuid-tree.c +++ b/fs/btrfs/uuid-tree.c @@ -348,7 +348,5 @@ skip: out: btrfs_free_path(path); - if (ret) - btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret); - return 0; + return ret; } diff --git a/fs/coda/upcall.c 
b/fs/coda/upcall.c index f6c6c8adbc01..7289f0a7670b 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -446,8 +446,7 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid) UPARG(CODA_FSYNC); inp->coda_fsync.VFid = *fid; - error = coda_upcall(coda_vcp(sb), sizeof(union inputArgs), - &outsize, inp); + error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); CODA_FREE(inp, insize); return error; diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index 286f10b0363b..4f457d5c4933 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -442,15 +442,16 @@ void ecryptfs_release_messaging(void) } if (ecryptfs_daemon_hash) { struct ecryptfs_daemon *daemon; + struct hlist_node *n; int i; mutex_lock(&ecryptfs_daemon_hash_mux); for (i = 0; i < (1 << ecryptfs_hash_bits); i++) { int rc; - hlist_for_each_entry(daemon, - &ecryptfs_daemon_hash[i], - euid_chain) { + hlist_for_each_entry_safe(daemon, n, + &ecryptfs_daemon_hash[i], + euid_chain) { rc = ecryptfs_exorcise_daemon(daemon); if (rc) printk(KERN_ERR "%s: Error whilst " diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c index 15ebac242288..d3d6b28ce9b9 100644 --- a/fs/ext4/crypto_key.c +++ b/fs/ext4/crypto_key.c @@ -220,11 +220,9 @@ int _ext4_get_encryption_info(struct inode *inode) int mode; int res; - if (!ext4_read_workqueue) { - res = ext4_init_crypto(); - if (res) - return res; - } + res = ext4_init_crypto(); + if (res) + return res; retry: crypt_info = ACCESS_ONCE(ei->i_crypt_info); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index fc496c646d12..468e7fe3616c 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4810,7 +4810,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, } if (!(mode & FALLOC_FL_KEEP_SIZE) && - offset + len > i_size_read(inode)) { + (offset + len > i_size_read(inode) || + offset + len > EXT4_I(inode)->i_disksize)) { new_size = offset + len; ret = inode_newsize_ok(inode, new_size); if (ret) @@ -4986,7 +4987,8 @@ long 
ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) } if (!(mode & FALLOC_FL_KEEP_SIZE) && - offset + len > i_size_read(inode)) { + (offset + len > i_size_read(inode) || + offset + len > EXT4_I(inode)->i_disksize)) { new_size = offset + len; ret = inode_newsize_ok(inode, new_size); if (ret) diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index 112f8e04c549..3f52efa0f94f 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -253,6 +253,9 @@ static int __f2fs_set_acl(struct inode *inode, int type, int f2fs_set_acl(struct inode *inode, struct posix_acl *acl, int type) { + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + return __f2fs_set_acl(inode, type, acl, NULL); } diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index e86f67ac96c6..2eb778174a9b 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -29,7 +29,6 @@ struct kmem_cache *inode_entry_slab; void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io) { set_ckpt_flags(sbi, CP_ERROR_FLAG); - sbi->sb->s_flags |= MS_RDONLY; if (!end_io) f2fs_flush_merged_writes(sbi); } @@ -402,24 +401,23 @@ const struct address_space_operations f2fs_meta_aops = { #endif }; -static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) +static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, + unsigned int devidx, int type) { struct inode_management *im = &sbi->im[type]; struct ino_entry *e, *tmp; tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS); -retry: + radix_tree_preload(GFP_NOFS | __GFP_NOFAIL); spin_lock(&im->ino_lock); e = radix_tree_lookup(&im->ino_root, ino); if (!e) { e = tmp; - if (radix_tree_insert(&im->ino_root, ino, e)) { - spin_unlock(&im->ino_lock); - radix_tree_preload_end(); - goto retry; - } + if (unlikely(radix_tree_insert(&im->ino_root, ino, e))) + f2fs_bug_on(sbi, 1); + memset(e, 0, sizeof(struct ino_entry)); e->ino = ino; @@ -427,6 +425,10 @@ retry: if (type != ORPHAN_INO) im->ino_num++; } + + if (type == FLUSH_INO) + 
f2fs_set_bit(devidx, (char *)&e->dirty_device); + spin_unlock(&im->ino_lock); radix_tree_preload_end(); @@ -455,7 +457,7 @@ static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) { /* add new dirty ino entry into list */ - __add_ino_entry(sbi, ino, type); + __add_ino_entry(sbi, ino, 0, type); } void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) @@ -481,7 +483,7 @@ void release_ino_entry(struct f2fs_sb_info *sbi, bool all) struct ino_entry *e, *tmp; int i; - for (i = all ? ORPHAN_INO: APPEND_INO; i <= UPDATE_INO; i++) { + for (i = all ? ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) { struct inode_management *im = &sbi->im[i]; spin_lock(&im->ino_lock); @@ -495,6 +497,27 @@ void release_ino_entry(struct f2fs_sb_info *sbi, bool all) } } +void set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, + unsigned int devidx, int type) +{ + __add_ino_entry(sbi, ino, devidx, type); +} + +bool is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, + unsigned int devidx, int type) +{ + struct inode_management *im = &sbi->im[type]; + struct ino_entry *e; + bool is_dirty = false; + + spin_lock(&im->ino_lock); + e = radix_tree_lookup(&im->ino_root, ino); + if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device)) + is_dirty = true; + spin_unlock(&im->ino_lock); + return is_dirty; +} + int acquire_orphan_inode(struct f2fs_sb_info *sbi) { struct inode_management *im = &sbi->im[ORPHAN_INO]; @@ -531,7 +554,7 @@ void release_orphan_inode(struct f2fs_sb_info *sbi) void add_orphan_inode(struct inode *inode) { /* add new orphan ino entry into list */ - __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO); + __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO); update_inode_page(inode); } @@ -555,7 +578,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) return err; } - __add_ino_entry(sbi, ino, ORPHAN_INO); + __add_ino_entry(sbi, ino, 0, 
ORPHAN_INO); inode = f2fs_iget_retry(sbi->sb, ino); if (IS_ERR(inode)) { @@ -591,6 +614,9 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) block_t start_blk, orphan_blocks, i, j; unsigned int s_flags = sbi->sb->s_flags; int err = 0; +#ifdef CONFIG_QUOTA + int quota_enabled; +#endif if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG)) return 0; @@ -603,8 +629,9 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) #ifdef CONFIG_QUOTA /* Needed for iput() to work correctly and not trash data */ sbi->sb->s_flags |= MS_ACTIVE; + /* Turn on quotas so that they are updated correctly */ - f2fs_enable_quota_files(sbi); + quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY); #endif start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi); @@ -632,7 +659,8 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) out: #ifdef CONFIG_QUOTA /* Turn quotas off */ - f2fs_quota_off_umount(sbi->sb); + if (quota_enabled) + f2fs_quota_off_umount(sbi->sb); #endif sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */ @@ -987,7 +1015,7 @@ int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi) update_inode_page(inode); iput(inode); } - }; + } return 0; } @@ -1147,6 +1175,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) struct super_block *sb = sbi->sb; struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); u64 kbytes_written; + int err; /* Flush all the NAT/SIT pages */ while (get_pages(sbi, F2FS_DIRTY_META)) { @@ -1240,6 +1269,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) if (unlikely(f2fs_cp_error(sbi))) return -EIO; + /* flush all device cache */ + err = f2fs_flush_device_cache(sbi); + if (err) + return err; + /* write out checkpoint buffer at block 0 */ update_meta_page(sbi, ckpt, start_blk++); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index c8583d7a1845..cdccc429325b 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -172,7 +172,7 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, 
block_t blk_addr, { struct bio *bio; - bio = f2fs_bio_alloc(npages); + bio = f2fs_bio_alloc(sbi, npages, true); f2fs_target_device(sbi, blk_addr, bio); bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io; @@ -417,8 +417,8 @@ next: bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; - /* set submitted = 1 as a return value */ - fio->submitted = 1; + /* set submitted = true as a return value */ + fio->submitted = true; inc_page_count(sbi, WB_DATA_TYPE(bio_page)); @@ -472,7 +472,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, f2fs_wait_on_block_writeback(sbi, blkaddr); } - bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES)); + bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false); if (!bio) { if (ctx) fscrypt_release_ctx(ctx); @@ -832,6 +832,13 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) struct f2fs_map_blocks map; int err = 0; + /* convert inline data for Direct I/O*/ + if (iocb->ki_flags & IOCB_DIRECT) { + err = f2fs_convert_inline_inode(inode); + if (err) + return err; + } + if (is_inode_flag_set(inode, FI_NO_PREALLOC)) return 0; @@ -844,15 +851,11 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) map.m_next_pgofs = NULL; - if (iocb->ki_flags & IOCB_DIRECT) { - err = f2fs_convert_inline_inode(inode); - if (err) - return err; + if (iocb->ki_flags & IOCB_DIRECT) return f2fs_map_blocks(inode, &map, 1, __force_buffered_io(inode, WRITE) ? 
F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO); - } if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) { err = f2fs_convert_inline_inode(inode); if (err) @@ -1332,7 +1335,7 @@ static int f2fs_read_data_pages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { - struct inode *inode = file->f_mapping->host; + struct inode *inode = mapping->host; struct page *page = list_last_entry(pages, struct page, lru); trace_f2fs_readpages(inode, page, nr_pages); @@ -1493,6 +1496,7 @@ static int __write_data_page(struct page *page, bool *submitted, int err = 0; struct f2fs_io_info fio = { .sbi = sbi, + .ino = inode->i_ino, .type = DATA, .op = REQ_OP_WRITE, .op_flags = wbc_to_write_flags(wbc), @@ -1564,8 +1568,11 @@ write: err = do_write_data_page(&fio); } } + + down_write(&F2FS_I(inode)->i_sem); if (F2FS_I(inode)->last_disk_size < psize) F2FS_I(inode)->last_disk_size = psize; + up_write(&F2FS_I(inode)->i_sem); done: if (err && err != -ENOENT) @@ -1945,6 +1952,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, } trace_f2fs_write_begin(inode, pos, len, flags); + if (f2fs_is_atomic_file(inode) && + !available_free_memory(sbi, INMEM_PAGES)) { + err = -ENOMEM; + goto fail; + } + /* * We should check this at this moment to avoid deadlock on inode page * and #0 page. The locking rule for inline_data conversion should be: @@ -1960,7 +1973,8 @@ repeat: * Do not use grab_cache_page_write_begin() to avoid deadlock due to * wait_for_stable_page. Will wait that below with our IO control. 
*/ - page = grab_cache_page(mapping, index); + page = f2fs_pagecache_get_page(mapping, index, + FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS); if (!page) { err = -ENOMEM; goto fail; @@ -2021,6 +2035,8 @@ repeat: fail: f2fs_put_page(page, 1); f2fs_write_failed(mapping, pos + len); + if (f2fs_is_atomic_file(inode)) + drop_inmem_pages_all(sbi); return err; } diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 87f449845f5f..ecada8425268 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -45,9 +45,18 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS); si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META); si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA); + si->ndirty_qdata = get_pages(sbi, F2FS_DIRTY_QDATA); si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA); si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE]; si->ndirty_files = sbi->ndirty_inode[FILE_INODE]; + + si->nquota_files = 0; + if (f2fs_sb_has_quota_ino(sbi->sb)) { + for (i = 0; i < MAXQUOTAS; i++) { + if (f2fs_qf_ino(sbi->sb, i)) + si->nquota_files++; + } + } si->ndirty_all = sbi->ndirty_inode[DIRTY_META]; si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES); si->aw_cnt = atomic_read(&sbi->aw_cnt); @@ -61,6 +70,8 @@ static void update_general_status(struct f2fs_sb_info *sbi) atomic_read(&SM_I(sbi)->fcc_info->issued_flush); si->nr_flushing = atomic_read(&SM_I(sbi)->fcc_info->issing_flush); + si->flush_list_empty = + llist_empty(&SM_I(sbi)->fcc_info->issue_list); } if (SM_I(sbi) && SM_I(sbi)->dcc_info) { si->nr_discarded = @@ -96,9 +107,9 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->dirty_nats = NM_I(sbi)->dirty_nat_cnt; si->sits = MAIN_SEGS(sbi); si->dirty_sits = SIT_I(sbi)->dirty_sentries; - si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST]; + si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID]; si->avail_nids = NM_I(sbi)->available_nids; - si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]; + si->alloc_nids = 
NM_I(sbi)->nid_cnt[PREALLOC_NID]; si->bg_gc = sbi->bg_gc; si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg) * 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg) @@ -231,14 +242,14 @@ get_cache: } /* free nids */ - si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] + - NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]) * + si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] + + NM_I(sbi)->nid_cnt[PREALLOC_NID]) * sizeof(struct free_nid); si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry); si->cache_mem += NM_I(sbi)->dirty_nat_cnt * sizeof(struct nat_entry_set); si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages); - for (i = 0; i <= ORPHAN_INO; i++) + for (i = 0; i < MAX_INO_ENTRY; i++) si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); si->cache_mem += atomic_read(&sbi->total_ext_tree) * sizeof(struct extent_tree); @@ -262,9 +273,10 @@ static int stat_show(struct seq_file *s, void *v) list_for_each_entry(si, &f2fs_stat_list, stat_list) { update_general_status(si->sbi); - seq_printf(s, "\n=====[ partition info(%pg). #%d, %s]=====\n", + seq_printf(s, "\n=====[ partition info(%pg). #%d, %s, CP: %s]=====\n", si->sbi->sb->s_bdev, i++, - f2fs_readonly(si->sbi->sb) ? "RO": "RW"); + f2fs_readonly(si->sbi->sb) ? "RO": "RW", + f2fs_cp_error(si->sbi) ? 
"Error": "Good"); seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ", si->sit_area_segs, si->nat_area_segs); seq_printf(s, "[SSA: %d] [MAIN: %d", @@ -349,10 +361,11 @@ static int stat_show(struct seq_file *s, void *v) seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n", si->ext_tree, si->zombie_tree, si->ext_node); seq_puts(s, "\nBalancing F2FS Async:\n"); - seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: (%4d %4d), " + seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), " "Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n", si->nr_wb_cp_data, si->nr_wb_data, si->nr_flushing, si->nr_flushed, + si->flush_list_empty, si->nr_discarding, si->nr_discarded, si->nr_discard_cmd, si->undiscard_blks); seq_printf(s, " - inmem: %4d, atomic IO: %4d (Max. %4d), " @@ -365,6 +378,8 @@ static int stat_show(struct seq_file *s, void *v) si->ndirty_dent, si->ndirty_dirs, si->ndirty_all); seq_printf(s, " - datas: %4d in files:%4d\n", si->ndirty_data, si->ndirty_files); + seq_printf(s, " - quota datas: %4d in quota files:%4d\n", + si->ndirty_qdata, si->nquota_files); seq_printf(s, " - meta: %4d in %4d\n", si->ndirty_meta, si->meta_pages); seq_printf(s, " - imeta: %4d\n", diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 4f2a8fedb313..1955707b138b 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -10,10 +10,12 @@ */ #include <linux/fs.h> #include <linux/f2fs_fs.h> +#include <linux/sched.h> #include "f2fs.h" #include "node.h" #include "acl.h" #include "xattr.h" +#include <trace/events/f2fs.h> static unsigned long dir_blocks(struct inode *inode) { @@ -847,6 +849,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) struct f2fs_dentry_block *dentry_blk = NULL; struct page *dentry_page = NULL; struct file_ra_state *ra = &file->f_ra; + loff_t start_pos = ctx->pos; unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK); struct f2fs_dentry_ptr d; struct fscrypt_str fstr = FSTR_INIT(NULL, 0); @@ -855,24 +858,32 @@ static int f2fs_readdir(struct 
file *file, struct dir_context *ctx) if (f2fs_encrypted_inode(inode)) { err = fscrypt_get_encryption_info(inode); if (err && err != -ENOKEY) - return err; + goto out; err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr); if (err < 0) - return err; + goto out; } if (f2fs_has_inline_dentry(inode)) { err = f2fs_read_inline_dir(file, ctx, &fstr); - goto out; + goto out_free; } - /* readahead for multi pages of dir */ - if (npages - n > 1 && !ra_has_index(ra, n)) - page_cache_sync_readahead(inode->i_mapping, ra, file, n, + for (; n < npages; n++, ctx->pos = n * NR_DENTRY_IN_BLOCK) { + + /* allow readdir() to be interrupted */ + if (fatal_signal_pending(current)) { + err = -ERESTARTSYS; + goto out_free; + } + cond_resched(); + + /* readahead for multi pages of dir */ + if (npages - n > 1 && !ra_has_index(ra, n)) + page_cache_sync_readahead(inode->i_mapping, ra, file, n, min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES)); - for (; n < npages; n++) { dentry_page = get_lock_data_page(inode, n, false); if (IS_ERR(dentry_page)) { err = PTR_ERR(dentry_page); @@ -880,7 +891,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) err = 0; continue; } else { - goto out; + goto out_free; } } @@ -896,12 +907,13 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) break; } - ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK; kunmap(dentry_page); f2fs_put_page(dentry_page, 1); } -out: +out_free: fscrypt_fname_free_buffer(&fstr); +out: + trace_f2fs_readdir(inode, start_pos, ctx->pos, err); return err < 0 ? 
err : 0; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index c1a0aef8efc6..081ec493baae 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -47,6 +47,8 @@ enum { FAULT_KMALLOC, FAULT_PAGE_ALLOC, + FAULT_PAGE_GET, + FAULT_ALLOC_BIO, FAULT_ALLOC_NID, FAULT_ORPHAN, FAULT_BLOCK, @@ -94,6 +96,7 @@ extern char *fault_name[FAULT_MAX]; #define F2FS_MOUNT_GRPQUOTA 0x00100000 #define F2FS_MOUNT_PRJQUOTA 0x00200000 #define F2FS_MOUNT_QUOTA 0x00400000 +#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000 #define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option) #define set_opt(sbi, option) ((sbi)->mount_opt.opt |= F2FS_MOUNT_##option) @@ -119,6 +122,8 @@ struct f2fs_mount_info { #define F2FS_FEATURE_EXTRA_ATTR 0x0008 #define F2FS_FEATURE_PRJQUOTA 0x0010 #define F2FS_FEATURE_INODE_CHKSUM 0x0020 +#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040 +#define F2FS_FEATURE_QUOTA_INO 0x0080 #define F2FS_HAS_FEATURE(sb, mask) \ ((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0) @@ -214,7 +219,7 @@ enum { #define BATCHED_TRIM_BLOCKS(sbi) \ (BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg) #define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi) -#define DISCARD_ISSUE_RATE 8 +#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */ #define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */ #define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */ #define DEF_CP_INTERVAL 60 /* 60 secs */ @@ -225,7 +230,6 @@ struct cp_control { __u64 trim_start; __u64 trim_end; __u64 trim_minlen; - __u64 trimmed; }; /* @@ -244,12 +248,14 @@ enum { ORPHAN_INO, /* for orphan ino list */ APPEND_INO, /* for append ino list */ UPDATE_INO, /* for update ino list */ + FLUSH_INO, /* for multiple device flushing */ MAX_INO_ENTRY, /* max. 
list */ }; struct ino_entry { - struct list_head list; /* list head */ - nid_t ino; /* inode number */ + struct list_head list; /* list head */ + nid_t ino; /* inode number */ + unsigned int dirty_device; /* dirty device bitmap */ }; /* for the list of inodes to be GCed */ @@ -273,10 +279,6 @@ struct discard_entry { #define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? \ (MAX_PLIST_NUM - 1) : (blk_num - 1)) -#define P_ACTIVE 0x01 -#define P_TRIM 0x02 -#define plist_issue(tag) (((tag) & P_ACTIVE) || ((tag) & P_TRIM)) - enum { D_PREP, D_SUBMIT, @@ -308,12 +310,32 @@ struct discard_cmd { int error; /* bio error */ }; +enum { + DPOLICY_BG, + DPOLICY_FORCE, + DPOLICY_FSTRIM, + DPOLICY_UMOUNT, + MAX_DPOLICY, +}; + +struct discard_policy { + int type; /* type of discard */ + unsigned int min_interval; /* used for candidates exist */ + unsigned int max_interval; /* used for candidates not exist */ + unsigned int max_requests; /* # of discards issued per round */ + unsigned int io_aware_gran; /* minimum granularity discard not be aware of I/O */ + bool io_aware; /* issue discard in idle time */ + bool sync; /* submit discard with REQ_SYNC flag */ + unsigned int granularity; /* discard granularity */ +}; + struct discard_cmd_control { struct task_struct *f2fs_issue_discard; /* discard thread */ struct list_head entry_list; /* 4KB discard entry list */ struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */ unsigned char pend_list_tag[MAX_PLIST_NUM];/* tag for pending entries */ struct list_head wait_list; /* store on-flushing entries */ + struct list_head fstrim_list; /* in-flight discard from fstrim */ wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */ unsigned int discard_wake; /* to wake up discard thread */ struct mutex cmd_lock; @@ -443,11 +465,14 @@ struct f2fs_flush_device { /* for inline stuff */ #define DEF_INLINE_RESERVED_SIZE 1 +#define DEF_MIN_INLINE_SIZE 1 static inline int get_extra_isize(struct inode *inode); -#define 
MAX_INLINE_DATA(inode) (sizeof(__le32) * \ - (CUR_ADDRS_PER_INODE(inode) - \ - DEF_INLINE_RESERVED_SIZE - \ - F2FS_INLINE_XATTR_ADDRS)) +static inline int get_inline_xattr_addrs(struct inode *inode); +#define F2FS_INLINE_XATTR_ADDRS(inode) get_inline_xattr_addrs(inode) +#define MAX_INLINE_DATA(inode) (sizeof(__le32) * \ + (CUR_ADDRS_PER_INODE(inode) - \ + F2FS_INLINE_XATTR_ADDRS(inode) - \ + DEF_INLINE_RESERVED_SIZE)) /* for inline dir */ #define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \ @@ -647,6 +672,7 @@ struct f2fs_inode_info { #endif struct list_head dirty_list; /* dirty list for dirs and files */ struct list_head gdirty_list; /* linked in global dirty list */ + struct list_head inmem_ilist; /* list for inmem inodes */ struct list_head inmem_pages; /* inmemory pages managed by f2fs */ struct task_struct *inmem_task; /* store inmemory task */ struct mutex inmem_lock; /* lock for inmemory pages */ @@ -657,6 +683,7 @@ struct f2fs_inode_info { int i_extra_isize; /* size of extra space located in i_addr */ kprojid_t i_projid; /* id for project quota */ + int i_inline_xattr_size; /* inline xattr size */ }; static inline void get_extent_info(struct extent_info *ext, @@ -730,10 +757,13 @@ static inline void __try_update_largest_extent(struct inode *inode, } } -enum nid_list { - FREE_NID_LIST, - ALLOC_NID_LIST, - MAX_NID_LIST, +/* + * For free nid management + */ +enum nid_state { + FREE_NID, /* newly added to free nid list */ + PREALLOC_NID, /* it is preallocated */ + MAX_NID_STATE, }; struct f2fs_nm_info { @@ -756,8 +786,8 @@ struct f2fs_nm_info { /* free node ids management */ struct radix_tree_root free_nid_root;/* root of the free_nid cache */ - struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */ - unsigned int nid_cnt[MAX_NID_LIST]; /* the number of free node id */ + struct list_head free_nid_list; /* list for free nids excluding preallocated nids */ + unsigned int nid_cnt[MAX_NID_STATE]; /* the number of free node id */ 
spinlock_t nid_list_lock; /* protect nid lists ops */ struct mutex build_lock; /* lock for build free nids */ unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE]; @@ -835,6 +865,7 @@ enum { struct flush_cmd { struct completion wait; struct llist_node llnode; + nid_t ino; int ret; }; @@ -853,6 +884,8 @@ struct f2fs_sm_info { struct dirty_seglist_info *dirty_info; /* dirty segment information */ struct curseg_info *curseg_array; /* active segment information */ + struct rw_semaphore curseg_lock; /* for preventing curseg change */ + block_t seg0_blkaddr; /* block address of 0'th segment */ block_t main_blkaddr; /* start block address of main area */ block_t ssa_blkaddr; /* start block address of SSA area */ @@ -874,6 +907,7 @@ struct f2fs_sm_info { unsigned int min_ipu_util; /* in-place-update threshold */ unsigned int min_fsync_blocks; /* threshold for fsync */ unsigned int min_hot_blocks; /* threshold for hot block allocation */ + unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */ /* for flush command control */ struct flush_cmd_control *fcc_info; @@ -895,6 +929,7 @@ struct f2fs_sm_info { enum count_type { F2FS_DIRTY_DENTS, F2FS_DIRTY_DATA, + F2FS_DIRTY_QDATA, F2FS_DIRTY_NODES, F2FS_DIRTY_META, F2FS_INMEM_PAGES, @@ -943,6 +978,18 @@ enum need_lock_type { LOCK_RETRY, }; +enum cp_reason_type { + CP_NO_NEEDED, + CP_NON_REGULAR, + CP_HARDLINK, + CP_SB_NEED_CP, + CP_WRONG_PINO, + CP_NO_SPC_ROLL, + CP_NODE_NEED_CP, + CP_FASTBOOT_MODE, + CP_SPEC_LOG_NUM, +}; + enum iostat_type { APP_DIRECT_IO, /* app direct IOs */ APP_BUFFERED_IO, /* app buffered IOs */ @@ -962,6 +1009,7 @@ enum iostat_type { struct f2fs_io_info { struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ + nid_t ino; /* inode number */ enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ enum temp_type temp; /* contains HOT/WARM/COLD */ int op; /* contains REQ_OP_ */ @@ -1006,6 +1054,7 @@ enum inode_type { DIR_INODE, /* for dirty dir inode */ FILE_INODE, /* for dirty 
regular/symlink inode */ DIRTY_META, /* for all dirtied inode metadata */ + ATOMIC_FILE, /* for all atomic files */ NR_INODE_TYPE, }; @@ -1108,12 +1157,15 @@ struct f2fs_sb_info { loff_t max_file_blocks; /* max block index of file */ int active_logs; /* # of active logs */ int dir_level; /* directory level */ + int inline_xattr_size; /* inline xattr size */ + unsigned int trigger_ssr_threshold; /* threshold to trigger ssr */ block_t user_block_count; /* # of user blocks */ block_t total_valid_block_count; /* # of valid blocks */ block_t discard_blks; /* discard command candidats */ block_t last_valid_block_count; /* for recovery */ block_t reserved_blocks; /* configurable reserved blocks */ + block_t current_reserved_blocks; /* current reserved blocks */ u32 s_next_generation; /* for NFS support */ @@ -1179,6 +1231,8 @@ struct f2fs_sb_info { struct list_head s_list; int s_ndevs; /* number of devices */ struct f2fs_dev_info *devs; /* for device list */ + unsigned int dirty_device; /* for checkpoint data flush */ + spinlock_t dev_lock; /* protect dirty_device */ struct mutex umount_mutex; unsigned int shrinker_run_no; @@ -1242,8 +1296,7 @@ static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) { - struct timespec ts = {sbi->interval_time[type], 0}; - unsigned long interval = timespec_to_jiffies(&ts); + unsigned long interval = sbi->interval_time[type] * HZ; return time_after(jiffies, sbi->last_time[type] + interval); } @@ -1410,6 +1463,13 @@ static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) return le64_to_cpu(cp->checkpoint_ver); } +static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) +{ + if (type < F2FS_MAX_QUOTAS) + return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); + return 0; +} + static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) { size_t crc_offset = le32_to_cpu(cp->checksum_offset); @@ -1588,7 +1648,8 @@ static 
inline int inc_valid_block_count(struct f2fs_sb_info *sbi, spin_lock(&sbi->stat_lock); sbi->total_valid_block_count += (block_t)(*count); - avail_user_block_count = sbi->user_block_count - sbi->reserved_blocks; + avail_user_block_count = sbi->user_block_count - + sbi->current_reserved_blocks; if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { diff = sbi->total_valid_block_count - avail_user_block_count; *count -= diff; @@ -1622,6 +1683,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); f2fs_bug_on(sbi, inode->i_blocks < sectors); sbi->total_valid_block_count -= (block_t)count; + if (sbi->reserved_blocks && + sbi->current_reserved_blocks < sbi->reserved_blocks) + sbi->current_reserved_blocks = min(sbi->reserved_blocks, + sbi->current_reserved_blocks + count); spin_unlock(&sbi->stat_lock); f2fs_i_blocks_write(inode, count, false, true); } @@ -1642,6 +1707,8 @@ static inline void inode_inc_dirty_pages(struct inode *inode) atomic_inc(&F2FS_I(inode)->dirty_pages); inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); + if (IS_NOQUOTA(inode)) + inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); } static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) @@ -1658,6 +1725,8 @@ static inline void inode_dec_dirty_pages(struct inode *inode) atomic_dec(&F2FS_I(inode)->dirty_pages); dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); + if (IS_NOQUOTA(inode)) + dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); } static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) @@ -1765,10 +1834,17 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, return ret; } +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(sbi, FAULT_BLOCK)) { + f2fs_show_injection_info(FAULT_BLOCK); + goto enospc; + } +#endif + spin_lock(&sbi->stat_lock); valid_block_count = sbi->total_valid_block_count + 1; - if (unlikely(valid_block_count + sbi->reserved_blocks > + if (unlikely(valid_block_count + sbi->current_reserved_blocks > sbi->user_block_count)) { spin_unlock(&sbi->stat_lock); goto enospc; @@ -1811,6 +1887,9 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, sbi->total_valid_node_count--; sbi->total_valid_block_count--; + if (sbi->reserved_blocks && + sbi->current_reserved_blocks < sbi->reserved_blocks) + sbi->current_reserved_blocks++; spin_unlock(&sbi->stat_lock); @@ -1857,6 +1936,19 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); } +static inline struct page *f2fs_pagecache_get_page( + struct address_space *mapping, pgoff_t index, + int fgp_flags, gfp_t gfp_mask) +{ +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { + f2fs_show_injection_info(FAULT_PAGE_GET); + return NULL; + } +#endif + return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); +} + static inline void f2fs_copy_page(struct page *src, struct page *dst) { char *src_kaddr = kmap(src); @@ -1906,15 +1998,25 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, return entry; } -static inline struct bio *f2fs_bio_alloc(int npages) +static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, + int npages, bool no_fail) { struct bio *bio; - /* No failure on bio allocation */ - bio = 
bio_alloc(GFP_NOIO, npages); - if (!bio) - bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages); - return bio; + if (no_fail) { + /* No failure on bio allocation */ + bio = bio_alloc(GFP_NOIO, npages); + if (!bio) + bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages); + return bio; + } +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(sbi, FAULT_ALLOC_BIO)) { + f2fs_show_injection_info(FAULT_ALLOC_BIO); + return NULL; + } +#endif + return bio_alloc(GFP_KERNEL, npages); } static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, @@ -2224,25 +2326,20 @@ static inline int f2fs_has_inline_xattr(struct inode *inode) static inline unsigned int addrs_per_inode(struct inode *inode) { - if (f2fs_has_inline_xattr(inode)) - return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS; - return CUR_ADDRS_PER_INODE(inode); + return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS(inode); } -static inline void *inline_xattr_addr(struct page *page) +static inline void *inline_xattr_addr(struct inode *inode, struct page *page) { struct f2fs_inode *ri = F2FS_INODE(page); return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - - F2FS_INLINE_XATTR_ADDRS]); + F2FS_INLINE_XATTR_ADDRS(inode)]); } static inline int inline_xattr_size(struct inode *inode) { - if (f2fs_has_inline_xattr(inode)) - return F2FS_INLINE_XATTR_ADDRS << 2; - else - return 0; + return get_inline_xattr_addrs(inode) * sizeof(__le32); } static inline int f2fs_has_inline_data(struct inode *inode) @@ -2323,9 +2420,10 @@ static inline void clear_file(struct inode *inode, int type) static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) { + bool ret; + if (dsync) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - bool ret; spin_lock(&sbi->inode_lock[DIRTY_META]); ret = list_empty(&F2FS_I(inode)->gdirty_list); @@ -2336,9 +2434,15 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) file_keep_isize(inode) || i_size_read(inode) & PAGE_MASK) return false; - return 
F2FS_I(inode)->last_disk_size == i_size_read(inode); + + down_read(&F2FS_I(inode)->i_sem); + ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); + up_read(&F2FS_I(inode)->i_sem); + + return ret; } +#define sb_rdonly f2fs_readonly static inline int f2fs_readonly(struct super_block *sb) { return sb->s_flags & MS_RDONLY; @@ -2406,6 +2510,12 @@ static inline int get_extra_isize(struct inode *inode) return F2FS_I(inode)->i_extra_isize / sizeof(__le32); } +static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb); +static inline int get_inline_xattr_addrs(struct inode *inode) +{ + return F2FS_I(inode)->i_inline_xattr_size; +} + #define get_inode_mode(i) \ ((is_inode_flag_set(i, FI_ACL_MODE)) ? \ (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) @@ -2534,7 +2644,7 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) */ int f2fs_inode_dirtied(struct inode *inode, bool sync); void f2fs_inode_synced(struct inode *inode); -void f2fs_enable_quota_files(struct f2fs_sb_info *sbi); +int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); void f2fs_quota_off_umount(struct super_block *sb); int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); int f2fs_sync_fs(struct super_block *sb, int sync); @@ -2562,7 +2672,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni); pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); int truncate_inode_blocks(struct inode *inode, pgoff_t from); -int truncate_xattr_node(struct inode *inode, struct page *page); +int truncate_xattr_node(struct inode *inode); int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino); int remove_inode_page(struct inode *inode); struct page *new_inode_page(struct inode *inode); @@ -2597,19 +2707,22 @@ void destroy_node_manager_caches(void); */ bool need_SSR(struct f2fs_sb_info *sbi); void register_inmem_page(struct inode *inode, 
struct page *page); +void drop_inmem_pages_all(struct f2fs_sb_info *sbi); void drop_inmem_pages(struct inode *inode); void drop_inmem_page(struct inode *inode, struct page *page); int commit_inmem_pages(struct inode *inode); void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi); -int f2fs_issue_flush(struct f2fs_sb_info *sbi); +int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); int create_flush_cmd_control(struct f2fs_sb_info *sbi); +int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); -void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); +void init_discard_policy(struct discard_policy *dpolicy, int discard_type, + unsigned int granularity); void stop_discard_thread(struct f2fs_sb_info *sbi); -void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount); +bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi); void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); void release_discard_addrs(struct f2fs_sb_info *sbi); int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); @@ -2664,6 +2777,10 @@ void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); void release_ino_entry(struct f2fs_sb_info *sbi, bool all); bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); +void set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, + unsigned int devidx, int type); +bool is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, + unsigned int devidx, int type); int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi); int acquire_orphan_inode(struct f2fs_sb_info *sbi); void release_orphan_inode(struct f2fs_sb_info *sbi); @@ -2751,14 +2868,16 @@ struct 
f2fs_stat_info { unsigned long long hit_largest, hit_cached, hit_rbtree; unsigned long long hit_total, total_ext; int ext_tree, zombie_tree, ext_node; - int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta; + int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta; + int ndirty_data, ndirty_qdata; int inmem_pages; - unsigned int ndirty_dirs, ndirty_files, ndirty_all; + unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all; int nats, dirty_nats, sits, dirty_sits; int free_nids, avail_nids, alloc_nids; int total_count, utilization; int bg_gc, nr_wb_cp_data, nr_wb_data; - int nr_flushing, nr_flushed, nr_discarding, nr_discarded; + int nr_flushing, nr_flushed, flush_list_empty; + int nr_discarding, nr_discarded; int nr_discard_cmd; unsigned int undiscard_blks; int inline_xattr, inline_inode, inline_dir, append, update, orphans; @@ -3066,6 +3185,16 @@ static inline int f2fs_sb_has_inode_chksum(struct super_block *sb) return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CHKSUM); } +static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb) +{ + return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR); +} + +static inline int f2fs_sb_has_quota_ino(struct super_block *sb) +{ + return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_QUOTA_INO); +} + #ifdef CONFIG_BLK_DEV_ZONED static inline int get_blkz_type(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t blkaddr) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index a9e1655a6bf8..bfff53f658e1 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -56,6 +56,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, struct dnode_of_data dn; int err; + if (unlikely(f2fs_cp_error(sbi))) { + err = -EIO; + goto err; + } + sb_start_pagefault(inode->i_sb); f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); @@ -117,6 +122,7 @@ out_sem: out: sb_end_pagefault(inode->i_sb); f2fs_update_time(sbi, REQ_TIME); +err: return block_page_mkwrite_return(err); } @@ -141,27 +147,29 @@ static int 
get_parent_ino(struct inode *inode, nid_t *pino) return 1; } -static inline bool need_do_checkpoint(struct inode *inode) +static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - bool need_cp = false; + enum cp_reason_type cp_reason = CP_NO_NEEDED; - if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1) - need_cp = true; + if (!S_ISREG(inode->i_mode)) + cp_reason = CP_NON_REGULAR; + else if (inode->i_nlink != 1) + cp_reason = CP_HARDLINK; else if (is_sbi_flag_set(sbi, SBI_NEED_CP)) - need_cp = true; + cp_reason = CP_SB_NEED_CP; else if (file_wrong_pino(inode)) - need_cp = true; + cp_reason = CP_WRONG_PINO; else if (!space_for_roll_forward(sbi)) - need_cp = true; + cp_reason = CP_NO_SPC_ROLL; else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) - need_cp = true; + cp_reason = CP_NODE_NEED_CP; else if (test_opt(sbi, FASTBOOT)) - need_cp = true; + cp_reason = CP_FASTBOOT_MODE; else if (sbi->active_logs == 2) - need_cp = true; + cp_reason = CP_SPEC_LOG_NUM; - return need_cp; + return cp_reason; } static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino) @@ -196,7 +204,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t ino = inode->i_ino; int ret = 0; - bool need_cp = false; + enum cp_reason_type cp_reason = 0; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, @@ -215,7 +223,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, clear_inode_flag(inode, FI_NEED_IPU); if (ret) { - trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret); + trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); return ret; } @@ -246,10 +254,10 @@ go_write: * sudden-power-off. 
*/ down_read(&F2FS_I(inode)->i_sem); - need_cp = need_do_checkpoint(inode); + cp_reason = need_do_checkpoint(inode); up_read(&F2FS_I(inode)->i_sem); - if (need_cp) { + if (cp_reason) { /* all the dirty node pages should be flushed for POR */ ret = f2fs_sync_fs(inode->i_sb, 1); @@ -297,19 +305,24 @@ sync_nodes: remove_ino_entry(sbi, ino, APPEND_INO); clear_inode_flag(inode, FI_APPEND_WRITE); flush_out: - remove_ino_entry(sbi, ino, UPDATE_INO); - clear_inode_flag(inode, FI_UPDATE_WRITE); if (!atomic) - ret = f2fs_issue_flush(sbi); + ret = f2fs_issue_flush(sbi, inode->i_ino); + if (!ret) { + remove_ino_entry(sbi, ino, UPDATE_INO); + clear_inode_flag(inode, FI_UPDATE_WRITE); + remove_ino_entry(sbi, ino, FLUSH_INO); + } f2fs_update_time(sbi, REQ_TIME); out: - trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret); + trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); f2fs_trace_ios(NULL, 1); return ret; } int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { + if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file))))) + return -EIO; return f2fs_do_sync_file(file, start, end, datasync, false); } @@ -446,6 +459,9 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) struct inode *inode = file_inode(file); int err; + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + /* we don't need to use inline_data strictly */ err = f2fs_convert_inline_inode(inode); if (err) @@ -632,6 +648,9 @@ int f2fs_truncate(struct inode *inode) { int err; + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return 0; @@ -667,7 +686,8 @@ int f2fs_getattr(struct vfsmount *mnt, generic_fillattr(inode, stat); /* we need to show initial sectors used for inline_data/dentries */ - if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) + if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) || + f2fs_has_inline_dentry(inode)) 
stat->blocks += (stat->size + 511) >> 9; return 0; @@ -709,6 +729,9 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) int err; bool size_changed = false; + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + err = inode_change_ok(inode, attr); if (err) return err; @@ -761,6 +784,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) inode->i_mtime = inode->i_ctime = current_time(inode); } + down_write(&F2FS_I(inode)->i_sem); + F2FS_I(inode)->last_disk_size = i_size_read(inode); + up_write(&F2FS_I(inode)->i_sem); + size_changed = true; } @@ -834,7 +861,7 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE); if (err) { if (err == -ENOENT) { - pg_start++; + pg_start = get_next_page_offset(&dn, pg_start); continue; } return err; @@ -1149,11 +1176,14 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) if (ret) goto out; + /* avoid gc operation during block exchange */ + down_write(&F2FS_I(inode)->dio_rwsem[WRITE]); + truncate_pagecache(inode, offset); ret = f2fs_do_collapse(inode, pg_start, pg_end); if (ret) - goto out; + goto out_unlock; /* write out all moved pages, if possible */ filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); @@ -1165,7 +1195,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) ret = truncate_blocks(inode, new_size, true); if (!ret) f2fs_i_size_write(inode, new_size); - +out_unlock: + up_write(&F2FS_I(inode)->dio_rwsem[WRITE]); out: up_write(&F2FS_I(inode)->i_mmap_sem); return ret; @@ -1348,6 +1379,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) if (ret) goto out; + /* avoid gc operation during block exchange */ + down_write(&F2FS_I(inode)->dio_rwsem[WRITE]); + truncate_pagecache(inode, offset); pg_start = offset >> PAGE_SHIFT; @@ -1375,6 +1409,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) if 
(!ret) f2fs_i_size_write(inode, new_size); + + up_write(&F2FS_I(inode)->dio_rwsem[WRITE]); out: up_write(&F2FS_I(inode)->i_mmap_sem); return ret; @@ -1424,8 +1460,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset, new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end; } - if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) - f2fs_i_size_write(inode, new_size); + if (new_size > i_size_read(inode)) { + if (mode & FALLOC_FL_KEEP_SIZE) + file_set_keep_isize(inode); + else + f2fs_i_size_write(inode, new_size); + } return err; } @@ -1436,6 +1476,9 @@ static long f2fs_fallocate(struct file *file, int mode, struct inode *inode = file_inode(file); long ret = 0; + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + /* f2fs only support ->fallocate for regular file */ if (!S_ISREG(inode->i_mode)) return -EINVAL; @@ -1469,8 +1512,6 @@ static long f2fs_fallocate(struct file *file, int mode, if (!ret) { inode->i_mtime = inode->i_ctime = current_time(inode); f2fs_mark_inode_dirty_sync(inode, false); - if (mode & FALLOC_FL_KEEP_SIZE) - file_set_keep_isize(inode); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); } @@ -1864,6 +1905,9 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); + if (!f2fs_sb_has_crypto(inode->i_sb)) + return -EOPNOTSUPP; + f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); return fscrypt_ioctl_set_policy(filp, (const void __user *)arg); @@ -1871,6 +1915,8 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg) { + if (!f2fs_sb_has_crypto(file_inode(filp)->i_sb)) + return -EOPNOTSUPP; return fscrypt_ioctl_get_policy(filp, (void __user *)arg); } @@ -2226,9 +2272,13 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, } inode_lock(src); + down_write(&F2FS_I(src)->dio_rwsem[WRITE]); if (src != dst) { - if (!inode_trylock(dst)) { 
- ret = -EBUSY; + ret = -EBUSY; + if (!inode_trylock(dst)) + goto out; + if (!down_write_trylock(&F2FS_I(dst)->dio_rwsem[WRITE])) { + inode_unlock(dst); goto out; } } @@ -2288,9 +2338,12 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, } f2fs_unlock_op(sbi); out_unlock: - if (src != dst) + if (src != dst) { + up_write(&F2FS_I(dst)->dio_rwsem[WRITE]); inode_unlock(dst); + } out: + up_write(&F2FS_I(src)->dio_rwsem[WRITE]); inode_unlock(src); return ret; } @@ -2412,6 +2465,9 @@ static int f2fs_ioc_get_features(struct file *filp, unsigned long arg) long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { + if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) + return -EIO; + switch (cmd) { case F2FS_IOC_GETFLAGS: return f2fs_ioc_getflags(filp, arg); @@ -2465,6 +2521,9 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct blk_plug plug; ssize_t ret; + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) + return -EIO; + inode_lock(inode); ret = generic_write_checks(iocb, from); if (ret > 0) { @@ -2475,6 +2534,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) err = f2fs_preallocate_blocks(iocb, from); if (err) { + clear_inode_flag(inode, FI_NO_PREALLOC); inode_unlock(inode); return err; } diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index bd16e6631cf3..be9fd616736b 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -267,16 +267,6 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) return UINT_MAX - ((100 * (100 - u) * age) / (100 + u)); } -static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi, - unsigned int segno) -{ - unsigned int valid_blocks = - get_valid_blocks(sbi, segno, true); - - return IS_DATASEG(get_seg_entry(sbi, segno)->type) ? 
- valid_blocks * 2 : valid_blocks; -} - static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno, struct victim_sel_policy *p) { @@ -285,7 +275,7 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, /* alloc_mode == LFS */ if (p->gc_mode == GC_GREEDY) - return get_greedy_cost(sbi, segno); + return get_valid_blocks(sbi, segno, true); else return get_cb_cost(sbi, segno); } @@ -466,10 +456,10 @@ static int check_valid_map(struct f2fs_sb_info *sbi, struct seg_entry *sentry; int ret; - mutex_lock(&sit_i->sentry_lock); + down_read(&sit_i->sentry_lock); sentry = get_seg_entry(sbi, segno); ret = f2fs_test_bit(offset, sentry->cur_valid_map); - mutex_unlock(&sit_i->sentry_lock); + up_read(&sit_i->sentry_lock); return ret; } @@ -608,6 +598,7 @@ static void move_data_block(struct inode *inode, block_t bidx, { struct f2fs_io_info fio = { .sbi = F2FS_I_SB(inode), + .ino = inode->i_ino, .type = DATA, .temp = COLD, .op = REQ_OP_READ, @@ -659,8 +650,8 @@ static void move_data_block(struct inode *inode, block_t bidx, allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, &sum, CURSEG_COLD_DATA, NULL, false); - fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr, - FGP_LOCK | FGP_CREAT, GFP_NOFS); + fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), + newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); if (!fio.encrypted_page) { err = -ENOMEM; goto recover_block; @@ -738,6 +729,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type, } else { struct f2fs_io_info fio = { .sbi = F2FS_I_SB(inode), + .ino = inode->i_ino, .type = DATA, .temp = COLD, .op = REQ_OP_WRITE, @@ -840,10 +832,17 @@ next_step: continue; } + if (!down_write_trylock( + &F2FS_I(inode)->dio_rwsem[WRITE])) { + iput(inode); + continue; + } + start_bidx = start_bidx_of_node(nofs, inode); data_page = get_read_data_page(inode, start_bidx + ofs_in_node, REQ_RAHEAD, true); + up_write(&F2FS_I(inode)->dio_rwsem[WRITE]); if 
(IS_ERR(data_page)) { iput(inode); continue; @@ -901,10 +900,10 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, struct sit_info *sit_i = SIT_I(sbi); int ret; - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS); - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); return ret; } @@ -952,8 +951,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, /* * this is to avoid deadlock: * - lock_page(sum_page) - f2fs_replace_block - * - check_valid_map() - mutex_lock(sentry_lock) - * - mutex_lock(sentry_lock) - change_curseg() + * - check_valid_map() - down_write(sentry_lock) + * - down_read(sentry_lock) - change_curseg() * - lock_page(sum_page) */ if (type == SUM_TYPE_NODE) diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index fbf22b0f667f..91d5d831be72 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -130,6 +130,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) { struct f2fs_io_info fio = { .sbi = F2FS_I_SB(dn->inode), + .ino = dn->inode->i_ino, .type = DATA, .op = REQ_OP_WRITE, .op_flags = REQ_SYNC | REQ_NOIDLE | REQ_PRIO, diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 50c88e37ed66..9684d53563f1 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -232,6 +232,23 @@ static int do_read_inode(struct inode *inode) fi->i_extra_isize = f2fs_has_extra_attr(inode) ? le16_to_cpu(ri->i_extra_isize) : 0; + if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) { + f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode)); + fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size); + } else if (f2fs_has_inline_xattr(inode) || + f2fs_has_inline_dentry(inode)) { + fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; + } else { + + /* + * Previous inline data or directory always reserved 200 bytes + * in inode layout, even if inline_xattr is disabled. 
In order + * to keep inline_dentry's structure for backward compatibility, + * we get the space back only from inline_data. + */ + fi->i_inline_xattr_size = 0; + } + /* check data exist */ if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode)) __recover_inline_status(inode, node_page); @@ -384,6 +401,10 @@ int update_inode(struct inode *inode, struct page *node_page) if (f2fs_has_extra_attr(inode)) { ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize); + if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb)) + ri->i_inline_xattr_size = + cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size); + if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) && F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize, i_projid)) { @@ -480,6 +501,7 @@ void f2fs_evict_inode(struct inode *inode) remove_ino_entry(sbi, inode->i_ino, APPEND_INO); remove_ino_entry(sbi, inode->i_ino, UPDATE_INO); + remove_ino_entry(sbi, inode->i_ino, FLUSH_INO); sb_start_intwrite(inode->i_sb); set_inode_flag(inode, FI_NO_ALLOC); @@ -519,8 +541,10 @@ no_delete: stat_dec_inline_dir(inode); stat_dec_inline_inode(inode); - if (!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)) + if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG))) f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE)); + else + f2fs_inode_synced(inode); /* ino == 0, if f2fs_new_inode() was failed t*/ if (inode->i_ino) diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index d92b8e9064cb..cf8f4370d256 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -29,6 +29,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) nid_t ino; struct inode *inode; bool nid_free = false; + int xattr_size = 0; int err; inode = new_inode(dir->i_sb); @@ -86,11 +87,23 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) if (test_opt(sbi, INLINE_XATTR)) set_inode_flag(inode, FI_INLINE_XATTR); + if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode)) set_inode_flag(inode, FI_INLINE_DATA); if 
(f2fs_may_inline_dentry(inode)) set_inode_flag(inode, FI_INLINE_DENTRY); + if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) { + f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode)); + if (f2fs_has_inline_xattr(inode)) + xattr_size = sbi->inline_xattr_size; + /* Otherwise, will be 0 */ + } else if (f2fs_has_inline_xattr(inode) || + f2fs_has_inline_dentry(inode)) { + xattr_size = DEFAULT_INLINE_XATTR_ADDRS; + } + F2FS_I(inode)->i_inline_xattr_size = xattr_size; + f2fs_init_extent_tree(inode, NULL); stat_inc_inline_xattr(inode); @@ -177,6 +190,9 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, nid_t ino = 0; int err; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + err = dquot_initialize(dir); if (err) return err; @@ -221,6 +237,9 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir, struct f2fs_sb_info *sbi = F2FS_I_SB(dir); int err; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + if (f2fs_encrypted_inode(dir) && !fscrypt_has_permitted_context(dir, inode)) return -EPERM; @@ -331,12 +350,15 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, struct inode *inode = NULL; struct f2fs_dir_entry *de; struct page *page; - nid_t ino; + struct dentry *new; + nid_t ino = -1; int err = 0; unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir)); + trace_f2fs_lookup_start(dir, dentry, flags); + if (f2fs_encrypted_inode(dir)) { - int res = fscrypt_get_encryption_info(dir); + err = fscrypt_get_encryption_info(dir); /* * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is @@ -346,18 +368,22 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, if (fscrypt_has_encryption_key(dir)) fscrypt_set_encrypted_dentry(dentry); fscrypt_set_d_op(dentry); - if (res && res != -ENOKEY) - return ERR_PTR(res); + if (err && err != -ENOKEY) + goto out; } - if (dentry->d_name.len > F2FS_NAME_LEN) - return ERR_PTR(-ENAMETOOLONG); + if (dentry->d_name.len > F2FS_NAME_LEN) { + err = -ENAMETOOLONG; + goto out; 
+ } de = f2fs_find_entry(dir, &dentry->d_name, &page); if (!de) { - if (IS_ERR(page)) - return (struct dentry *)page; - return d_splice_alias(inode, dentry); + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto out; + } + goto out_splice; } ino = le32_to_cpu(de->ino); @@ -365,19 +391,21 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, f2fs_put_page(page, 0); inode = f2fs_iget(dir->i_sb, ino); - if (IS_ERR(inode)) - return ERR_CAST(inode); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto out; + } if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) { err = __recover_dot_dentries(dir, root_ino); if (err) - goto err_out; + goto out_iput; } if (f2fs_has_inline_dots(inode)) { err = __recover_dot_dentries(inode, dir->i_ino); if (err) - goto err_out; + goto out_iput; } if (f2fs_encrypted_inode(dir) && (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && @@ -386,12 +414,18 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, "Inconsistent encryption contexts: %lu/%lu", dir->i_ino, inode->i_ino); err = -EPERM; - goto err_out; + goto out_iput; } - return d_splice_alias(inode, dentry); - -err_out: +out_splice: + new = d_splice_alias(inode, dentry); + if (IS_ERR(new)) + err = PTR_ERR(new); + trace_f2fs_lookup_end(dir, dentry, ino, err); + return new; +out_iput: iput(inode); +out: + trace_f2fs_lookup_end(dir, dentry, ino, err); return ERR_PTR(err); } @@ -405,9 +439,15 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) trace_f2fs_unlink_enter(dir, dentry); + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + err = dquot_initialize(dir); if (err) return err; + err = dquot_initialize(inode); + if (err) + return err; de = f2fs_find_entry(dir, &dentry->d_name, &page); if (!de) { @@ -457,6 +497,9 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, struct fscrypt_symlink_data *sd = NULL; int err; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + if (f2fs_encrypted_inode(dir)) { err 
= fscrypt_get_encryption_info(dir); if (err) @@ -563,6 +606,9 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) struct inode *inode; int err; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + err = dquot_initialize(dir); if (err) return err; @@ -615,6 +661,9 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry, struct inode *inode; int err = 0; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + err = dquot_initialize(dir); if (err) return err; @@ -709,6 +758,9 @@ out: static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) { + if (unlikely(f2fs_cp_error(F2FS_I_SB(dir)))) + return -EIO; + if (f2fs_encrypted_inode(dir)) { int err = fscrypt_get_encryption_info(dir); if (err) @@ -720,6 +772,9 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) static int f2fs_create_whiteout(struct inode *dir, struct inode **whiteout) { + if (unlikely(f2fs_cp_error(F2FS_I_SB(dir)))) + return -EIO; + return __f2fs_tmpfile(dir, NULL, S_IFCHR | WHITEOUT_MODE, whiteout); } @@ -739,6 +794,9 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, bool is_old_inline = f2fs_has_inline_dentry(old_dir); int err = -ENOENT; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + if ((f2fs_encrypted_inode(old_dir) && !fscrypt_has_encryption_key(old_dir)) || (f2fs_encrypted_inode(new_dir) && @@ -764,6 +822,12 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, if (err) goto out; + if (new_inode) { + err = dquot_initialize(new_inode); + if (err) + goto out; + } + old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) { if (IS_ERR(old_page)) @@ -932,6 +996,9 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, int old_nlink = 0, new_nlink = 0; int err = -ENOENT; + if (unlikely(f2fs_cp_error(sbi))) + return -EIO; + if ((f2fs_encrypted_inode(old_dir) && !fscrypt_has_encryption_key(old_dir)) || 
(f2fs_encrypted_inode(new_dir) && diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 32474db18ad9..964c99655942 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -46,7 +46,7 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) * give 25%, 25%, 50%, 50%, 50% memory for each components respectively */ if (type == FREE_NIDS) { - mem_size = (nm_i->nid_cnt[FREE_NID_LIST] * + mem_size = (nm_i->nid_cnt[FREE_NID] * sizeof(struct free_nid)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == NAT_ENTRIES) { @@ -63,7 +63,7 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) } else if (type == INO_ENTRIES) { int i; - for (i = 0; i <= UPDATE_INO; i++) + for (i = 0; i < MAX_INO_ENTRY; i++) mem_size += sbi->im[i].ino_num * sizeof(struct ino_entry); mem_size >>= PAGE_SHIFT; @@ -74,6 +74,10 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) atomic_read(&sbi->total_ext_node) * sizeof(struct extent_node)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); + } else if (type == INMEM_PAGES) { + /* it allows 20% / total_ram for inmemory pages */ + mem_size = get_pages(sbi, F2FS_INMEM_PAGES); + res = mem_size < (val.totalram / 5); } else { if (!sbi->sb->s_bdi->wb.dirty_exceeded) return true; @@ -134,6 +138,44 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) return dst_page; } +static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail) +{ + struct nat_entry *new; + + if (no_fail) + new = f2fs_kmem_cache_alloc(nat_entry_slab, + GFP_NOFS | __GFP_ZERO); + else + new = kmem_cache_alloc(nat_entry_slab, + GFP_NOFS | __GFP_ZERO); + if (new) { + nat_set_nid(new, nid); + nat_reset_flag(new); + } + return new; +} + +static void __free_nat_entry(struct nat_entry *e) +{ + kmem_cache_free(nat_entry_slab, e); +} + +/* must be locked by nat_tree_lock */ +static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i, + struct nat_entry *ne, struct 
f2fs_nat_entry *raw_ne, bool no_fail) +{ + if (no_fail) + f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne); + else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne)) + return NULL; + + if (raw_ne) + node_info_from_raw_nat(&ne->ni, raw_ne); + list_add_tail(&ne->list, &nm_i->nat_entries); + nm_i->nat_cnt++; + return ne; +} + static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) { return radix_tree_lookup(&nm_i->nat_root, n); @@ -150,7 +192,7 @@ static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) list_del(&e->list); radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); nm_i->nat_cnt--; - kmem_cache_free(nat_entry_slab, e); + __free_nat_entry(e); } static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, @@ -246,49 +288,29 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) return need_update; } -static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid, - bool no_fail) -{ - struct nat_entry *new; - - if (no_fail) { - new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS); - f2fs_radix_tree_insert(&nm_i->nat_root, nid, new); - } else { - new = kmem_cache_alloc(nat_entry_slab, GFP_NOFS); - if (!new) - return NULL; - if (radix_tree_insert(&nm_i->nat_root, nid, new)) { - kmem_cache_free(nat_entry_slab, new); - return NULL; - } - } - - memset(new, 0, sizeof(struct nat_entry)); - nat_set_nid(new, nid); - nat_reset_flag(new); - list_add_tail(&new->list, &nm_i->nat_entries); - nm_i->nat_cnt++; - return new; -} - +/* must be locked by nat_tree_lock */ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, struct f2fs_nat_entry *ne) { struct f2fs_nm_info *nm_i = NM_I(sbi); - struct nat_entry *e; + struct nat_entry *new, *e; + new = __alloc_nat_entry(nid, false); + if (!new) + return; + + down_write(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); - if (!e) { - e = grab_nat_entry(nm_i, nid, false); - if (e) - node_info_from_raw_nat(&e->ni, ne); - } 
else { + if (!e) + e = __init_nat_entry(nm_i, new, ne, false); + else f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) || nat_get_blkaddr(e) != le32_to_cpu(ne->block_addr) || nat_get_version(e) != ne->version); - } + up_write(&nm_i->nat_tree_lock); + if (e != new) + __free_nat_entry(new); } static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, @@ -296,11 +318,12 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; + struct nat_entry *new = __alloc_nat_entry(ni->nid, true); down_write(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, ni->nid); if (!e) { - e = grab_nat_entry(nm_i, ni->nid, true); + e = __init_nat_entry(nm_i, new, NULL, true); copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); } else if (new_blkaddr == NEW_ADDR) { @@ -312,6 +335,9 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR); } + /* let's free early to reduce memory consumption */ + if (e != new) + __free_nat_entry(new); /* sanity check */ f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr); @@ -327,10 +353,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { unsigned char version = nat_get_version(e); nat_set_version(e, inc_node_version(version)); - - /* in order to reuse the nid */ - if (nm_i->next_scan_nid > ni->nid) - nm_i->next_scan_nid = ni->nid; } /* change address */ @@ -424,9 +446,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) f2fs_put_page(page, 1); cache: /* cache nat entry */ - down_write(&nm_i->nat_tree_lock); cache_nat_entry(sbi, nid, &ne); - up_write(&nm_i->nat_tree_lock); } /* @@ -962,7 +982,8 @@ fail: return err > 0 ? 
0 : err; } -int truncate_xattr_node(struct inode *inode, struct page *page) +/* caller must lock inode page */ +int truncate_xattr_node(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t nid = F2FS_I(inode)->i_xattr_nid; @@ -978,10 +999,7 @@ int truncate_xattr_node(struct inode *inode, struct page *page) f2fs_i_xnid_write(inode, 0); - set_new_dnode(&dn, inode, page, npage, nid); - - if (page) - dn.inode_page_locked = true; + set_new_dnode(&dn, inode, NULL, npage, nid); truncate_node(&dn); return 0; } @@ -1000,7 +1018,7 @@ int remove_inode_page(struct inode *inode) if (err) return err; - err = truncate_xattr_node(inode, dn.inode_page); + err = truncate_xattr_node(inode); if (err) { f2fs_put_dnode(&dn); return err; @@ -1220,7 +1238,8 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) if (!inode) return; - page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0); + page = f2fs_pagecache_get_page(inode->i_mapping, 0, + FGP_LOCK|FGP_NOWAIT, 0); if (!page) goto iput_out; @@ -1244,37 +1263,6 @@ iput_out: iput(inode); } -void move_node_page(struct page *node_page, int gc_type) -{ - if (gc_type == FG_GC) { - struct f2fs_sb_info *sbi = F2FS_P_SB(node_page); - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL, - .nr_to_write = 1, - .for_reclaim = 0, - }; - - set_page_dirty(node_page); - f2fs_wait_on_page_writeback(node_page, NODE, true); - - f2fs_bug_on(sbi, PageWriteback(node_page)); - if (!clear_page_dirty_for_io(node_page)) - goto out_page; - - if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc)) - unlock_page(node_page); - goto release_page; - } else { - /* set page dirty and write it */ - if (!PageWriteback(node_page)) - set_page_dirty(node_page); - } -out_page: - unlock_page(node_page); -release_page: - f2fs_put_page(node_page, 0); -} - static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) { pgoff_t index, end; @@ -1344,6 +1332,7 @@ static int __write_node_page(struct page *page, 
bool atomic, bool *submitted, struct node_info ni; struct f2fs_io_info fio = { .sbi = sbi, + .ino = ino_of_node(page), .type = NODE, .op = REQ_OP_WRITE, .op_flags = wbc_to_write_flags(wbc), @@ -1416,6 +1405,37 @@ redirty_out: return AOP_WRITEPAGE_ACTIVATE; } +void move_node_page(struct page *node_page, int gc_type) +{ + if (gc_type == FG_GC) { + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = 1, + .for_reclaim = 0, + }; + + set_page_dirty(node_page); + f2fs_wait_on_page_writeback(node_page, NODE, true); + + f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page)); + if (!clear_page_dirty_for_io(node_page)) + goto out_page; + + if (__write_node_page(node_page, false, NULL, + &wbc, false, FS_GC_NODE_IO)) + unlock_page(node_page); + goto release_page; + } else { + /* set page dirty and write it */ + if (!PageWriteback(node_page)) + set_page_dirty(node_page); + } +out_page: + unlock_page(node_page); +release_page: + f2fs_put_page(node_page, 0); +} + static int f2fs_write_node_page(struct page *page, struct writeback_control *wbc) { @@ -1764,35 +1784,54 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, return radix_tree_lookup(&nm_i->free_nid_root, n); } -static int __insert_nid_to_list(struct f2fs_sb_info *sbi, - struct free_nid *i, enum nid_list list, bool new) +static int __insert_free_nid(struct f2fs_sb_info *sbi, + struct free_nid *i, enum nid_state state) { struct f2fs_nm_info *nm_i = NM_I(sbi); - if (new) { - int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); - if (err) - return err; - } + int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); + if (err) + return err; - f2fs_bug_on(sbi, list == FREE_NID_LIST ? 
i->state != NID_NEW : - i->state != NID_ALLOC); - nm_i->nid_cnt[list]++; - list_add_tail(&i->list, &nm_i->nid_list[list]); + f2fs_bug_on(sbi, state != i->state); + nm_i->nid_cnt[state]++; + if (state == FREE_NID) + list_add_tail(&i->list, &nm_i->free_nid_list); return 0; } -static void __remove_nid_from_list(struct f2fs_sb_info *sbi, - struct free_nid *i, enum nid_list list, bool reuse) +static void __remove_free_nid(struct f2fs_sb_info *sbi, + struct free_nid *i, enum nid_state state) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + + f2fs_bug_on(sbi, state != i->state); + nm_i->nid_cnt[state]--; + if (state == FREE_NID) + list_del(&i->list); + radix_tree_delete(&nm_i->free_nid_root, i->nid); +} + +static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i, + enum nid_state org_state, enum nid_state dst_state) { struct f2fs_nm_info *nm_i = NM_I(sbi); - f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW : - i->state != NID_ALLOC); - nm_i->nid_cnt[list]--; - list_del(&i->list); - if (!reuse) - radix_tree_delete(&nm_i->free_nid_root, i->nid); + f2fs_bug_on(sbi, org_state != i->state); + i->state = dst_state; + nm_i->nid_cnt[org_state]--; + nm_i->nid_cnt[dst_state]++; + + switch (dst_state) { + case PREALLOC_NID: + list_del(&i->list); + break; + case FREE_NID: + list_add_tail(&i->list, &nm_i->free_nid_list); + break; + default: + BUG_ON(1); + } } /* return if the nid is recognized as free */ @@ -1810,7 +1849,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); i->nid = nid; - i->state = NID_NEW; + i->state = FREE_NID; if (radix_tree_preload(GFP_NOFS)) goto err; @@ -1823,7 +1862,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) * - f2fs_create * - f2fs_new_inode * - alloc_nid - * - __insert_nid_to_list(ALLOC_NID_LIST) + * - __insert_nid_to_list(PREALLOC_NID) * - f2fs_balance_fs_bg * - build_free_nids * - __build_free_nids @@ -1836,8 +1875,8 @@ 
static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) * - new_node_page * - set_node_addr * - alloc_nid_done - * - __remove_nid_from_list(ALLOC_NID_LIST) - * - __insert_nid_to_list(FREE_NID_LIST) + * - __remove_nid_from_list(PREALLOC_NID) + * - __insert_nid_to_list(FREE_NID) */ ne = __lookup_nat_cache(nm_i, nid); if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || @@ -1846,13 +1885,13 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) e = __lookup_free_nid_list(nm_i, nid); if (e) { - if (e->state == NID_NEW) + if (e->state == FREE_NID) ret = true; goto err_out; } } ret = true; - err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true); + err = __insert_free_nid(sbi, i, FREE_NID); err_out: spin_unlock(&nm_i->nid_list_lock); radix_tree_preload_end(); @@ -1870,8 +1909,8 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) spin_lock(&nm_i->nid_list_lock); i = __lookup_free_nid_list(nm_i, nid); - if (i && i->state == NID_NEW) { - __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); + if (i && i->state == FREE_NID) { + __remove_free_nid(sbi, i, FREE_NID); need_free = true; } spin_unlock(&nm_i->nid_list_lock); @@ -1890,15 +1929,18 @@ static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) return; - if (set) + if (set) { + if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) + return; __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); - else - __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); - - if (set) nm_i->free_nid_count[nat_ofs]++; - else if (!build) - nm_i->free_nid_count[nat_ofs]--; + } else { + if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) + return; + __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); + if (!build) + nm_i->free_nid_count[nat_ofs]--; + } } static void scan_nat_page(struct f2fs_sb_info *sbi, @@ -1933,12 +1975,32 @@ static void scan_nat_page(struct f2fs_sb_info *sbi, } } -static void 
scan_free_nid_bits(struct f2fs_sb_info *sbi) +static void scan_curseg_cache(struct f2fs_sb_info *sbi) { - struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; + int i; + + down_read(&curseg->journal_rwsem); + for (i = 0; i < nats_in_cursum(journal); i++) { + block_t addr; + nid_t nid; + + addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); + nid = le32_to_cpu(nid_in_journal(journal, i)); + if (addr == NULL_ADDR) + add_free_nid(sbi, nid, true); + else + remove_free_nid(sbi, nid); + } + up_read(&curseg->journal_rwsem); +} + +static void scan_free_nid_bits(struct f2fs_sb_info *sbi) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned int i, idx; + nid_t nid; down_read(&nm_i->nat_tree_lock); @@ -1948,40 +2010,27 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi) if (!nm_i->free_nid_count[i]) continue; for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { - nid_t nid; - - if (!test_bit_le(idx, nm_i->free_nid_bitmap[i])) - continue; + idx = find_next_bit_le(nm_i->free_nid_bitmap[i], + NAT_ENTRY_PER_BLOCK, idx); + if (idx >= NAT_ENTRY_PER_BLOCK) + break; nid = i * NAT_ENTRY_PER_BLOCK + idx; add_free_nid(sbi, nid, true); - if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS) + if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS) goto out; } } out: - down_read(&curseg->journal_rwsem); - for (i = 0; i < nats_in_cursum(journal); i++) { - block_t addr; - nid_t nid; + scan_curseg_cache(sbi); - addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); - nid = le32_to_cpu(nid_in_journal(journal, i)); - if (addr == NULL_ADDR) - add_free_nid(sbi, nid, true); - else - remove_free_nid(sbi, nid); - } - up_read(&curseg->journal_rwsem); up_read(&nm_i->nat_tree_lock); } static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) { struct f2fs_nm_info *nm_i = NM_I(sbi); - struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); - struct f2fs_journal *journal = 
curseg->journal; int i = 0; nid_t nid = nm_i->next_scan_nid; @@ -1989,7 +2038,7 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) nid = 0; /* Enough entries */ - if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK) + if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) return; if (!sync && !available_free_memory(sbi, FREE_NIDS)) @@ -1999,7 +2048,7 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) /* try to find free nids in free_nid_bitmap */ scan_free_nid_bits(sbi); - if (nm_i->nid_cnt[FREE_NID_LIST]) + if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) return; } @@ -2027,18 +2076,8 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) nm_i->next_scan_nid = nid; /* find free nids from current sum_pages */ - down_read(&curseg->journal_rwsem); - for (i = 0; i < nats_in_cursum(journal); i++) { - block_t addr; + scan_curseg_cache(sbi); - addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); - nid = le32_to_cpu(nid_in_journal(journal, i)); - if (addr == NULL_ADDR) - add_free_nid(sbi, nid, true); - else - remove_free_nid(sbi, nid); - } - up_read(&curseg->journal_rwsem); up_read(&nm_i->nat_tree_lock); ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), @@ -2076,15 +2115,13 @@ retry: } /* We should not use stale free nids created by build_free_nids */ - if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) { - f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST])); - i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST], + if (nm_i->nid_cnt[FREE_NID] && !on_build_free_nids(nm_i)) { + f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list)); + i = list_first_entry(&nm_i->free_nid_list, struct free_nid, list); *nid = i->nid; - __remove_nid_from_list(sbi, i, FREE_NID_LIST, true); - i->state = NID_ALLOC; - __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); + __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID); nm_i->available_nids--; update_free_nid_bitmap(sbi, 
*nid, false, false); @@ -2110,7 +2147,7 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) spin_lock(&nm_i->nid_list_lock); i = __lookup_free_nid_list(nm_i, nid); f2fs_bug_on(sbi, !i); - __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false); + __remove_free_nid(sbi, i, PREALLOC_NID); spin_unlock(&nm_i->nid_list_lock); kmem_cache_free(free_nid_slab, i); @@ -2133,12 +2170,10 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) f2fs_bug_on(sbi, !i); if (!available_free_memory(sbi, FREE_NIDS)) { - __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false); + __remove_free_nid(sbi, i, PREALLOC_NID); need_free = true; } else { - __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true); - i->state = NID_NEW; - __insert_nid_to_list(sbi, i, FREE_NID_LIST, false); + __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID); } nm_i->available_nids++; @@ -2157,20 +2192,19 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) struct free_nid *i, *next; int nr = nr_shrink; - if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS) + if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) return 0; if (!mutex_trylock(&nm_i->build_lock)) return 0; spin_lock(&nm_i->nid_list_lock); - list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST], - list) { + list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { if (nr_shrink <= 0 || - nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS) + nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) break; - __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); + __remove_free_nid(sbi, i, FREE_NID); kmem_cache_free(free_nid_slab, i); nr_shrink--; } @@ -2196,8 +2230,8 @@ void recover_inline_xattr(struct inode *inode, struct page *page) goto update_inode; } - dst_addr = inline_xattr_addr(ipage); - src_addr = inline_xattr_addr(page); + dst_addr = inline_xattr_addr(inode, ipage); + src_addr = inline_xattr_addr(inode, page); inline_size = inline_xattr_size(inode); f2fs_wait_on_page_writeback(ipage, NODE, true); @@ -2286,6 +2320,12 @@ retry: dst->i_inline = 
src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR); if (dst->i_inline & F2FS_EXTRA_ATTR) { dst->i_extra_isize = src->i_extra_isize; + + if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) && + F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), + i_inline_xattr_size)) + dst->i_inline_xattr_size = src->i_inline_xattr_size; + if (f2fs_sb_has_project_quota(sbi->sb) && F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), i_projid)) @@ -2357,8 +2397,8 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi) ne = __lookup_nat_cache(nm_i, nid); if (!ne) { - ne = grab_nat_entry(nm_i, nid, true); - node_info_from_raw_nat(&ne->ni, &raw_ne); + ne = __alloc_nat_entry(nid, true); + __init_nat_entry(nm_i, ne, &raw_ne, true); } /* @@ -2404,15 +2444,17 @@ static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK; struct f2fs_nat_block *nat_blk = page_address(page); int valid = 0; - int i; + int i = 0; if (!enabled_nat_bits(sbi, NULL)) return; - for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) { - if (start_nid == 0 && i == 0) - valid++; - if (nat_blk->entries[i].block_addr) + if (nat_index == 0) { + valid = 1; + i = 1; + } + for (; i < NAT_ENTRY_PER_BLOCK; i++) { + if (nat_blk->entries[i].block_addr != NULL_ADDR) valid++; } if (valid == 0) { @@ -2607,7 +2649,7 @@ static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) __set_bit_le(i, nm_i->nat_block_bitmap); nid = i * NAT_ENTRY_PER_BLOCK; - last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK; + last_nid = nid + NAT_ENTRY_PER_BLOCK; spin_lock(&NM_I(sbi)->nid_list_lock); for (; nid < last_nid; nid++) @@ -2642,16 +2684,15 @@ static int init_node_manager(struct f2fs_sb_info *sbi) /* not used nids: 0, node, meta, (and root counted as valid node) */ nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - F2FS_RESERVED_NODE_NUM; - nm_i->nid_cnt[FREE_NID_LIST] = 0; - nm_i->nid_cnt[ALLOC_NID_LIST] = 0; + nm_i->nid_cnt[FREE_NID] = 0; + 
nm_i->nid_cnt[PREALLOC_NID] = 0; nm_i->nat_cnt = 0; nm_i->ram_thresh = DEF_RAM_THRESHOLD; nm_i->ra_nid_pages = DEF_RA_NID_PAGES; nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); - INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]); - INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]); + INIT_LIST_HEAD(&nm_i->free_nid_list); INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); INIT_LIST_HEAD(&nm_i->nat_entries); @@ -2743,16 +2784,15 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) /* destroy free nid list */ spin_lock(&nm_i->nid_list_lock); - list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST], - list) { - __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); + list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) { + __remove_free_nid(sbi, i, FREE_NID); spin_unlock(&nm_i->nid_list_lock); kmem_cache_free(free_nid_slab, i); spin_lock(&nm_i->nid_list_lock); } - f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]); - f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]); - f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST])); + f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]); + f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]); + f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list)); spin_unlock(&nm_i->nid_list_lock); /* destroy nat cache */ diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index bb53e9955ff2..0ee3e5ff49a3 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -140,6 +140,7 @@ enum mem_type { DIRTY_DENTS, /* indicates dirty dentry pages */ INO_ENTRIES, /* indicates inode entries */ EXTENT_CACHE, /* indicates extent cache */ + INMEM_PAGES, /* indicates inmemory pages */ BASE_CHECK, /* check kernel status */ }; @@ -150,18 +151,10 @@ struct nat_entry_set { unsigned int entry_cnt; /* the # of nat entries in set */ }; -/* - * For free nid mangement - */ -enum nid_state { - NID_NEW, /* newly added to free nid list */ - NID_ALLOC /* it is allocated */ -}; - 
struct free_nid { struct list_head list; /* for free node id list */ nid_t nid; /* node id */ - int state; /* in use or not: NID_NEW or NID_ALLOC */ + int state; /* in use or not: FREE_NID or PREALLOC_NID */ }; static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid) @@ -170,12 +163,11 @@ static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid) struct free_nid *fnid; spin_lock(&nm_i->nid_list_lock); - if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) { + if (nm_i->nid_cnt[FREE_NID] <= 0) { spin_unlock(&nm_i->nid_list_lock); return; } - fnid = list_first_entry(&nm_i->nid_list[FREE_NID_LIST], - struct free_nid, list); + fnid = list_first_entry(&nm_i->free_nid_list, struct free_nid, list); *nid = fnid->nid; spin_unlock(&nm_i->nid_list_lock); } diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 9626758bc762..92c57ace1939 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -594,6 +594,9 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) int ret = 0; unsigned long s_flags = sbi->sb->s_flags; bool need_writecp = false; +#ifdef CONFIG_QUOTA + int quota_enabled; +#endif if (s_flags & MS_RDONLY) { f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs"); @@ -604,7 +607,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) /* Needed for iput() to work correctly and not trash data */ sbi->sb->s_flags |= MS_ACTIVE; /* Turn on quotas so that they are updated correctly */ - f2fs_enable_quota_files(sbi); + quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY); #endif fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry", @@ -665,7 +668,8 @@ skip: out: #ifdef CONFIG_QUOTA /* Turn quotas off */ - f2fs_quota_off_umount(sbi->sb); + if (quota_enabled) + f2fs_quota_off_umount(sbi->sb); #endif sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */ diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index f5c494389483..94939a5a96c8 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ 
-181,11 +181,12 @@ bool need_SSR(struct f2fs_sb_info *sbi) return true; return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs + - 2 * reserved_sections(sbi)); + SM_I(sbi)->min_ssr_sections + reserved_sections(sbi)); } void register_inmem_page(struct inode *inode, struct page *page) { + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); struct inmem_pages *new; @@ -204,6 +205,10 @@ void register_inmem_page(struct inode *inode, struct page *page) mutex_lock(&fi->inmem_lock); get_page(page); list_add_tail(&new->list, &fi->inmem_pages); + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); + if (list_empty(&fi->inmem_ilist)) + list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]); + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); mutex_unlock(&fi->inmem_lock); @@ -262,12 +267,41 @@ next: return err; } +void drop_inmem_pages_all(struct f2fs_sb_info *sbi) +{ + struct list_head *head = &sbi->inode_list[ATOMIC_FILE]; + struct inode *inode; + struct f2fs_inode_info *fi; +next: + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); + if (list_empty(head)) { + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); + return; + } + fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist); + inode = igrab(&fi->vfs_inode); + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); + + if (inode) { + drop_inmem_pages(inode); + iput(inode); + } + congestion_wait(BLK_RW_ASYNC, HZ/50); + cond_resched(); + goto next; +} + void drop_inmem_pages(struct inode *inode) { + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); mutex_lock(&fi->inmem_lock); __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); + if (!list_empty(&fi->inmem_ilist)) + list_del_init(&fi->inmem_ilist); + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); mutex_unlock(&fi->inmem_lock); clear_inode_flag(inode, FI_ATOMIC_FILE); @@ -313,6 +347,7 @@ static int 
__commit_inmem_pages(struct inode *inode, struct inmem_pages *cur, *tmp; struct f2fs_io_info fio = { .sbi = sbi, + .ino = inode->i_ino, .type = DATA, .op = REQ_OP_WRITE, .op_flags = REQ_SYNC | REQ_PRIO, @@ -398,6 +433,10 @@ int commit_inmem_pages(struct inode *inode) /* drop all uncommitted pages */ __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); } + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); + if (!list_empty(&fi->inmem_ilist)) + list_del_init(&fi->inmem_ilist); + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); mutex_unlock(&fi->inmem_lock); clear_inode_flag(inode, FI_ATOMIC_COMMIT); @@ -472,7 +511,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) static int __submit_flush_wait(struct f2fs_sb_info *sbi, struct block_device *bdev) { - struct bio *bio = f2fs_bio_alloc(0); + struct bio *bio = f2fs_bio_alloc(sbi, 0, true); int ret; bio->bi_rw = REQ_OP_WRITE; @@ -485,15 +524,17 @@ static int __submit_flush_wait(struct f2fs_sb_info *sbi, return ret; } -static int submit_flush_wait(struct f2fs_sb_info *sbi) +static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino) { - int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev); + int ret = 0; int i; - if (!sbi->s_ndevs || ret) - return ret; + if (!sbi->s_ndevs) + return __submit_flush_wait(sbi, sbi->sb->s_bdev); - for (i = 1; i < sbi->s_ndevs; i++) { + for (i = 0; i < sbi->s_ndevs; i++) { + if (!is_dirty_device(sbi, ino, i, FLUSH_INO)) + continue; ret = __submit_flush_wait(sbi, FDEV(i).bdev); if (ret) break; @@ -519,7 +560,9 @@ repeat: fcc->dispatch_list = llist_del_all(&fcc->issue_list); fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); - ret = submit_flush_wait(sbi); + cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode); + + ret = submit_flush_wait(sbi, cmd->ino); atomic_inc(&fcc->issued_flush); llist_for_each_entry_safe(cmd, next, @@ -537,7 +580,7 @@ repeat: goto repeat; } -int f2fs_issue_flush(struct f2fs_sb_info *sbi) +int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino) { 
struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; struct flush_cmd cmd; @@ -547,19 +590,20 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi) return 0; if (!test_opt(sbi, FLUSH_MERGE)) { - ret = submit_flush_wait(sbi); + ret = submit_flush_wait(sbi, ino); atomic_inc(&fcc->issued_flush); return ret; } - if (atomic_inc_return(&fcc->issing_flush) == 1) { - ret = submit_flush_wait(sbi); + if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) { + ret = submit_flush_wait(sbi, ino); atomic_dec(&fcc->issing_flush); atomic_inc(&fcc->issued_flush); return ret; } + cmd.ino = ino; init_completion(&cmd.wait); llist_add(&cmd.llnode, &fcc->issue_list); @@ -583,7 +627,7 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi) } else { struct flush_cmd *tmp, *next; - ret = submit_flush_wait(sbi); + ret = submit_flush_wait(sbi, ino); llist_for_each_entry_safe(tmp, next, list, llnode) { if (tmp == &cmd) { @@ -653,6 +697,28 @@ void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free) } } +int f2fs_flush_device_cache(struct f2fs_sb_info *sbi) +{ + int ret = 0, i; + + if (!sbi->s_ndevs) + return 0; + + for (i = 1; i < sbi->s_ndevs; i++) { + if (!f2fs_test_bit(i, (char *)&sbi->dirty_device)) + continue; + ret = __submit_flush_wait(sbi, FDEV(i).bdev); + if (ret) + break; + + spin_lock(&sbi->dev_lock); + f2fs_clear_bit(i, (char *)&sbi->dirty_device); + spin_unlock(&sbi->dev_lock); + } + + return ret; +} + static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, enum dirty_type dirty_type) { @@ -794,6 +860,8 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi, { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len); + f2fs_bug_on(sbi, dc->ref); if (dc->error == -EOPNOTSUPP) @@ -875,7 +943,7 @@ static int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, if (ret) return ret; } - bio = f2fs_bio_alloc(1); + bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, 1); 
bio->bi_iter.bi_sector = sector; bio->bi_bdev = bdev; bio_set_op_attrs(bio, op, 0); @@ -926,10 +994,14 @@ void __check_sit_bitmap(struct f2fs_sb_info *sbi, /* this function is copied from blkdev_issue_discard from block/blk-lib.c */ static void __submit_discard_cmd(struct f2fs_sb_info *sbi, - struct discard_cmd *dc) + struct discard_policy *dpolicy, + struct discard_cmd *dc) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ? + &(dcc->fstrim_list) : &(dcc->wait_list); struct bio *bio = NULL; + int flag = dpolicy->sync ? REQ_SYNC : 0; if (dc->state != D_PREP) return; @@ -948,8 +1020,8 @@ static void __submit_discard_cmd(struct f2fs_sb_info *sbi, if (bio) { bio->bi_private = dc; bio->bi_end_io = f2fs_submit_discard_endio; - submit_bio(REQ_SYNC, bio); - list_move_tail(&dc->list, &dcc->wait_list); + submit_bio(flag, bio); + list_move_tail(&dc->list, wait_list); __check_sit_bitmap(sbi, dc->start, dc->start + dc->len); f2fs_update_iostat(sbi, FS_DISCARD, 1); @@ -966,7 +1038,7 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi, struct rb_node *insert_parent) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - struct rb_node **p = &dcc->root.rb_node; + struct rb_node **p; struct rb_node *parent = NULL; struct discard_cmd *dc = NULL; @@ -1134,58 +1206,107 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi, return 0; } -static int __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond) +static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy, + unsigned int start, unsigned int end) +{ + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct discard_cmd *prev_dc = NULL, *next_dc = NULL; + struct rb_node **insert_p = NULL, *insert_parent = NULL; + struct discard_cmd *dc; + struct blk_plug plug; + int issued; + +next: + issued = 0; + + mutex_lock(&dcc->cmd_lock); + f2fs_bug_on(sbi, 
!__check_rb_tree_consistence(sbi, &dcc->root)); + + dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root, + NULL, start, + (struct rb_entry **)&prev_dc, + (struct rb_entry **)&next_dc, + &insert_p, &insert_parent, true); + if (!dc) + dc = next_dc; + + blk_start_plug(&plug); + + while (dc && dc->lstart <= end) { + struct rb_node *node; + + if (dc->len < dpolicy->granularity) + goto skip; + + if (dc->state != D_PREP) { + list_move_tail(&dc->list, &dcc->fstrim_list); + goto skip; + } + + __submit_discard_cmd(sbi, dpolicy, dc); + + if (++issued >= dpolicy->max_requests) { + start = dc->lstart + dc->len; + + blk_finish_plug(&plug); + mutex_unlock(&dcc->cmd_lock); + + schedule(); + + goto next; + } +skip: + node = rb_next(&dc->rb_node); + dc = rb_entry_safe(node, struct discard_cmd, rb_node); + + if (fatal_signal_pending(current)) + break; + } + + blk_finish_plug(&plug); + mutex_unlock(&dcc->cmd_lock); +} + +static int __issue_discard_cmd(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct list_head *pend_list; struct discard_cmd *dc, *tmp; struct blk_plug plug; - int iter = 0, issued = 0; - int i; + int i, iter = 0, issued = 0; bool io_interrupted = false; - mutex_lock(&dcc->cmd_lock); - f2fs_bug_on(sbi, - !__check_rb_tree_consistence(sbi, &dcc->root)); - blk_start_plug(&plug); - for (i = MAX_PLIST_NUM - 1; - i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) { + for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { + if (i + 1 < dpolicy->granularity) + break; pend_list = &dcc->pend_list[i]; + + mutex_lock(&dcc->cmd_lock); + f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root)); + blk_start_plug(&plug); list_for_each_entry_safe(dc, tmp, pend_list, list) { f2fs_bug_on(sbi, dc->state != D_PREP); - /* Hurry up to finish fstrim */ - if (dcc->pend_list_tag[i] & P_TRIM) { - __submit_discard_cmd(sbi, dc); - issued++; - - if (fatal_signal_pending(current)) - break; - continue; - } - - if (!issue_cond) 
{ - __submit_discard_cmd(sbi, dc); - issued++; - continue; - } - - if (is_idle(sbi)) { - __submit_discard_cmd(sbi, dc); - issued++; - } else { + if (dpolicy->io_aware && i < dpolicy->io_aware_gran && + !is_idle(sbi)) { io_interrupted = true; + goto skip; } - if (++iter >= DISCARD_ISSUE_RATE) - goto out; + __submit_discard_cmd(sbi, dpolicy, dc); + issued++; +skip: + if (++iter >= dpolicy->max_requests) + break; } - if (list_empty(pend_list) && dcc->pend_list_tag[i] & P_TRIM) - dcc->pend_list_tag[i] &= (~P_TRIM); + blk_finish_plug(&plug); + mutex_unlock(&dcc->cmd_lock); + + if (iter >= dpolicy->max_requests) + break; } -out: - blk_finish_plug(&plug); - mutex_unlock(&dcc->cmd_lock); if (!issued && io_interrupted) issued = -1; @@ -1193,12 +1314,13 @@ out: return issued; } -static void __drop_discard_cmd(struct f2fs_sb_info *sbi) +static bool __drop_discard_cmd(struct f2fs_sb_info *sbi) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct list_head *pend_list; struct discard_cmd *dc, *tmp; int i; + bool dropped = false; mutex_lock(&dcc->cmd_lock); for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { @@ -1206,39 +1328,58 @@ static void __drop_discard_cmd(struct f2fs_sb_info *sbi) list_for_each_entry_safe(dc, tmp, pend_list, list) { f2fs_bug_on(sbi, dc->state != D_PREP); __remove_discard_cmd(sbi, dc); + dropped = true; } } mutex_unlock(&dcc->cmd_lock); + + return dropped; } -static void __wait_one_discard_bio(struct f2fs_sb_info *sbi, +static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi, struct discard_cmd *dc) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + unsigned int len = 0; wait_for_completion_io(&dc->wait); mutex_lock(&dcc->cmd_lock); f2fs_bug_on(sbi, dc->state != D_DONE); dc->ref--; - if (!dc->ref) + if (!dc->ref) { + if (!dc->error) + len = dc->len; __remove_discard_cmd(sbi, dc); + } mutex_unlock(&dcc->cmd_lock); + + return len; } -static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond) +static unsigned int 
__wait_discard_cmd_range(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy, + block_t start, block_t end) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - struct list_head *wait_list = &(dcc->wait_list); + struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ? + &(dcc->fstrim_list) : &(dcc->wait_list); struct discard_cmd *dc, *tmp; bool need_wait; + unsigned int trimmed = 0; next: need_wait = false; mutex_lock(&dcc->cmd_lock); list_for_each_entry_safe(dc, tmp, wait_list, list) { - if (!wait_cond || (dc->state == D_DONE && !dc->ref)) { + if (dc->lstart + dc->len <= start || end <= dc->lstart) + continue; + if (dc->len < dpolicy->granularity) + continue; + if (dc->state == D_DONE && !dc->ref) { wait_for_completion_io(&dc->wait); + if (!dc->error) + trimmed += dc->len; __remove_discard_cmd(sbi, dc); } else { dc->ref++; @@ -1249,9 +1390,17 @@ next: mutex_unlock(&dcc->cmd_lock); if (need_wait) { - __wait_one_discard_bio(sbi, dc); + trimmed += __wait_one_discard_bio(sbi, dc); goto next; } + + return trimmed; +} + +static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy) +{ + __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX); } /* This should be covered by global mutex, &sit_i->sentry_lock */ @@ -1289,23 +1438,19 @@ void stop_discard_thread(struct f2fs_sb_info *sbi) } } -/* This comes from f2fs_put_super and f2fs_trim_fs */ -void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount) -{ - __issue_discard_cmd(sbi, false); - __drop_discard_cmd(sbi); - __wait_discard_cmd(sbi, !umount); -} - -static void mark_discard_range_all(struct f2fs_sb_info *sbi) +/* This comes from f2fs_put_super */ +bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - int i; + struct discard_policy dpolicy; + bool dropped; - mutex_lock(&dcc->cmd_lock); - for (i = 0; i < MAX_PLIST_NUM; i++) - dcc->pend_list_tag[i] |= P_TRIM; - mutex_unlock(&dcc->cmd_lock); + 
init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity); + __issue_discard_cmd(sbi, &dpolicy); + dropped = __drop_discard_cmd(sbi); + __wait_all_discard_cmd(sbi, &dpolicy); + + return dropped; } static int issue_discard_thread(void *data) @@ -1313,12 +1458,16 @@ static int issue_discard_thread(void *data) struct f2fs_sb_info *sbi = data; struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; wait_queue_head_t *q = &dcc->discard_wait_queue; + struct discard_policy dpolicy; unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME; int issued; set_freezable(); do { + init_discard_policy(&dpolicy, DPOLICY_BG, + dcc->discard_granularity); + wait_event_interruptible_timeout(*q, kthread_should_stop() || freezing(current) || dcc->discard_wake, @@ -1331,17 +1480,18 @@ static int issue_discard_thread(void *data) if (dcc->discard_wake) { dcc->discard_wake = 0; if (sbi->gc_thread && sbi->gc_thread->gc_urgent) - mark_discard_range_all(sbi); + init_discard_policy(&dpolicy, + DPOLICY_FORCE, 1); } sb_start_intwrite(sbi->sb); - issued = __issue_discard_cmd(sbi, true); + issued = __issue_discard_cmd(sbi, &dpolicy); if (issued) { - __wait_discard_cmd(sbi, true); - wait_ms = DEF_MIN_DISCARD_ISSUE_TIME; + __wait_all_discard_cmd(sbi, &dpolicy); + wait_ms = dpolicy.min_interval; } else { - wait_ms = DEF_MAX_DISCARD_ISSUE_TIME; + wait_ms = dpolicy.max_interval; } sb_end_intwrite(sbi->sb); @@ -1605,7 +1755,6 @@ find_next: f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, len); - cpc->trimmed += len; total_len += len; } else { next_pos = find_next_bit_le(entry->discard_map, @@ -1626,6 +1775,37 @@ skip: wake_up_discard_thread(sbi, false); } +void init_discard_policy(struct discard_policy *dpolicy, + int discard_type, unsigned int granularity) +{ + /* common policy */ + dpolicy->type = discard_type; + dpolicy->sync = true; + dpolicy->granularity = granularity; + + if (discard_type == DPOLICY_BG) { + dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; + dpolicy->max_interval = 
DEF_MAX_DISCARD_ISSUE_TIME; + dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST; + dpolicy->io_aware_gran = MAX_PLIST_NUM; + dpolicy->io_aware = true; + } else if (discard_type == DPOLICY_FORCE) { + dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; + dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME; + dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST; + dpolicy->io_aware_gran = MAX_PLIST_NUM; + dpolicy->io_aware = true; + } else if (discard_type == DPOLICY_FSTRIM) { + dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST; + dpolicy->io_aware_gran = MAX_PLIST_NUM; + dpolicy->io_aware = false; + } else if (discard_type == DPOLICY_UMOUNT) { + dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST; + dpolicy->io_aware_gran = MAX_PLIST_NUM; + dpolicy->io_aware = false; + } +} + static int create_discard_cmd_control(struct f2fs_sb_info *sbi) { dev_t dev = sbi->sb->s_bdev->bd_dev; @@ -1643,12 +1823,10 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi) dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY; INIT_LIST_HEAD(&dcc->entry_list); - for (i = 0; i < MAX_PLIST_NUM; i++) { + for (i = 0; i < MAX_PLIST_NUM; i++) INIT_LIST_HEAD(&dcc->pend_list[i]); - if (i >= dcc->discard_granularity - 1) - dcc->pend_list_tag[i] |= P_ACTIVE; - } INIT_LIST_HEAD(&dcc->wait_list); + INIT_LIST_HEAD(&dcc->fstrim_list); mutex_init(&dcc->cmd_lock); atomic_set(&dcc->issued_discard, 0); atomic_set(&dcc->issing_discard, 0); @@ -1796,16 +1974,6 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) get_sec_entry(sbi, segno)->valid_blocks += del; } -void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new) -{ - update_sit_entry(sbi, new, 1); - if (GET_SEGNO(sbi, old) != NULL_SEGNO) - update_sit_entry(sbi, old, -1); - - locate_dirty_segment(sbi, GET_SEGNO(sbi, old)); - locate_dirty_segment(sbi, GET_SEGNO(sbi, new)); -} - void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) { unsigned int segno = GET_SEGNO(sbi, addr); @@ -1816,14 +1984,14 @@ 
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) return; /* add it into sit main buffer */ - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); update_sit_entry(sbi, addr, -1); /* add it into dirty seglist */ locate_dirty_segment(sbi, segno); - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); } bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) @@ -1836,7 +2004,7 @@ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) return true; - mutex_lock(&sit_i->sentry_lock); + down_read(&sit_i->sentry_lock); segno = GET_SEGNO(sbi, blkaddr); se = get_seg_entry(sbi, segno); @@ -1845,7 +2013,7 @@ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) if (f2fs_test_bit(offset, se->ckpt_valid_map)) is_cp = true; - mutex_unlock(&sit_i->sentry_lock); + up_read(&sit_i->sentry_lock); return is_cp; } @@ -1903,12 +2071,8 @@ struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr) { struct page *page = grab_meta_page(sbi, blk_addr); - void *dst = page_address(page); - if (src) - memcpy(dst, src, PAGE_SIZE); - else - memset(dst, 0, PAGE_SIZE); + memcpy(page_address(page), src, PAGE_SIZE); set_page_dirty(page); f2fs_put_page(page, 1); } @@ -2007,7 +2171,6 @@ find_other_zone: } secno = left_start; skip_left: - hint = secno; segno = GET_SEG_FROM_SEC(sbi, secno); zoneno = GET_ZONE_FROM_SEC(sbi, secno); @@ -2242,12 +2405,16 @@ void allocate_new_segments(struct f2fs_sb_info *sbi) unsigned int old_segno; int i; + down_write(&SIT_I(sbi)->sentry_lock); + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { curseg = CURSEG_I(sbi, i); old_segno = curseg->segno; SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true); locate_dirty_segment(sbi, old_segno); } + + up_write(&SIT_I(sbi)->sentry_lock); } static const struct segment_allocation default_salloc_ops = { @@ 
-2259,14 +2426,14 @@ bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc) __u64 trim_start = cpc->trim_start; bool has_candidate = false; - mutex_lock(&SIT_I(sbi)->sentry_lock); + down_write(&SIT_I(sbi)->sentry_lock); for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) { if (add_discard_addrs(sbi, cpc, true)) { has_candidate = true; break; } } - mutex_unlock(&SIT_I(sbi)->sentry_lock); + up_write(&SIT_I(sbi)->sentry_lock); cpc->trim_start = trim_start; return has_candidate; @@ -2276,14 +2443,16 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) { __u64 start = F2FS_BYTES_TO_BLK(range->start); __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; - unsigned int start_segno, end_segno; + unsigned int start_segno, end_segno, cur_segno; + block_t start_block, end_block; struct cp_control cpc; + struct discard_policy dpolicy; + unsigned long long trimmed = 0; int err = 0; if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) return -EINVAL; - cpc.trimmed = 0; if (end <= MAIN_BLKADDR(sbi)) goto out; @@ -2297,12 +2466,14 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); end_segno = (end >= MAX_BLKADDR(sbi)) ? 
MAIN_SEGS(sbi) - 1 : GET_SEGNO(sbi, end); + cpc.reason = CP_DISCARD; cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen)); /* do checkpoint to issue discard commands safely */ - for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) { - cpc.trim_start = start_segno; + for (cur_segno = start_segno; cur_segno <= end_segno; + cur_segno = cpc.trim_end + 1) { + cpc.trim_start = cur_segno; if (sbi->discard_blks == 0) break; @@ -2310,7 +2481,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) cpc.trim_end = end_segno; else cpc.trim_end = min_t(unsigned int, - rounddown(start_segno + + rounddown(cur_segno + BATCHED_TRIM_SEGMENTS(sbi), sbi->segs_per_sec) - 1, end_segno); @@ -2322,11 +2493,16 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) schedule(); } - /* It's time to issue all the filed discards */ - mark_discard_range_all(sbi); - f2fs_wait_discard_bios(sbi, false); + + start_block = START_BLOCK(sbi, start_segno); + end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1); + + init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); + __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block); + trimmed = __wait_discard_cmd_range(sbi, &dpolicy, + start_block, end_block); out: - range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); + range->len = F2FS_BLK_TO_BYTES(trimmed); return err; } @@ -2338,6 +2514,20 @@ static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) return false; } +#if 0 +int rw_hint_to_seg_type(enum rw_hint hint) +{ + switch (hint) { + case WRITE_LIFE_SHORT: + return CURSEG_HOT_DATA; + case WRITE_LIFE_EXTREME: + return CURSEG_COLD_DATA; + default: + return CURSEG_WARM_DATA; + } +} +#endif + static int __get_segment_type_2(struct f2fs_io_info *fio) { if (fio->type == DATA) @@ -2372,6 +2562,7 @@ static int __get_segment_type_6(struct f2fs_io_info *fio) return CURSEG_COLD_DATA; if (is_inode_flag_set(inode, FI_HOT_DATA)) return CURSEG_HOT_DATA; + /* 
rw_hint_to_seg_type(inode->i_write_hint); */ return CURSEG_WARM_DATA; } else { if (IS_DNODE(fio->page)) @@ -2416,8 +2607,10 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, type); + down_read(&SM_I(sbi)->curseg_lock); + mutex_lock(&curseg->curseg_mutex); - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); @@ -2434,15 +2627,26 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, stat_inc_block_count(sbi, curseg); + /* + * SIT information should be updated before segment allocation, + * since SSR needs latest valid block information. + */ + update_sit_entry(sbi, *new_blkaddr, 1); + if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) + update_sit_entry(sbi, old_blkaddr, -1); + if (!__has_curseg_space(sbi, type)) sit_i->s_ops->allocate_segment(sbi, type, false); + /* - * SIT information should be updated after segment allocation, - * since we need to keep dirty segments precisely under SSR. + * segment dirty status should be updated after segment allocation, + * so we just need to update status only one time after previous + * segment being closed. 
*/ - refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); + locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); + locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr)); - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); if (page && IS_NODESEG(type)) { fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); @@ -2462,6 +2666,29 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, } mutex_unlock(&curseg->curseg_mutex); + + up_read(&SM_I(sbi)->curseg_lock); +} + +static void update_device_state(struct f2fs_io_info *fio) +{ + struct f2fs_sb_info *sbi = fio->sbi; + unsigned int devidx; + + if (!sbi->s_ndevs) + return; + + devidx = f2fs_target_device_index(sbi, fio->new_blkaddr); + + /* update device state for fsync */ + set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO); + + /* update device state for checkpoint */ + if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) { + spin_lock(&sbi->dev_lock); + f2fs_set_bit(devidx, (char *)&sbi->dirty_device); + spin_unlock(&sbi->dev_lock); + } } static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) @@ -2478,6 +2705,8 @@ reallocate: if (err == -EAGAIN) { fio->old_blkaddr = fio->new_blkaddr; goto reallocate; + } else if (!err) { + update_device_state(fio); } } @@ -2538,12 +2767,26 @@ int rewrite_data_page(struct f2fs_io_info *fio) stat_inc_inplace_blocks(fio->sbi); err = f2fs_submit_page_bio(fio); + if (!err) + update_device_state(fio); f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE); return err; } +static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + int i; + + for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) { + if (CURSEG_I(sbi, i)->segno == segno) + break; + } + return i; +} + void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, block_t old_blkaddr, block_t new_blkaddr, bool recover_curseg, bool recover_newaddr) @@ -2559,6 +2802,8 @@ void __f2fs_replace_block(struct f2fs_sb_info 
*sbi, struct f2fs_summary *sum, se = get_seg_entry(sbi, segno); type = se->type; + down_write(&SM_I(sbi)->curseg_lock); + if (!recover_curseg) { /* for recovery flow */ if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { @@ -2568,14 +2813,19 @@ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, type = CURSEG_WARM_DATA; } } else { - if (!IS_CURSEG(sbi, segno)) + if (IS_CURSEG(sbi, segno)) { + /* se->type is volatile as SSR allocation */ + type = __f2fs_get_curseg(sbi, segno); + f2fs_bug_on(sbi, type == NO_CHECK_TYPE); + } else { type = CURSEG_WARM_DATA; + } } curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); old_cursegno = curseg->segno; old_blkoff = curseg->next_blkoff; @@ -2607,8 +2857,9 @@ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, curseg->next_blkoff = old_blkoff; } - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); mutex_unlock(&curseg->curseg_mutex); + up_write(&SM_I(sbi)->curseg_lock); } void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, @@ -3062,7 +3313,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) bool to_journal = true; struct seg_entry *se; - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); if (!sit_i->dirty_sentries) goto out; @@ -3156,7 +3407,7 @@ out: cpc->trim_start = trim_start; } - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); set_prefree_as_free_segments(sbi); } @@ -3249,7 +3500,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi) sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; - mutex_init(&sit_i->sentry_lock); + init_rwsem(&sit_i->sentry_lock); return 0; } @@ -3490,7 +3741,7 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi) struct sit_info *sit_i = SIT_I(sbi); 
unsigned int segno; - mutex_lock(&sit_i->sentry_lock); + down_write(&sit_i->sentry_lock); sit_i->min_mtime = LLONG_MAX; @@ -3507,7 +3758,7 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi) sit_i->min_mtime = mtime; } sit_i->max_mtime = get_mtime(sbi); - mutex_unlock(&sit_i->sentry_lock); + up_write(&sit_i->sentry_lock); } int build_segment_manager(struct f2fs_sb_info *sbi) @@ -3540,11 +3791,14 @@ int build_segment_manager(struct f2fs_sb_info *sbi) sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; + sm_info->min_ssr_sections = reserved_sections(sbi); sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; INIT_LIST_HEAD(&sm_info->sit_entry_set); + init_rwsem(&sm_info->curseg_lock); + if (!f2fs_readonly(sbi->sb)) { err = create_flush_cmd_control(sbi); if (err) diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index ffa11274b0ce..5264b6ed120c 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -231,7 +231,7 @@ struct sit_info { unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */ unsigned int dirty_sentries; /* # of dirty sentries */ unsigned int sents_per_block; /* # of SIT entries per block */ - struct mutex sentry_lock; /* to protect SIT cache */ + struct rw_semaphore sentry_lock; /* to protect SIT cache */ struct seg_entry *sentries; /* SIT segment-level cache */ struct sec_entry *sec_entries; /* SIT section-level cache */ @@ -497,6 +497,33 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi) return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi)); } +static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi) +{ + unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) + + get_pages(sbi, F2FS_DIRTY_DENTS); + unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS); + unsigned int segno, left_blocks; + int i; + + /* check current node segment */ + for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) { + segno 
= CURSEG_I(sbi, i)->segno; + left_blocks = sbi->blocks_per_seg - + get_seg_entry(sbi, segno)->ckpt_valid_blocks; + + if (node_blocks > left_blocks) + return false; + } + + /* check current data segment */ + segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno; + left_blocks = sbi->blocks_per_seg - + get_seg_entry(sbi, segno)->ckpt_valid_blocks; + if (dent_blocks > left_blocks) + return false; + return true; +} + static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed, int needed) { @@ -507,6 +534,9 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) return false; + if (free_sections(sbi) + freed == reserved_sections(sbi) + needed && + has_curseg_enough_space(sbi)) + return false; return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs + imeta_secs + reserved_sections(sbi) + needed); @@ -730,7 +760,7 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type) static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi, unsigned int secno) { - if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), true) >= + if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), true) > sbi->fggc_threshold) return true; return false; @@ -795,8 +825,9 @@ static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force) goto wake_up; mutex_lock(&dcc->cmd_lock); - for (i = MAX_PLIST_NUM - 1; - i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) { + for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { + if (i + 1 < dcc->discard_granularity) + break; if (!list_empty(&dcc->pend_list[i])) { wakeup = true; break; diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c index 5c60fc28ec75..0b5664a1a6cc 100644 --- a/fs/f2fs/shrinker.c +++ b/fs/f2fs/shrinker.c @@ -28,7 +28,7 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi) static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) { - long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - 
MAX_FREE_NIDS; + long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS; return count > 0 ? count : 0; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 482bb0333806..76e2f1518224 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -44,6 +44,8 @@ static struct kmem_cache *f2fs_inode_cachep; char *fault_name[FAULT_MAX] = { [FAULT_KMALLOC] = "kmalloc", [FAULT_PAGE_ALLOC] = "page alloc", + [FAULT_PAGE_GET] = "page get", + [FAULT_ALLOC_BIO] = "alloc bio", [FAULT_ALLOC_NID] = "alloc nid", [FAULT_ORPHAN] = "orphan", [FAULT_BLOCK] = "no more block", @@ -92,6 +94,7 @@ enum { Opt_disable_ext_identify, Opt_inline_xattr, Opt_noinline_xattr, + Opt_inline_xattr_size, Opt_inline_data, Opt_inline_dentry, Opt_noinline_dentry, @@ -141,6 +144,7 @@ static match_table_t f2fs_tokens = { {Opt_disable_ext_identify, "disable_ext_identify"}, {Opt_inline_xattr, "inline_xattr"}, {Opt_noinline_xattr, "noinline_xattr"}, + {Opt_inline_xattr_size, "inline_xattr_size=%u"}, {Opt_inline_data, "inline_data"}, {Opt_inline_dentry, "inline_dentry"}, {Opt_noinline_dentry, "noinline_dentry"}, @@ -209,6 +213,12 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype, "quota options when quota turned on"); return -EINVAL; } + if (f2fs_sb_has_quota_ino(sb)) { + f2fs_msg(sb, KERN_INFO, + "QUOTA feature is enabled, so ignore qf_name"); + return 0; + } + qname = match_strdup(args); if (!qname) { f2fs_msg(sb, KERN_ERR, @@ -287,6 +297,18 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) return -1; } } + + if (f2fs_sb_has_quota_ino(sbi->sb) && sbi->s_jquota_fmt) { + f2fs_msg(sbi->sb, KERN_INFO, + "QUOTA feature is enabled, so ignore jquota_fmt"); + sbi->s_jquota_fmt = 0; + } + if (f2fs_sb_has_quota_ino(sbi->sb) && sb_rdonly(sbi->sb)) { + f2fs_msg(sbi->sb, KERN_INFO, + "Filesystem with quota feature cannot be mounted RDWR " + "without CONFIG_QUOTA"); + return -1; + } return 0; } #endif @@ -383,6 +405,12 @@ static int parse_options(struct super_block *sb, char *options) case 
Opt_noinline_xattr: clear_opt(sbi, INLINE_XATTR); break; + case Opt_inline_xattr_size: + if (args->from && match_int(args, &arg)) + return -EINVAL; + set_opt(sbi, INLINE_XATTR_SIZE); + sbi->inline_xattr_size = arg; + break; #else case Opt_user_xattr: f2fs_msg(sb, KERN_INFO, @@ -604,6 +632,24 @@ static int parse_options(struct super_block *sb, char *options) F2FS_IO_SIZE_KB(sbi)); return -EINVAL; } + + if (test_opt(sbi, INLINE_XATTR_SIZE)) { + if (!test_opt(sbi, INLINE_XATTR)) { + f2fs_msg(sb, KERN_ERR, + "inline_xattr_size option should be " + "set with inline_xattr option"); + return -EINVAL; + } + if (!sbi->inline_xattr_size || + sbi->inline_xattr_size >= DEF_ADDRS_PER_INODE - + F2FS_TOTAL_EXTRA_ATTR_SIZE - + DEF_INLINE_RESERVED_SIZE - + DEF_MIN_INLINE_SIZE) { + f2fs_msg(sb, KERN_ERR, + "inline xattr size is out of range"); + return -EINVAL; + } + } return 0; } @@ -618,13 +664,13 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) init_once((void *) fi); /* Initialize f2fs-specific inode info */ - fi->vfs_inode.i_version = 1; atomic_set(&fi->dirty_pages, 0); fi->i_current_depth = 1; fi->i_advise = 0; init_rwsem(&fi->i_sem); INIT_LIST_HEAD(&fi->dirty_list); INIT_LIST_HEAD(&fi->gdirty_list); + INIT_LIST_HEAD(&fi->inmem_ilist); INIT_LIST_HEAD(&fi->inmem_pages); mutex_init(&fi->inmem_lock); init_rwsem(&fi->dio_rwsem[READ]); @@ -673,7 +719,6 @@ static int f2fs_drop_inode(struct inode *inode) sb_end_intwrite(inode->i_sb); - fscrypt_put_encryption_info(inode, NULL); spin_lock(&inode->i_lock); atomic_dec(&inode->i_count); } @@ -781,6 +826,7 @@ static void f2fs_put_super(struct super_block *sb) { struct f2fs_sb_info *sbi = F2FS_SB(sb); int i; + bool dropped; f2fs_quota_off_umount(sb); @@ -801,9 +847,9 @@ static void f2fs_put_super(struct super_block *sb) } /* be sure to wait for any on-going discard commands */ - f2fs_wait_discard_bios(sbi, true); + dropped = f2fs_wait_discard_bios(sbi); - if (f2fs_discard_en(sbi) && !sbi->discard_blks) { + if 
(f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) { struct cp_control cpc = { .reason = CP_UMOUNT | CP_TRIMMED, }; @@ -859,6 +905,9 @@ int f2fs_sync_fs(struct super_block *sb, int sync) struct f2fs_sb_info *sbi = F2FS_SB(sb); int err = 0; + if (unlikely(f2fs_cp_error(sbi))) + return 0; + trace_f2fs_sync_fs(sb, sync); if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) @@ -958,7 +1007,7 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_blocks = total_count - start_count; buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count; buf->f_bavail = user_block_count - valid_user_blocks(sbi) - - sbi->reserved_blocks; + sbi->current_reserved_blocks; avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; @@ -1047,6 +1096,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",inline_xattr"); else seq_puts(seq, ",noinline_xattr"); + if (test_opt(sbi, INLINE_XATTR_SIZE)) + seq_printf(seq, ",inline_xattr_size=%u", + sbi->inline_xattr_size); #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL if (test_opt(sbi, POSIX_ACL)) @@ -1109,6 +1161,7 @@ static void default_options(struct f2fs_sb_info *sbi) { /* init some FS parameters */ sbi->active_logs = NR_CURSEG_TYPE; + sbi->inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; set_opt(sbi, BG_GC); set_opt(sbi, INLINE_XATTR); @@ -1137,6 +1190,9 @@ static void default_options(struct f2fs_sb_info *sbi) #endif } +#ifdef CONFIG_QUOTA +static int f2fs_enable_quotas(struct super_block *sb); +#endif static int f2fs_remount(struct super_block *sb, int *flags, char *data) { struct f2fs_sb_info *sbi = F2FS_SB(sb); @@ -1203,6 +1259,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) if (f2fs_readonly(sb) && (*flags & MS_RDONLY)) goto skip; +#ifdef CONFIG_QUOTA if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) { err = dquot_suspend(sb, -1); if (err < 0) @@ -1210,9 +1267,15 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) } 
else { /* dquot_resume needs RW */ sb->s_flags &= ~MS_RDONLY; - dquot_resume(sb, -1); + if (sb_any_quota_suspended(sb)) { + dquot_resume(sb, -1); + } else if (f2fs_sb_has_quota_ino(sb)) { + err = f2fs_enable_quotas(sb); + if (err) + goto restore_opts; + } } - +#endif /* disallow enable/disable extent_cache dynamically */ if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) { err = -EINVAL; @@ -1321,8 +1384,13 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); repeat: page = read_mapping_page(mapping, blkidx, NULL); - if (IS_ERR(page)) + if (IS_ERR(page)) { + if (PTR_ERR(page) == -ENOMEM) { + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto repeat; + } return PTR_ERR(page); + } lock_page(page); @@ -1365,11 +1433,16 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, while (towrite > 0) { tocopy = min_t(unsigned long, sb->s_blocksize - offset, towrite); - +retry: err = a_ops->write_begin(NULL, mapping, off, tocopy, 0, &page, NULL); - if (unlikely(err)) + if (unlikely(err)) { + if (err == -ENOMEM) { + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto retry; + } break; + } kaddr = kmap_atomic(page); memcpy(kaddr + offset, data, tocopy); @@ -1386,8 +1459,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, } if (len == towrite) - return 0; - inode->i_version++; + return err; inode->i_mtime = inode->i_ctime = current_time(inode); f2fs_mark_inode_dirty_sync(inode, false); return len - towrite; @@ -1409,19 +1481,91 @@ static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) sbi->s_jquota_fmt, type); } -void f2fs_enable_quota_files(struct f2fs_sb_info *sbi) +int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) { - int i, ret; + int enabled = 0; + int i, err; + + if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) { + err = f2fs_enable_quotas(sbi->sb); + if (err) { + f2fs_msg(sbi->sb, KERN_ERR, + "Cannot turn on quota_ino: %d", err); + 
return 0; + } + return 1; + } for (i = 0; i < MAXQUOTAS; i++) { if (sbi->s_qf_names[i]) { - ret = f2fs_quota_on_mount(sbi, i); - if (ret < 0) - f2fs_msg(sbi->sb, KERN_ERR, - "Cannot turn on journaled " - "quota: error %d", ret); + err = f2fs_quota_on_mount(sbi, i); + if (!err) { + enabled = 1; + continue; + } + f2fs_msg(sbi->sb, KERN_ERR, + "Cannot turn on quotas: %d on %d", err, i); + } + } + return enabled; +} + +static int f2fs_quota_enable(struct super_block *sb, int type, int format_id, + unsigned int flags) +{ + struct inode *qf_inode; + unsigned long qf_inum; + int err; + + BUG_ON(!f2fs_sb_has_quota_ino(sb)); + + qf_inum = f2fs_qf_ino(sb, type); + if (!qf_inum) + return -EPERM; + + qf_inode = f2fs_iget(sb, qf_inum); + if (IS_ERR(qf_inode)) { + f2fs_msg(sb, KERN_ERR, + "Bad quota inode %u:%lu", type, qf_inum); + return PTR_ERR(qf_inode); + } + + /* Don't account quota for quota files to avoid recursion */ + qf_inode->i_flags |= S_NOQUOTA; + err = dquot_enable(qf_inode, type, format_id, flags); + iput(qf_inode); + return err; +} + +static int f2fs_enable_quotas(struct super_block *sb) +{ + int type, err = 0; + unsigned long qf_inum; + bool quota_mopt[MAXQUOTAS] = { + test_opt(F2FS_SB(sb), USRQUOTA), + test_opt(F2FS_SB(sb), GRPQUOTA), + test_opt(F2FS_SB(sb), PRJQUOTA), + }; + + sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; + for (type = 0; type < MAXQUOTAS; type++) { + qf_inum = f2fs_qf_ino(sb, type); + if (qf_inum) { + err = f2fs_quota_enable(sb, type, QFMT_VFS_V1, + DQUOT_USAGE_ENABLED | + (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); + if (err) { + f2fs_msg(sb, KERN_ERR, + "Failed to enable quota tracking " + "(type=%d, err=%d). 
Please run " + "fsck to fix.", type, err); + for (type--; type >= 0; type--) + dquot_quota_off(sb, type); + return err; + } } } + return 0; } static int f2fs_quota_sync(struct super_block *sb, int type) @@ -1492,7 +1636,7 @@ static int f2fs_quota_off(struct super_block *sb, int type) f2fs_quota_sync(sb, type); err = dquot_quota_off(sb, type); - if (err) + if (err || f2fs_sb_has_quota_ino(sb)) goto out_put; inode_lock(inode); @@ -1660,7 +1804,7 @@ static loff_t max_file_blocks(void) /* * note: previously, result is equal to (DEF_ADDRS_PER_INODE - - * F2FS_INLINE_XATTR_ADDRS), but now f2fs try to reserve more + * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more * space in inode.i_addr, it will be more safe to reassign * result as zero. */ @@ -1969,6 +2113,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi) for (j = HOT; j < NR_TEMP_TYPE; j++) mutex_init(&sbi->wio_mutex[i][j]); spin_lock_init(&sbi->cp_lock); + + sbi->dirty_device = 0; + spin_lock_init(&sbi->dev_lock); } static int init_percpu_info(struct f2fs_sb_info *sbi) @@ -2323,7 +2470,10 @@ try_onemore: #ifdef CONFIG_QUOTA sb->dq_op = &f2fs_quota_operations; - sb->s_qcop = &f2fs_quotactl_ops; + if (f2fs_sb_has_quota_ino(sb)) + sb->s_qcop = &dquot_quotactl_sysfile_ops; + else + sb->s_qcop = &f2fs_quotactl_ops; sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; #endif @@ -2419,6 +2569,7 @@ try_onemore: le64_to_cpu(sbi->ckpt->valid_block_count); sbi->last_valid_block_count = sbi->total_valid_block_count; sbi->reserved_blocks = 0; + sbi->current_reserved_blocks = 0; for (i = 0; i < NR_INODE_TYPE; i++) { INIT_LIST_HEAD(&sbi->inode_list[i]); @@ -2493,10 +2644,24 @@ try_onemore: if (err) goto free_root_inode; +#ifdef CONFIG_QUOTA + /* + * Turn on quotas which were not enabled for read-only mounts if + * filesystem has quota feature, so that they are updated correctly. 
+ */ + if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) { + err = f2fs_enable_quotas(sb); + if (err) { + f2fs_msg(sb, KERN_ERR, + "Cannot turn on quotas: error %d", err); + goto free_sysfs; + } + } +#endif /* if there are nt orphan nodes free them */ err = recover_orphan_inodes(sbi); if (err) - goto free_sysfs; + goto free_meta; /* recover fsynced data */ if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { @@ -2530,7 +2695,7 @@ try_onemore: err = -EINVAL; f2fs_msg(sb, KERN_ERR, "Need to recover fsync data"); - goto free_sysfs; + goto free_meta; } } skip_recovery: @@ -2564,6 +2729,10 @@ skip_recovery: return 0; free_meta: +#ifdef CONFIG_QUOTA + if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) + f2fs_quota_off_umount(sbi->sb); +#endif f2fs_sync_inode_meta(sbi); /* * Some dirty meta pages can be produced by recover_orphan_inodes() @@ -2572,7 +2741,9 @@ free_meta: * falls into an infinite loop in sync_meta_pages(). */ truncate_inode_pages_final(META_MAPPING(sbi)); +#ifdef CONFIG_QUOTA free_sysfs: +#endif f2fs_unregister_sysfs(sbi); free_root_inode: dput(sb->s_root); diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index e2c258f717cd..9835348b6e5d 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -30,7 +30,7 @@ enum { FAULT_INFO_RATE, /* struct f2fs_fault_info */ FAULT_INFO_TYPE, /* struct f2fs_fault_info */ #endif - RESERVED_BLOCKS, + RESERVED_BLOCKS, /* struct f2fs_sb_info */ }; struct f2fs_attr { @@ -63,6 +63,13 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) return NULL; } +static ssize_t dirty_segments_show(struct f2fs_attr *a, + struct f2fs_sb_info *sbi, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)(dirty_segments(sbi))); +} + static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { @@ -100,10 +107,22 @@ static ssize_t features_show(struct f2fs_attr *a, if (f2fs_sb_has_inode_chksum(sb)) len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", len ? 
", " : "", "inode_checksum"); + if (f2fs_sb_has_flexible_inline_xattr(sb)) + len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", + len ? ", " : "", "flexible_inline_xattr"); + if (f2fs_sb_has_quota_ino(sb)) + len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", + len ? ", " : "", "quota_ino"); len += snprintf(buf + len, PAGE_SIZE - len, "\n"); return len; } +static ssize_t current_reserved_blocks_show(struct f2fs_attr *a, + struct f2fs_sb_info *sbi, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", sbi->current_reserved_blocks); +} + static ssize_t f2fs_sbi_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { @@ -143,34 +162,22 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a, #endif if (a->struct_type == RESERVED_BLOCKS) { spin_lock(&sbi->stat_lock); - if ((unsigned long)sbi->total_valid_block_count + t > - (unsigned long)sbi->user_block_count) { + if (t > (unsigned long)sbi->user_block_count) { spin_unlock(&sbi->stat_lock); return -EINVAL; } *ui = t; + sbi->current_reserved_blocks = min(sbi->reserved_blocks, + sbi->user_block_count - valid_user_blocks(sbi)); spin_unlock(&sbi->stat_lock); return count; } if (!strcmp(a->attr.name, "discard_granularity")) { - struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - int i; - if (t == 0 || t > MAX_PLIST_NUM) return -EINVAL; if (t == *ui) return count; - - mutex_lock(&dcc->cmd_lock); - for (i = 0; i < MAX_PLIST_NUM; i++) { - if (i >= t - 1) - dcc->pend_list_tag[i] |= P_ACTIVE; - else - dcc->pend_list_tag[i] &= (~P_ACTIVE); - } - mutex_unlock(&dcc->cmd_lock); - *ui = t; return count; } @@ -222,6 +229,8 @@ enum feat_id { FEAT_EXTRA_ATTR, FEAT_PROJECT_QUOTA, FEAT_INODE_CHECKSUM, + FEAT_FLEXIBLE_INLINE_XATTR, + FEAT_QUOTA_INO, }; static ssize_t f2fs_feature_show(struct f2fs_attr *a, @@ -234,6 +243,8 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a, case FEAT_EXTRA_ATTR: case FEAT_PROJECT_QUOTA: case FEAT_INODE_CHECKSUM: + case FEAT_FLEXIBLE_INLINE_XATTR: + case FEAT_QUOTA_INO: return 
snprintf(buf, PAGE_SIZE, "supported\n"); } return 0; @@ -279,6 +290,7 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks); +F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections); F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh); F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages); F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio); @@ -291,8 +303,10 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable); F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate); F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type); #endif +F2FS_GENERAL_RO_ATTR(dirty_segments); F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes); F2FS_GENERAL_RO_ATTR(features); +F2FS_GENERAL_RO_ATTR(current_reserved_blocks); #ifdef CONFIG_F2FS_FS_ENCRYPTION F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO); @@ -304,6 +318,8 @@ F2FS_FEATURE_RO_ATTR(atomic_write, FEAT_ATOMIC_WRITE); F2FS_FEATURE_RO_ATTR(extra_attr, FEAT_EXTRA_ATTR); F2FS_FEATURE_RO_ATTR(project_quota, FEAT_PROJECT_QUOTA); F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM); +F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR); +F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO); #define ATTR_LIST(name) (&f2fs_attr_##name.attr) static struct attribute *f2fs_attrs[] = { @@ -321,6 +337,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(min_ipu_util), ATTR_LIST(min_fsync_blocks), ATTR_LIST(min_hot_blocks), + ATTR_LIST(min_ssr_sections), ATTR_LIST(max_victim_search), ATTR_LIST(dir_level), ATTR_LIST(ram_thresh), @@ -333,9 +350,11 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(inject_rate), ATTR_LIST(inject_type), #endif + ATTR_LIST(dirty_segments), ATTR_LIST(lifetime_write_kbytes), 
ATTR_LIST(features), ATTR_LIST(reserved_blocks), + ATTR_LIST(current_reserved_blocks), NULL, }; @@ -350,6 +369,8 @@ static struct attribute *f2fs_feat_attrs[] = { ATTR_LIST(extra_attr), ATTR_LIST(project_quota), ATTR_LIST(inode_checksum), + ATTR_LIST(flexible_inline_xattr), + ATTR_LIST(quota_ino), NULL, }; diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index ab658419552b..7acf56ebda65 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -264,12 +264,12 @@ static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index, return entry; } -static struct f2fs_xattr_entry *__find_inline_xattr(void *base_addr, - void **last_addr, int index, - size_t len, const char *name) +static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode, + void *base_addr, void **last_addr, int index, + size_t len, const char *name) { struct f2fs_xattr_entry *entry; - unsigned int inline_size = F2FS_INLINE_XATTR_ADDRS << 2; + unsigned int inline_size = inline_xattr_size(inode); list_for_each_xattr(entry, base_addr) { if ((void *)entry + sizeof(__u32) > base_addr + inline_size || @@ -288,12 +288,54 @@ static struct f2fs_xattr_entry *__find_inline_xattr(void *base_addr, return entry; } +static int read_inline_xattr(struct inode *inode, struct page *ipage, + void *txattr_addr) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + unsigned int inline_size = inline_xattr_size(inode); + struct page *page = NULL; + void *inline_addr; + + if (ipage) { + inline_addr = inline_xattr_addr(inode, ipage); + } else { + page = get_node_page(sbi, inode->i_ino); + if (IS_ERR(page)) + return PTR_ERR(page); + + inline_addr = inline_xattr_addr(inode, page); + } + memcpy(txattr_addr, inline_addr, inline_size); + f2fs_put_page(page, 1); + + return 0; +} + +static int read_xattr_block(struct inode *inode, void *txattr_addr) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + nid_t xnid = F2FS_I(inode)->i_xattr_nid; + unsigned int inline_size = inline_xattr_size(inode); + struct page *xpage; + void 
*xattr_addr; + + /* The inode already has an extended attribute block. */ + xpage = get_node_page(sbi, xnid); + if (IS_ERR(xpage)) + return PTR_ERR(xpage); + + xattr_addr = page_address(xpage); + memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE); + f2fs_put_page(xpage, 1); + + return 0; +} + static int lookup_all_xattrs(struct inode *inode, struct page *ipage, unsigned int index, unsigned int len, const char *name, struct f2fs_xattr_entry **xe, void **base_addr) { - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); void *cur_addr, *txattr_addr, *last_addr = NULL; nid_t xnid = F2FS_I(inode)->i_xattr_nid; unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0; @@ -310,23 +352,11 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage, /* read from inline xattr */ if (inline_size) { - struct page *page = NULL; - void *inline_addr; - - if (ipage) { - inline_addr = inline_xattr_addr(ipage); - } else { - page = get_node_page(sbi, inode->i_ino); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto out; - } - inline_addr = inline_xattr_addr(page); - } - memcpy(txattr_addr, inline_addr, inline_size); - f2fs_put_page(page, 1); + err = read_inline_xattr(inode, ipage, txattr_addr); + if (err) + goto out; - *xe = __find_inline_xattr(txattr_addr, &last_addr, + *xe = __find_inline_xattr(inode, txattr_addr, &last_addr, index, len, name); if (*xe) goto check; @@ -334,19 +364,9 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage, /* read from xattr node block */ if (xnid) { - struct page *xpage; - void *xattr_addr; - - /* The inode already has an extended attribute block. 
*/ - xpage = get_node_page(sbi, xnid); - if (IS_ERR(xpage)) { - err = PTR_ERR(xpage); + err = read_xattr_block(inode, txattr_addr); + if (err) goto out; - } - - xattr_addr = page_address(xpage); - memcpy(txattr_addr + inline_size, xattr_addr, size); - f2fs_put_page(xpage, 1); } if (last_addr) @@ -371,7 +391,6 @@ out: static int read_all_xattrs(struct inode *inode, struct page *ipage, void **base_addr) { - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_xattr_header *header; nid_t xnid = F2FS_I(inode)->i_xattr_nid; unsigned int size = VALID_XATTR_BLOCK_SIZE; @@ -386,38 +405,16 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage, /* read from inline xattr */ if (inline_size) { - struct page *page = NULL; - void *inline_addr; - - if (ipage) { - inline_addr = inline_xattr_addr(ipage); - } else { - page = get_node_page(sbi, inode->i_ino); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto fail; - } - inline_addr = inline_xattr_addr(page); - } - memcpy(txattr_addr, inline_addr, inline_size); - f2fs_put_page(page, 1); + err = read_inline_xattr(inode, ipage, txattr_addr); + if (err) + goto fail; } /* read from xattr node block */ if (xnid) { - struct page *xpage; - void *xattr_addr; - - /* The inode already has an extended attribute block. 
*/ - xpage = get_node_page(sbi, xnid); - if (IS_ERR(xpage)) { - err = PTR_ERR(xpage); + err = read_xattr_block(inode, txattr_addr); + if (err) goto fail; - } - - xattr_addr = page_address(xpage); - memcpy(txattr_addr + inline_size, xattr_addr, size); - f2fs_put_page(xpage, 1); } header = XATTR_HDR(txattr_addr); @@ -439,10 +436,12 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); size_t inline_size = inline_xattr_size(inode); + struct page *in_page = NULL; void *xattr_addr; + void *inline_addr = NULL; struct page *xpage; nid_t new_nid = 0; - int err; + int err = 0; if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid) if (!alloc_nid(sbi, &new_nid)) @@ -450,30 +449,30 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, /* write to inline xattr */ if (inline_size) { - struct page *page = NULL; - void *inline_addr; - if (ipage) { - inline_addr = inline_xattr_addr(ipage); - f2fs_wait_on_page_writeback(ipage, NODE, true); - set_page_dirty(ipage); + inline_addr = inline_xattr_addr(inode, ipage); } else { - page = get_node_page(sbi, inode->i_ino); - if (IS_ERR(page)) { + in_page = get_node_page(sbi, inode->i_ino); + if (IS_ERR(in_page)) { alloc_nid_failed(sbi, new_nid); - return PTR_ERR(page); + return PTR_ERR(in_page); } - inline_addr = inline_xattr_addr(page); - f2fs_wait_on_page_writeback(page, NODE, true); + inline_addr = inline_xattr_addr(inode, in_page); } - memcpy(inline_addr, txattr_addr, inline_size); - f2fs_put_page(page, 1); + f2fs_wait_on_page_writeback(ipage ? ipage : in_page, + NODE, true); /* no need to use xattr node block */ if (hsize <= inline_size) { - err = truncate_xattr_node(inode, ipage); + err = truncate_xattr_node(inode); alloc_nid_failed(sbi, new_nid); - return err; + if (err) { + f2fs_put_page(in_page, 1); + return err; + } + memcpy(inline_addr, txattr_addr, inline_size); + set_page_dirty(ipage ? 
ipage : in_page); + goto in_page_out; } } @@ -482,7 +481,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid); if (IS_ERR(xpage)) { alloc_nid_failed(sbi, new_nid); - return PTR_ERR(xpage); + goto in_page_out; } f2fs_bug_on(sbi, new_nid); f2fs_wait_on_page_writeback(xpage, NODE, true); @@ -492,17 +491,24 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, xpage = new_node_page(&dn, XATTR_NODE_OFFSET); if (IS_ERR(xpage)) { alloc_nid_failed(sbi, new_nid); - return PTR_ERR(xpage); + goto in_page_out; } alloc_nid_done(sbi, new_nid); } - xattr_addr = page_address(xpage); + + if (inline_size) + memcpy(inline_addr, txattr_addr, inline_size); memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE); + + if (inline_size) + set_page_dirty(ipage ? ipage : in_page); set_page_dirty(xpage); - f2fs_put_page(xpage, 1); - return 0; + f2fs_put_page(xpage, 1); +in_page_out: + f2fs_put_page(in_page, 1); + return err; } int f2fs_getxattr(struct inode *inode, int index, const char *name, @@ -721,6 +727,10 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int err; + err = dquot_initialize(inode); + if (err) + return err; + /* this case is only from init_inode_metadata */ if (ipage) return __f2fs_setxattr(inode, index, name, value, diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index 0ac4c1f73fbd..25177e6bd603 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h @@ -103,7 +103,7 @@ static inline unsigned int isonum_733(char *p) /* Ignore bigendian datum due to broken mastering programs */ return get_unaligned_le32(p); } -extern int iso_date(char *, int); +extern int iso_date(u8 *, int); struct inode; /* To make gcc happy */ diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h index ed09e2b08637..f835976ce033 100644 --- a/fs/isofs/rock.h +++ b/fs/isofs/rock.h @@ -65,7 +65,7 @@ struct RR_PL_s { }; struct stamp { - char 
time[7]; + __u8 time[7]; /* actually 6 unsigned, 1 signed */ } __attribute__ ((packed)); struct RR_TF_s { diff --git a/fs/isofs/util.c b/fs/isofs/util.c index 005a15cfd30a..37860fea364d 100644 --- a/fs/isofs/util.c +++ b/fs/isofs/util.c @@ -15,7 +15,7 @@ * to GMT. Thus we should always be correct. */ -int iso_date(char * p, int flag) +int iso_date(u8 *p, int flag) { int year, month, day, hour, minute, second, tz; int crtime; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 348e0a05bd18..44e09483d2cd 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1260,7 +1260,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags) return 0; } - error = nfs_revalidate_inode(NFS_SERVER(inode), inode); + error = nfs_lookup_verify_inode(inode, flags); dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n", __func__, inode->i_ino, error ? "invalid" : "valid"); return !error; @@ -1420,6 +1420,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int); const struct dentry_operations nfs4_dentry_operations = { .d_revalidate = nfs4_lookup_revalidate, + .d_weak_revalidate = nfs_weak_revalidate, .d_delete = nfs_dentry_delete, .d_iput = nfs_dentry_iput, .d_automount = nfs_d_automount, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8e425f2c5ddd..8ef6f70c9e25 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -38,7 +38,6 @@ #include <linux/mm.h> #include <linux/delay.h> #include <linux/errno.h> -#include <linux/file.h> #include <linux/string.h> #include <linux/ratelimit.h> #include <linux/printk.h> @@ -242,15 +241,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE }; const u32 nfs4_fs_locations_bitmap[3] = { - FATTR4_WORD0_TYPE - | FATTR4_WORD0_CHANGE + FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID | FATTR4_WORD0_FILEID | FATTR4_WORD0_FS_LOCATIONS, - FATTR4_WORD1_MODE - | FATTR4_WORD1_NUMLINKS - | FATTR4_WORD1_OWNER + FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV | FATTR4_WORD1_SPACE_USED @@ -5741,7 
+5737,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, p->server = server; atomic_inc(&lsp->ls_count); p->ctx = get_nfs_open_context(ctx); - get_file(fl->fl_file); memcpy(&p->fl, fl, sizeof(p->fl)); return p; out_free_seqid: @@ -5854,7 +5849,6 @@ static void nfs4_lock_release(void *calldata) nfs_free_seqid(data->arg.lock_seqid); nfs4_put_lock_state(data->lsp); put_nfs_open_context(data->ctx); - fput(data->fl.fl_file); kfree(data); dprintk("%s: done!\n", __func__); } @@ -6351,9 +6345,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, struct page *page) { struct nfs_server *server = NFS_SERVER(dir); - u32 bitmask[3] = { - [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, - }; + u32 bitmask[3]; struct nfs4_fs_locations_arg args = { .dir_fh = NFS_FH(dir), .name = name, @@ -6372,12 +6364,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, dprintk("%s: start\n", __func__); + bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; + bitmask[1] = nfs4_fattr_bitmap[1]; + /* Ask for the fileid of the absent filesystem if mounted_on_fileid * is not supported */ if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) - bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; + bitmask[0] &= ~FATTR4_WORD0_FILEID; else - bitmask[0] |= FATTR4_WORD0_FILEID; + bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; nfs_fattr_init(&fs_locations->fattr); fs_locations->server = server; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index e8d1d6c5000c..9a0b219ff74d 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1680,7 +1680,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) break; case -NFS4ERR_STALE_CLIENTID: set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); - nfs4_state_clear_reclaim_reboot(clp); nfs4_state_start_reclaim_reboot(clp); break; case -NFS4ERR_EXPIRED: diff --git a/fs/nfs/super.c b/fs/nfs/super.c index f1268280244e..3149f7e58d6f 
100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1322,7 +1322,7 @@ static int nfs_parse_mount_options(char *raw, mnt->options |= NFS_OPTION_MIGRATION; break; case Opt_nomigration: - mnt->options &= NFS_OPTION_MIGRATION; + mnt->options &= ~NFS_OPTION_MIGRATION; break; /* diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index ca9ebc3242d3..11c67e8b939d 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3379,7 +3379,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) /* ignore lock owners */ if (local->st_stateowner->so_is_open_owner == 0) continue; - if (local->st_stateowner == &oo->oo_owner) { + if (local->st_stateowner != &oo->oo_owner) + continue; + if (local->st_stid.sc_type == NFS4_OPEN_STID) { ret = local; atomic_inc(&ret->st_stid.sc_count); break; @@ -3388,6 +3390,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) return ret; } +static __be32 +nfsd4_verify_open_stid(struct nfs4_stid *s) +{ + __be32 ret = nfs_ok; + + switch (s->sc_type) { + default: + break; + case NFS4_CLOSED_STID: + case NFS4_CLOSED_DELEG_STID: + ret = nfserr_bad_stateid; + break; + case NFS4_REVOKED_DELEG_STID: + ret = nfserr_deleg_revoked; + } + return ret; +} + +/* Lock the stateid st_mutex, and deal with races with CLOSE */ +static __be32 +nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp) +{ + __be32 ret; + + mutex_lock(&stp->st_mutex); + ret = nfsd4_verify_open_stid(&stp->st_stid); + if (ret != nfs_ok) + mutex_unlock(&stp->st_mutex); + return ret; +} + +static struct nfs4_ol_stateid * +nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) +{ + struct nfs4_ol_stateid *stp; + for (;;) { + spin_lock(&fp->fi_lock); + stp = nfsd4_find_existing_open(fp, open); + spin_unlock(&fp->fi_lock); + if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok) + break; + nfs4_put_stid(&stp->st_stid); + } + return stp; +} + static struct nfs4_openowner * alloc_init_open_stateowner(unsigned int strhashval, struct 
nfsd4_open *open, struct nfsd4_compound_state *cstate) @@ -3420,23 +3468,27 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, } static struct nfs4_ol_stateid * -init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, - struct nfsd4_open *open) +init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) { struct nfs4_openowner *oo = open->op_openowner; struct nfs4_ol_stateid *retstp = NULL; + struct nfs4_ol_stateid *stp; + stp = open->op_stp; /* We are moving these outside of the spinlocks to avoid the warnings */ mutex_init(&stp->st_mutex); mutex_lock(&stp->st_mutex); +retry: spin_lock(&oo->oo_owner.so_client->cl_lock); spin_lock(&fp->fi_lock); retstp = nfsd4_find_existing_open(fp, open); if (retstp) goto out_unlock; + + open->op_stp = NULL; atomic_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_OPEN_STID; INIT_LIST_HEAD(&stp->st_locks); @@ -3453,11 +3505,16 @@ out_unlock: spin_unlock(&fp->fi_lock); spin_unlock(&oo->oo_owner.so_client->cl_lock); if (retstp) { - mutex_lock(&retstp->st_mutex); - /* Not that we need to, just for neatness */ + /* Handle races with CLOSE */ + if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { + nfs4_put_stid(&retstp->st_stid); + goto retry; + } + /* To keep mutex tracking happy */ mutex_unlock(&stp->st_mutex); + stp = retstp; } - return retstp; + return stp; } /* @@ -3829,7 +3886,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei { struct nfs4_stid *ret; - ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); + ret = find_stateid_by_type(cl, s, + NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID); if (!ret) return NULL; return delegstateid(ret); @@ -3852,6 +3910,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); if (deleg == NULL) goto out; + if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) { + nfs4_put_stid(&deleg->dl_stid); + if (cl->cl_minorversion) + status = 
nfserr_deleg_revoked; + goto out; + } flags = share_access_to_flags(open->op_share_access); status = nfs4_check_delegmode(deleg, flags); if (status) { @@ -4253,9 +4317,9 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; struct nfs4_file *fp = NULL; struct nfs4_ol_stateid *stp = NULL; - struct nfs4_ol_stateid *swapstp = NULL; struct nfs4_delegation *dp = NULL; __be32 status; + bool new_stp = false; /* * Lookup file; if found, lookup stateid and check open request, @@ -4267,9 +4331,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf status = nfs4_check_deleg(cl, open, &dp); if (status) goto out; - spin_lock(&fp->fi_lock); - stp = nfsd4_find_existing_open(fp, open); - spin_unlock(&fp->fi_lock); + stp = nfsd4_find_and_lock_existing_open(fp, open); } else { open->op_file = NULL; status = nfserr_bad_stateid; @@ -4277,41 +4339,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf goto out; } + if (!stp) { + stp = init_open_stateid(fp, open); + if (!open->op_stp) + new_stp = true; + } + /* * OPEN the file, or upgrade an existing OPEN. * If truncate fails, the OPEN fails. + * + * stp is already locked. 
*/ - if (stp) { + if (!new_stp) { /* Stateid was found, this is an OPEN upgrade */ - mutex_lock(&stp->st_mutex); status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); if (status) { mutex_unlock(&stp->st_mutex); goto out; } } else { - stp = open->op_stp; - open->op_stp = NULL; - /* - * init_open_stateid() either returns a locked stateid - * it found, or initializes and locks the new one we passed in - */ - swapstp = init_open_stateid(stp, fp, open); - if (swapstp) { - nfs4_put_stid(&stp->st_stid); - stp = swapstp; - status = nfs4_upgrade_open(rqstp, fp, current_fh, - stp, open); - if (status) { - mutex_unlock(&stp->st_mutex); - goto out; - } - goto upgrade_out; - } status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); if (status) { - mutex_unlock(&stp->st_mutex); + stp->st_stid.sc_type = NFS4_CLOSED_STID; release_open_stateid(stp); + mutex_unlock(&stp->st_mutex); goto out; } @@ -4320,7 +4372,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf if (stp->st_clnt_odstate == open->op_odstate) open->op_odstate = NULL; } -upgrade_out: + nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); mutex_unlock(&stp->st_mutex); @@ -4696,6 +4748,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, struct nfs4_stid **s, struct nfsd_net *nn) { __be32 status; + bool return_revoked = false; + + /* + * only return revoked delegations if explicitly asked. + * otherwise we report revoked or bad_stateid status. 
+ */ + if (typemask & NFS4_REVOKED_DELEG_STID) + return_revoked = true; + else if (typemask & NFS4_DELEG_STID) + typemask |= NFS4_REVOKED_DELEG_STID; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) return nfserr_bad_stateid; @@ -4710,6 +4772,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, *s = find_stateid_by_type(cstate->clp, stateid, typemask); if (!*s) return nfserr_bad_stateid; + if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) { + nfs4_put_stid(*s); + if (cstate->minorversion) + return nfserr_deleg_revoked; + return nfserr_bad_stateid; + } return nfs_ok; } @@ -5130,7 +5198,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) bool unhashed; LIST_HEAD(reaplist); - s->st_stid.sc_type = NFS4_CLOSED_STID; spin_lock(&clp->cl_lock); unhashed = unhash_open_stateid(s, &reaplist); @@ -5169,10 +5236,12 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, nfsd4_bump_seqid(cstate, status); if (status) goto out; + + stp->st_stid.sc_type = NFS4_CLOSED_STID; nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); - mutex_unlock(&stp->st_mutex); nfsd4_close_open_stateid(stp); + mutex_unlock(&stp->st_mutex); /* put reference from nfs4_preprocess_seqid_op */ nfs4_put_stid(&stp->st_stid); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 2f27c935bd57..34c22fe4eca0 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1945,8 +1945,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, "failed to get inode block.\n"); return err; } - mark_buffer_dirty(ibh); - nilfs_mdt_mark_dirty(ifile); spin_lock(&nilfs->ns_inode_lock); if (likely(!ii->i_bh)) ii->i_bh = ibh; @@ -1955,6 +1953,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, goto retry; } + // Always redirty the buffer to avoid race condition + mark_buffer_dirty(ii->i_bh); + nilfs_mdt_mark_dirty(ifile); + clear_bit(NILFS_I_QUEUED, &ii->i_state); set_bit(NILFS_I_BUSY, &ii->i_state); 
list_move_tail(&ii->i_dirty, &sci->sc_dirty_files); diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h index 13949259705a..0d4fe32b3ae2 100644 --- a/include/dt-bindings/pinctrl/omap.h +++ b/include/dt-bindings/pinctrl/omap.h @@ -45,8 +45,8 @@ #define PIN_OFF_NONE 0 #define PIN_OFF_OUTPUT_HIGH (OFF_EN | OFFOUT_EN | OFFOUT_VAL) #define PIN_OFF_OUTPUT_LOW (OFF_EN | OFFOUT_EN) -#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFF_PULL_EN | OFF_PULL_UP) -#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFF_PULL_EN) +#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFFOUT_EN | OFF_PULL_EN | OFF_PULL_UP) +#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFFOUT_EN | OFF_PULL_EN) #define PIN_OFF_WAKEUPENABLE WAKEUP_EN /* diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 89d9aa9e79bf..6fe974dbe741 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -234,12 +234,10 @@ static inline int block_page_mkwrite_return(int err) { if (err == 0) return VM_FAULT_LOCKED; - if (err == -EFAULT) + if (err == -EFAULT || err == -EAGAIN) return VM_FAULT_NOPAGE; if (err == -ENOMEM) return VM_FAULT_OOM; - if (err == -EAGAIN) - return VM_FAULT_RETRY; /* -ENOSPC, -EDQUOT, -EIO ... 
*/ return VM_FAULT_SIGBUS; } diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index c2a975e4a711..fef1caeddf54 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -36,6 +36,8 @@ #define F2FS_NODE_INO(sbi) (sbi->node_ino_num) #define F2FS_META_INO(sbi) (sbi->meta_ino_num) +#define F2FS_MAX_QUOTAS 3 + #define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */ #define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */ #define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */ @@ -108,7 +110,8 @@ struct f2fs_super_block { __u8 encryption_level; /* versioning level for encryption */ __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ struct f2fs_device devs[MAX_DEVICES]; /* device list */ - __u8 reserved[327]; /* valid reserved region */ + __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ + __u8 reserved[315]; /* valid reserved region */ } __packed; /* @@ -184,7 +187,8 @@ struct f2fs_extent { } __packed; #define F2FS_NAME_LEN 255 -#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */ +/* 200 bytes for inline xattrs by default */ +#define DEFAULT_INLINE_XATTR_ADDRS 50 #define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ #define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \ get_extra_isize(inode)) @@ -238,7 +242,7 @@ struct f2fs_inode { union { struct { __le16 i_extra_isize; /* extra inode attribute size */ - __le16 i_padding; /* padding */ + __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */ __le32 i_projid; /* project id */ __le32 i_inode_checksum;/* inode meta checksum */ __le32 i_extra_end[0]; /* for attribute size calculation */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index ee971f335a8b..7118876e9896 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -128,6 +128,8 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) #define 
IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) #define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) #define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) +#define IN_DEV_NF_IPV4_DEFRAG_SKIP(in_dev) \ + IN_DEV_ORCONF((in_dev), NF_IPV4_DEFRAG_SKIP) struct in_ifaddr { struct hlist_node hash; diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 50220cab738c..05b63a1e9f84 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -53,6 +53,13 @@ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) +#define u64_to_user_ptr(x) ( \ +{ \ + typecheck(u64, x); \ + (void __user *)(uintptr_t)x; \ +} \ +) + /* * This looks more complex than it should be. But we need to * get the type for the ~ right in round_down (it needs to be diff --git a/include/linux/mm.h b/include/linux/mm.h index 7d0b5e7bcadb..b4a5021fbbfa 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -71,6 +71,10 @@ extern int mmap_rnd_compat_bits __read_mostly; #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #endif +#ifndef lm_alias +#define lm_alias(x) __va(__pa_symbol(x)) +#endif + /* * To prevent common memory management code establishing * a zero page mapping on a read fault. diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 2b1be7efde55..721bdb0226bd 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -715,7 +715,8 @@ typedef struct pglist_data { * is the first PFN that needs to be initialised. 
*/ unsigned long first_deferred_pfn; - unsigned long static_init_size; + /* Number of non-deferred pages */ + unsigned long static_init_pgcnt; #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ } pg_data_t; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0a306b431ece..c77de3b5f564 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3473,6 +3473,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *), unsigned int txqs, unsigned int rxqs); +int dev_get_valid_name(struct net *net, struct net_device *dev, + const char *name); + #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 639e9b8b0e4d..0b41959aab9f 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -131,6 +131,7 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); @@ -153,6 +154,7 @@ struct nlmsghdr * __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags); struct netlink_dump_control { + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *skb, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; diff --git a/include/linux/phy.h b/include/linux/phy.h index b64825d6ad26..5bc4b9d563a9 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -136,11 +136,7 @@ static inline const char *phy_modes(phy_interface_t interface) /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ #define PHY_ID_FMT "%s:%02x" -/* - * Need to be a little smaller than phydev->dev.bus_id to leave room - * for the ":%02x" 
- */ -#define MII_BUS_ID_SIZE (20 - 3) +#define MII_BUS_ID_SIZE 61 /* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */ @@ -599,7 +595,7 @@ struct phy_driver { /* A Structure for boards to register fixups with the PHY Lib */ struct phy_fixup { struct list_head list; - char bus_id[20]; + char bus_id[MII_BUS_ID_SIZE + 3]; u32 phy_uid; u32 phy_uid_mask; int (*run)(struct phy_device *phydev); diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 75e4e30677f1..7eeceac52dea 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -65,19 +65,24 @@ /* * Are we doing bottom half or hardware interrupt processing? - * Are we in a softirq context? Interrupt context? - * in_softirq - Are we currently processing softirq or have bh disabled? - * in_serving_softirq - Are we currently processing softirq? + * + * in_irq() - We're in (hard) IRQ context + * in_softirq() - We have BH disabled, or are processing softirqs + * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled + * in_serving_softirq() - We're in softirq context + * in_nmi() - We're in NMI context + * in_task() - We're in task context + * + * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really + * should not be used in new code. */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) - -/* - * Are we in NMI context? 
- */ -#define in_nmi() (preempt_count() & NMI_MASK) +#define in_nmi() (preempt_count() & NMI_MASK) +#define in_task() (!(preempt_count() & \ + (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) /* * The preempt_count offset after preempt_disable(); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3f61c647fc5c..b5421f6f155a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3400,6 +3400,13 @@ static inline void nf_reset_trace(struct sk_buff *skb) #endif } +static inline void ipvs_reset(struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_IP_VS) + skb->ipvs_property = 0; +#endif +} + /* Note: This doesn't put any conntrack and bridge info in dst. */ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, bool copy) diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h new file mode 100644 index 000000000000..0f175b8f6456 --- /dev/null +++ b/include/linux/tee_drv.h @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __TEE_DRV_H +#define __TEE_DRV_H + +#include <linux/types.h> +#include <linux/idr.h> +#include <linux/list.h> +#include <linux/tee.h> + +/* + * The file describes the API provided by the generic TEE driver to the + * specific TEE driver. 
+ */ + +#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */ +#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */ + +struct tee_device; +struct tee_shm; +struct tee_shm_pool; + +/** + * struct tee_context - driver specific context on file pointer data + * @teedev: pointer to this drivers struct tee_device + * @list_shm: List of shared memory object owned by this context + * @data: driver specific context data, managed by the driver + */ +struct tee_context { + struct tee_device *teedev; + struct list_head list_shm; + void *data; +}; + +struct tee_param_memref { + size_t shm_offs; + size_t size; + struct tee_shm *shm; +}; + +struct tee_param_value { + u64 a; + u64 b; + u64 c; +}; + +struct tee_param { + u64 attr; + union { + struct tee_param_memref memref; + struct tee_param_value value; + } u; +}; + +/** + * struct tee_driver_ops - driver operations vtable + * @get_version: returns version of driver + * @open: called when the device file is opened + * @release: release this open file + * @open_session: open a new session + * @close_session: close a session + * @invoke_func: invoke a trusted function + * @cancel_req: request cancel of an ongoing invoke or open + * @supp_revc: called for supplicant to get a command + * @supp_send: called for supplicant to send a response + */ +struct tee_driver_ops { + void (*get_version)(struct tee_device *teedev, + struct tee_ioctl_version_data *vers); + int (*open)(struct tee_context *ctx); + void (*release)(struct tee_context *ctx); + int (*open_session)(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); + int (*close_session)(struct tee_context *ctx, u32 session); + int (*invoke_func)(struct tee_context *ctx, + struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); + int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session); + int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param); + int (*supp_send)(struct 
tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param); +}; + +/** + * struct tee_desc - Describes the TEE driver to the subsystem + * @name: name of driver + * @ops: driver operations vtable + * @owner: module providing the driver + * @flags: Extra properties of driver, defined by TEE_DESC_* below + */ +#define TEE_DESC_PRIVILEGED 0x1 +struct tee_desc { + const char *name; + const struct tee_driver_ops *ops; + struct module *owner; + u32 flags; +}; + +/** + * tee_device_alloc() - Allocate a new struct tee_device instance + * @teedesc: Descriptor for this driver + * @dev: Parent device for this device + * @pool: Shared memory pool, NULL if not used + * @driver_data: Private driver data for this device + * + * Allocates a new struct tee_device instance. The device is + * removed by tee_device_unregister(). + * + * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure + */ +struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, + struct device *dev, + struct tee_shm_pool *pool, + void *driver_data); + +/** + * tee_device_register() - Registers a TEE device + * @teedev: Device to register + * + * tee_device_unregister() need to be called to remove the @teedev if + * this function fails. + * + * @returns < 0 on failure + */ +int tee_device_register(struct tee_device *teedev); + +/** + * tee_device_unregister() - Removes a TEE device + * @teedev: Device to unregister + * + * This function should be called to remove the @teedev even if + * tee_device_register() hasn't been called yet. Does nothing if + * @teedev is NULL. 
+ */ +void tee_device_unregister(struct tee_device *teedev); + +/** + * struct tee_shm_pool_mem_info - holds information needed to create a shared + * memory pool + * @vaddr: Virtual address of start of pool + * @paddr: Physical address of start of pool + * @size: Size in bytes of the pool + */ +struct tee_shm_pool_mem_info { + unsigned long vaddr; + phys_addr_t paddr; + size_t size; +}; + +/** + * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved + * memory range + * @priv_info: Information for driver private shared memory pool + * @dmabuf_info: Information for dma-buf shared memory pool + * + * Start and end of pools will must be page aligned. + * + * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied + * in @dmabuf, others will use the range provided by @priv. + * + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. + */ +struct tee_shm_pool * +tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, + struct tee_shm_pool_mem_info *dmabuf_info); + +/** + * tee_shm_pool_free() - Free a shared memory pool + * @pool: The shared memory pool to free + * + * The must be no remaining shared memory allocated from this pool when + * this function is called. + */ +void tee_shm_pool_free(struct tee_shm_pool *pool); + +/** + * tee_get_drvdata() - Return driver_data pointer + * @returns the driver_data pointer supplied to tee_register(). + */ +void *tee_get_drvdata(struct tee_device *teedev); + +/** + * tee_shm_alloc() - Allocate shared memory + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * @flags: Flags setting properties for the requested shared memory. + * + * Memory allocated as global shared memory is automatically freed when the + * TEE file pointer is closed. The @flags field uses the bits defined by + * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. 
If + * TEE_SHM_DMA_BUF global shared memory will be allocated and associated + * with a dma-buf handle, else driver private memory. + * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); + +/** + * tee_shm_free() - Free shared memory + * @shm: Handle to shared memory to free + */ +void tee_shm_free(struct tee_shm *shm); + +/** + * tee_shm_put() - Decrease reference count on a shared memory handle + * @shm: Shared memory handle + */ +void tee_shm_put(struct tee_shm *shm); + +/** + * tee_shm_va2pa() - Get physical address of a virtual address + * @shm: Shared memory handle + * @va: Virtual address to tranlsate + * @pa: Returned physical address + * @returns 0 on success and < 0 on failure + */ +int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa); + +/** + * tee_shm_pa2va() - Get virtual address of a physical address + * @shm: Shared memory handle + * @pa: Physical address to tranlsate + * @va: Returned virtual address + * @returns 0 on success and < 0 on failure + */ +int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va); + +/** + * tee_shm_get_va() - Get virtual address of a shared memory plus an offset + * @shm: Shared memory handle + * @offs: Offset from start of this shared memory + * @returns virtual address of the shared memory + offs if offs is within + * the bounds of this shared memory, else an ERR_PTR + */ +void *tee_shm_get_va(struct tee_shm *shm, size_t offs); + +/** + * tee_shm_get_pa() - Get physical address of a shared memory plus an offset + * @shm: Shared memory handle + * @offs: Offset from start of this shared memory + * @pa: Physical address to return + * @returns 0 if offs is within the bounds of this shared memory, else an + * error code. 
+ */ +int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa); + +/** + * tee_shm_get_id() - Get id of a shared memory object + * @shm: Shared memory handle + * @returns id + */ +int tee_shm_get_id(struct tee_shm *shm); + +/** + * tee_shm_get_from_id() - Find shared memory object and increase reference + * count + * @ctx: Context owning the shared memory + * @id: Id of shared memory object + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure + */ +struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); + +#endif /*__TEE_DRV_H*/ diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index f0f1793cfa49..3a5af09af18b 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -50,13 +50,13 @@ struct tk_read_base { * @tai_offset: The current UTC to TAI offset in seconds * @clock_was_set_seq: The sequence number of clock was set events * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second - * @raw_time: Monotonic raw base time in timespec64 format + * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds * @cycle_interval: Number of clock cycles in one NTP interval * @xtime_interval: Number of clock shifted nano seconds in one NTP * interval. * @xtime_remainder: Shifted nano seconds left over when rounding * @cycle_interval - * @raw_interval: Raw nano seconds accumulated per NTP interval. + * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. * @ntp_error: Difference between accumulated time and NTP time in ntp * shifted nano seconds. 
* @ntp_error_shift: Shift conversion between clock shifted nano seconds and @@ -91,13 +91,13 @@ struct timekeeper { s32 tai_offset; unsigned int clock_was_set_seq; ktime_t next_leap_ktime; - struct timespec64 raw_time; + u64 raw_sec; /* The following members are for timekeeping internal use */ cycle_t cycle_interval; u64 xtime_interval; s64 xtime_remainder; - u32 raw_interval; + u64 raw_interval; /* The ntp_tick_length() value currently being used. * This cached copy ensures we consistently apply the tick * length for an entire tick, as ntp_tick_length may change diff --git a/include/linux/usb.h b/include/linux/usb.h index 83a505c749e1..1821d34c24a5 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -330,6 +330,7 @@ struct usb_host_bos { struct usb_ss_cap_descriptor *ss_cap; struct usb_ssp_cap_descriptor *ssp_cap; struct usb_ss_container_id_descriptor *ss_id; + struct usb_ptm_cap_descriptor *ptm_cap; struct usb_config_summary_descriptor *config_summary; unsigned int num_config_summary_desc; }; diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 3a375d07d0dc..6670e9b34f20 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -82,6 +82,7 @@ /* Driver flags */ #define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */ +#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */ #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 1b6b6dcb018d..43c0e771f417 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -114,6 +114,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net) * @flags: flags * @policy: attribute validation policy * @doit: standard command callback + * @start: start callback for dumps * @dumpit: callback for dumpers * @done: completion callback 
for dumps * @ops_list: operations list @@ -122,6 +123,7 @@ struct genl_ops { const struct nla_policy *policy; int (*doit)(struct sk_buff *skb, struct genl_info *info); + int (*start)(struct netlink_callback *cb); int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 625bdf95d673..95aa999f31d7 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -95,7 +95,7 @@ struct inet_request_sock { kmemcheck_bitfield_end(flags); u32 ir_mark; union { - struct ip_options_rcu *opt; + struct ip_options_rcu __rcu *ireq_opt; struct sk_buff *pktopts; }; }; @@ -113,6 +113,12 @@ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) return sk->sk_mark; } +static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq) +{ + return rcu_dereference_check(ireq->ireq_opt, + atomic_read(&ireq->req.rsk_refcnt) > 0); +} + struct inet_cork { unsigned int flags; __be32 addr; diff --git a/include/net/tcp.h b/include/net/tcp.h index 52402ab90c57..340b01dd8c37 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1629,12 +1629,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk) tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk); } -/* Called when old skb is about to be deleted (to be combined with new skb) */ -static inline void tcp_highest_sack_combine(struct sock *sk, +/* Called when old skb is about to be deleted and replaced by new skb */ +static inline void tcp_highest_sack_replace(struct sock *sk, struct sk_buff *old, struct sk_buff *new) { - if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack)) + if (old == tcp_highest_sack(sk)) tcp_sk(sk)->highest_sack = new; } diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h index 6819af659768..ca2ceff39f2f 100644 --- a/include/sound/apr_audio-v2.h +++ b/include/sound/apr_audio-v2.h @@ -501,70 +501,36 @@ struct 
adm_cmd_device_open_v6 { /* Sets one or more parameters to a COPP. */ #define ADM_CMD_SET_PP_PARAMS_V5 0x00010328 +#define ADM_CMD_SET_PP_PARAMS_V6 0x0001035D -/* Payload of the #ADM_CMD_SET_PP_PARAMS_V5 command. - * If the data_payload_addr_lsw and data_payload_addr_msw element - * are NULL, a series of adm_param_datastructures immediately - * follows, whose total size is data_payload_size bytes. +/* + * Structure of the ADM Set PP Params command. Parameter data must be + * pre-packed with correct header for either V2 or V3 when sent in-band. + * Use q6core_pack_pp_params to pack the header and data correctly depending on + * Instance ID support. */ -struct adm_cmd_set_pp_params_v5 { - struct apr_hdr hdr; - u32 payload_addr_lsw; - /* LSW of parameter data payload address.*/ - u32 payload_addr_msw; - /* MSW of parameter data payload address.*/ +struct adm_cmd_set_pp_params { + /* APR Header */ + struct apr_hdr apr_hdr; - u32 mem_map_handle; -/* Memory map handle returned by ADM_CMD_SHARED_MEM_MAP_REGIONS - * command */ -/* If mem_map_handle is zero implies the message is in - * the payload */ + /* The memory mapping header to be used when sending out of band */ + struct mem_mapping_hdr mem_hdr; - u32 payload_size; -/* Size in bytes of the variable payload accompanying this - * message or - * in shared memory. This is used for parsing the parameter - * payload. - */ -} __packed; - -/* Payload format for COPP parameter data. - * Immediately following this structure are param_size bytes - * of parameter - * data. - */ -struct adm_param_data_v5 { - u32 module_id; - /* Unique ID of the module. */ - u32 param_id; - /* Unique ID of the parameter. */ - u16 param_size; - /* Data size of the param_id/module_id combination. - This value is a - multiple of 4 bytes. */ - u16 reserved; - /* Reserved for future enhancements. - * This field must be set to zero. + /* Size in bytes of the variable payload accompanying this + * message or + * in shared memory. 
This is used for parsing the parameter + * payload. */ -} __packed; - - -struct param_data_v6 { - /* Unique ID of the module. */ - u32 module_id; - /* Unique ID of the instance. */ - u16 instance_id; - /* Reserved for future enhancements. - * This field must be set to zero. + u32 payload_size; + + /* Parameter data for in band payload. This should be structured as the + * parameter header immediately followed by the parameter data. Multiple + * parameters can be set in one command by repeating the header followed + * by the data for as many parameters as need to be set. + * Use q6core_pack_pp_params to pack the header and data correctly + * depending on Instance ID support. */ - u16 reserved; - /* Unique ID of the parameter. */ - u32 param_id; - /* Data size of the param_id/module_id combination. - * This value is a - * multiple of 4 bytes. - */ - u32 param_size; + u8 param_data[0]; } __packed; /* ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 command is used to set @@ -582,7 +548,7 @@ struct param_data_v6 { /* Payload of the #define ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 command. * If the data_payload_addr_lsw and data_payload_addr_msw element - * are NULL, a series of struct param_data_v6 structures immediately + * are NULL, a series of struct param_hdr_v3 structures immediately * follows, whose total size is payload_size bytes. */ struct adm_cmd_set_mtmx_params_v1 { @@ -619,7 +585,7 @@ struct enable_param_v6 { * This parameter is generic/common parameter to configure or * determine the state of any audio processing module. 
*/ - struct param_data_v6 param; + struct param_hdr_v3 param; /* @values 0 : Disable 1: Enable */ uint32_t enable; @@ -672,25 +638,6 @@ struct adm_cmd_set_pspd_mtmx_strtr_params_v5 { u16 reserved; } __packed; -/* Defined specifically for in-band use, includes params */ -struct adm_cmd_set_pp_params_inband_v5 { - struct apr_hdr hdr; - /* LSW of parameter data payload address.*/ - u32 payload_addr_lsw; - /* MSW of parameter data payload address.*/ - u32 payload_addr_msw; - /* Memory map handle returned by ADM_CMD_SHARED_MEM_MAP_REGIONS */ - /* command. If mem_map_handle is zero implies the message is in */ - /* the payload */ - u32 mem_map_handle; - /* Size in bytes of the variable payload accompanying this */ - /* message or in shared memory. This is used for parsing the */ - /* parameter payload. */ - u32 payload_size; - /* Parameters passed for in band payload */ - struct adm_param_data_v5 params; -} __packed; - /* Returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V5 command. */ #define ADM_CMDRSP_DEVICE_OPEN_V5 0x00010329 @@ -723,44 +670,21 @@ struct adm_cmd_rsp_device_open_v5 { /* This command allows a query of one COPP parameter. */ #define ADM_CMD_GET_PP_PARAMS_V5 0x0001032A +#define ADM_CMD_GET_PP_PARAMS_V6 0x0001035E -/* Payload an #ADM_CMD_GET_PP_PARAMS_V5 command. -*/ -struct adm_cmd_get_pp_params_v5 { - struct apr_hdr hdr; - u32 data_payload_addr_lsw; - /* LSW of parameter data payload address.*/ - - u32 data_payload_addr_msw; - /* MSW of parameter data payload address.*/ - - /* If the mem_map_handle is non zero, - * on ACK, the ParamData payloads begin at - * the address specified (out-of-band). - */ - - u32 mem_map_handle; - /* Memory map handle returned - * by ADM_CMD_SHARED_MEM_MAP_REGIONS command. - * If the mem_map_handle is 0, it implies that - * the ACK's payload will contain the ParamData (in-band). - */ - - u32 module_id; - /* Unique ID of the module. */ +/* + * Structure of the ADM Get PP Params command. 
Parameter header must be + * packed correctly for either V2 or V3. Use q6core_pack_pp_params to pack the + * header correctly depending on Instance ID support. + */ +struct adm_cmd_get_pp_params { + struct apr_hdr apr_hdr; - u32 param_id; - /* Unique ID of the parameter. */ + /* The memory mapping header to be used when requesting outband */ + struct mem_mapping_hdr mem_hdr; - u16 param_max_size; - /* Maximum data size of the parameter - *ID/module ID combination. This - * field is a multiple of 4 bytes. - */ - u16 reserved; - /* Reserved for future enhancements. - * This field must be set to zero. - */ + /* Parameter header for in band payload. */ + union param_hdrs param_hdr; } __packed; /* Returns parameter values @@ -772,15 +696,48 @@ struct adm_cmd_get_pp_params_v5 { * which returns parameter values in response * to an #ADM_CMD_GET_PP_PARAMS_V5 command. * Immediately following this - * structure is the adm_param_data_v5 + * structure is the param_hdr_v1 * structure containing the pre/postprocessing * parameter data. For an in-band * scenario, the variable payload depends * on the size of the parameter. */ struct adm_cmd_rsp_get_pp_params_v5 { - u32 status; /* Status message (error code).*/ + u32 status; + + /* The header that identifies the subsequent parameter data */ + struct param_hdr_v1 param_hdr; + + /* The parameter data returned */ + u32 param_data[0]; +} __packed; + +/* + * Returns parameter values in response to an #ADM_CMD_GET_PP_PARAMS_V5/6 + * command. + */ +#define ADM_CMDRSP_GET_PP_PARAMS_V6 0x0001035F + +/* Payload of the #ADM_CMDRSP_GET_PP_PARAMS_V6 message, + * which returns parameter values in response + * to an #ADM_CMD_GET_PP_PARAMS_V6 command. + * Immediately following this + * structure is the param_hdr_v3 + * structure containing the pre/postprocessing + * parameter data. For an in-band + * scenario, the variable payload depends + * on the size of the parameter. 
+*/ +struct adm_cmd_rsp_get_pp_params_v6 { + /* Status message (error code).*/ + u32 status; + + /* The header that identifies the subsequent parameter data */ + struct param_hdr_v3 param_hdr; + + /* The parameter data returned */ + u32 param_data[0]; } __packed; /* Structure for holding soft stepping volume parameters. */ @@ -833,9 +790,29 @@ struct adm_pspd_param_data_t { uint16_t reserved; } __packed; -struct audproc_mfc_output_media_fmt { - struct adm_cmd_set_pp_params_v5 params; - struct adm_param_data_v5 data; +struct adm_cmd_set_pp_params_v5 { + struct apr_hdr hdr; + u32 payload_addr_lsw; + /* LSW of parameter data payload address.*/ + u32 payload_addr_msw; + /* MSW of parameter data payload address.*/ + + u32 mem_map_handle; + /* Memory map handle returned by ADM_CMD_SHARED_MEM_MAP_REGIONS + * command. + * If mem_map_handle is zero implies the message is in + * the payload + */ + + u32 payload_size; + /* Size in bytes of the variable payload accompanying this + * message or + * in shared memory. This is used for parsing the parameter + * payload. + */ +} __packed; + +struct audproc_mfc_param_media_fmt { uint32_t sampling_rate; uint16_t bits_per_sample; uint16_t num_channels; @@ -843,8 +820,6 @@ struct audproc_mfc_output_media_fmt { } __packed; struct audproc_volume_ctrl_master_gain { - struct adm_cmd_set_pp_params_v5 params; - struct adm_param_data_v5 data; /* Linear gain in Q13 format. */ uint16_t master_gain; /* Clients must set this field to zero. */ @@ -852,8 +827,6 @@ struct audproc_volume_ctrl_master_gain { } __packed; struct audproc_soft_step_volume_params { - struct adm_cmd_set_pp_params_v5 params; - struct adm_param_data_v5 data; /* * Period in milliseconds. * Supported values: 0 to 15000 @@ -875,7 +848,6 @@ struct audproc_soft_step_volume_params { } __packed; struct audproc_enable_param_t { - struct adm_cmd_set_pp_params_inband_v5 pp_params; /* * Specifies whether the Audio processing module is enabled. 
* This parameter is generic/common parameter to configure or @@ -1599,87 +1571,136 @@ struct afe_sidetone_iir_filter_config_params { #define AFE_MODULE_LOOPBACK 0x00010205 #define AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH 0x00010206 -/* Payload of the #AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH parameter, - * which gets/sets loopback gain of a port to an Rx port. - * The Tx port ID of the loopback is part of the set_param command. - */ +/* Used by RTAC */ +struct afe_rtac_user_data_set_v2 { + /* Port interface and direction (Rx or Tx) to start. */ + u16 port_id; -/* Payload of the #AFE_PORT_CMD_SET_PARAM_V2 command's - * configuration/calibration settings for the AFE port. - */ -struct afe_port_cmd_set_param_v2 { + /* Actual size of the payload in bytes. + * This is used for parsing the parameter payload. + * Supported values: > 0 + */ + u16 payload_size; + + /* The header detailing the memory mapping for out of band. */ + struct mem_mapping_hdr mem_hdr; + + /* The parameter header for the parameter data to set */ + struct param_hdr_v1 param_hdr; + + /* The parameter data to be filled when sent inband */ + u32 *param_data; +} __packed; + +struct afe_rtac_user_data_set_v3 { + /* Port interface and direction (Rx or Tx) to start. */ + u16 port_id; + /* Reserved for future enhancements. Must be 0. */ + u16 reserved; + + /* The header detailing the memory mapping for out of band. */ + struct mem_mapping_hdr mem_hdr; + + /* The size of the parameter header and parameter data */ + u32 payload_size; + + /* The parameter header for the parameter data to set */ + struct param_hdr_v3 param_hdr; + + /* The parameter data to be filled when sent inband */ + u32 *param_data; +} __packed; + +struct afe_rtac_user_data_get_v2 { + /* Port interface and direction (Rx or Tx) to start. */ u16 port_id; -/* Port interface and direction (Rx or Tx) to start. - */ + /* Actual size of the payload in bytes. + * This is used for parsing the parameter payload. 
+ * Supported values: > 0 + */ u16 payload_size; -/* Actual size of the payload in bytes. - * This is used for parsing the parameter payload. - * Supported values: > 0 - */ -u32 payload_address_lsw; -/* LSW of 64 bit Payload address. - * Address should be 32-byte, - * 4kbyte aligned and must be contiguous memory. - */ + /* The header detailing the memory mapping for out of band. */ + struct mem_mapping_hdr mem_hdr; -u32 payload_address_msw; -/* MSW of 64 bit Payload address. - * In case of 32-bit shared memory address, - * this field must be set to zero. - * In case of 36-bit shared memory address, - * bit-4 to bit-31 must be set to zero. - * Address should be 32-byte, 4kbyte aligned - * and must be contiguous memory. - */ + /* The module ID of the parameter to get */ + u32 module_id; -u32 mem_map_handle; -/* Memory map handle returned by - * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS commands. - * Supported Values: - * - NULL -- Message. The parameter data is in-band. - * - Non-NULL -- The parameter data is Out-band.Pointer to - * the physical address - * in shared memory of the payload data. - * An optional field is available if parameter - * data is in-band: - * afe_param_data_v2 param_data[...]. - * For detailed payload content, see the - * afe_port_param_data_v2 structure. - */ + /* The parameter ID of the parameter to get */ + u32 param_id; + + /* The parameter data to be filled when sent inband */ + struct param_hdr_v1 param_hdr; } __packed; +struct afe_rtac_user_data_get_v3 { + /* Port interface and direction (Rx or Tx) to start. */ + u16 port_id; + /* Reserved for future enhancements. Must be 0. */ + u16 reserved; + + /* The header detailing the memory mapping for out of band. 
*/ + struct mem_mapping_hdr mem_hdr; + + /* The parameter data to be filled when sent inband */ + struct param_hdr_v3 param_hdr; +} __packed; #define AFE_PORT_CMD_SET_PARAM_V2 0x000100EF +struct afe_port_cmd_set_param_v2 { + /* APR Header */ + struct apr_hdr apr_hdr; -struct afe_port_param_data_v2 { - u32 module_id; -/* ID of the module to be configured. - * Supported values: Valid module ID - */ + /* Port interface and direction (Rx or Tx) to start. */ + u16 port_id; -u32 param_id; -/* ID of the parameter corresponding to the supported parameters - * for the module ID. - * Supported values: Valid parameter ID - */ + /* + * Actual size of the payload in bytes. + * This is used for parsing the parameter payload. + * Supported values: > 0 + */ + u16 payload_size; -u16 param_size; -/* Actual size of the data for the - * module_id/param_id pair. The size is a - * multiple of four bytes. - * Supported values: > 0 - */ + /* The header detailing the memory mapping for out of band. */ + struct mem_mapping_hdr mem_hdr; -u16 reserved; -/* This field must be set to zero. - */ + /* The parameter data to be filled when sent inband */ + u8 param_data[0]; } __packed; +#define AFE_PORT_CMD_SET_PARAM_V3 0x000100FA +struct afe_port_cmd_set_param_v3 { + /* APR Header */ + struct apr_hdr apr_hdr; + + /* Port ID of the AFE port to configure. Port interface and direction + * (Rx or Tx) to configure. An even number represents the Rx direction, + * and an odd number represents the Tx direction. + */ + u16 port_id; + + /* Reserved. This field must be set to zero. */ + u16 reserved; + + /* The memory mapping header to be used when sending outband */ + struct mem_mapping_hdr mem_hdr; + + /* The total size of the payload, including param_hdr_v3 */ + u32 payload_size; + + /* + * The parameter data to be filled when sent inband. + * Must include param_hdr packed correctly. 
+ */ + u8 param_data[0]; +} __packed; + +/* Payload of the #AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH parameter, + * which gets/sets loopback gain of a port to an Rx port. + * The Tx port ID of the loopback is part of the set_param command. + */ + struct afe_loopback_gain_per_path_param { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; u16 rx_port_id; /* Rx port of the loopback. */ @@ -1715,9 +1736,6 @@ enum afe_loopback_routing_mode { * which enables/disables one AFE loopback. */ struct afe_loopback_cfg_v1 { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; u32 loopback_cfg_minor_version; /* Minor version used for tracking the version of the RMC module * configuration interface. @@ -1779,19 +1797,19 @@ struct loopback_cfg_data { struct afe_st_loopback_cfg_v1 { struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 gain_pdata; + struct mem_mapping_hdr mem_hdr; + struct param_hdr_v1 gain_pdata; struct afe_loopback_sidetone_gain gain_data; - struct afe_port_param_data_v2 cfg_pdata; + struct param_hdr_v1 cfg_pdata; struct loopback_cfg_data cfg_data; } __packed; struct afe_loopback_iir_cfg_v2 { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 st_iir_enable_pdata; - struct afe_mod_enable_param st_iir_mode_enable_data; - struct afe_port_param_data_v2 st_iir_filter_config_pdata; + struct apr_hdr hdr; + struct mem_mapping_hdr param; + struct param_hdr_v1 st_iir_enable_pdata; + struct afe_mod_enable_param st_iir_mode_enable_data; + struct param_hdr_v1 st_iir_filter_config_pdata; struct afe_sidetone_iir_filter_config_params st_iir_filter_config_data; } __packed; #define AFE_MODULE_SPEAKER_PROTECTION 0x00010209 @@ -2243,20 +2261,6 @@ struct afe_param_id_spdif_clk_cfg { */ } __packed; -struct afe_spdif_clk_config_command { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - 
struct afe_port_param_data_v2 pdata; - struct afe_param_id_spdif_clk_cfg clk_cfg; -} __packed; - -struct afe_spdif_chstatus_config_command { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - struct afe_param_id_spdif_ch_status_cfg ch_status; -} __packed; - struct afe_spdif_port_config { struct afe_param_id_spdif_cfg cfg; struct afe_param_id_spdif_ch_status_cfg ch_status; @@ -2782,16 +2786,6 @@ struct afe_param_id_usb_audio_cfg { u32 endian; } __packed; -struct afe_usb_audio_dev_param_command { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - union { - struct afe_param_id_usb_audio_dev_params usb_dev; - struct afe_param_id_usb_audio_dev_lpcm_fmt lpcm_fmt; - }; -} __packed; - /* * This param id is used to configure Real Time Proxy interface. */ @@ -3186,20 +3180,6 @@ struct afe_param_id_custom_tdm_header_cfg { uint16_t header7; Reserved Info[3] - Bitrate[kbps] - Low Byte -> 0x0 */ } __packed; -struct afe_slot_mapping_config_command { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - struct afe_param_id_slot_mapping_cfg slot_mapping; -} __packed; - -struct afe_custom_tdm_header_config_command { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - struct afe_param_id_custom_tdm_header_cfg custom_tdm_header; -} __packed; - struct afe_tdm_port_config { struct afe_param_id_tdm_cfg tdm; struct afe_param_id_slot_mapping_cfg slot_mapping; @@ -3576,18 +3556,6 @@ union afe_port_config { struct avs_enc_packetizer_id_param_t enc_pkt_id_param; } __packed; -struct afe_audioif_config_command_no_payload { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; -} __packed; - -struct afe_audioif_config_command { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - union afe_port_config port; -} __packed; - 
#define AFE_PORT_CMD_DEVICE_START 0x000100E5 /* Payload of the #AFE_PORT_CMD_DEVICE_START.*/ @@ -3750,13 +3718,8 @@ u32 mem_map_handle; */ } __packed; -#define AFE_PORT_CMD_GET_PARAM_V2 0x000100F0 - -/* Payload of the #AFE_PORT_CMD_GET_PARAM_V2 command, - * which queries for one post/preprocessing parameter of a - * stream. - */ -struct afe_port_cmd_get_param_v2 { +/* Used by RTAC */ +struct afe_rtac_get_param_v2 { u16 port_id; /* Port interface and direction (Rx or Tx) to start. */ @@ -3802,6 +3765,37 @@ struct afe_port_cmd_get_param_v2 { */ } __packed; +#define AFE_PORT_CMD_GET_PARAM_V2 0x000100F0 + +/* Payload of the #AFE_PORT_CMD_GET_PARAM_V2 command, + * which queries for one post/preprocessing parameter of a + * stream. + */ +struct afe_port_cmd_get_param_v2 { + struct apr_hdr apr_hdr; + + /* Port interface and direction (Rx or Tx) to start. */ + u16 port_id; + + /* Maximum data size of the parameter ID/module ID combination. + * This is a multiple of four bytes + * Supported values: > 0 + */ + u16 payload_size; + + /* The memory mapping header to be used when requesting outband */ + struct mem_mapping_hdr mem_hdr; + + /* The module ID of the parameter data requested */ + u32 module_id; + + /* The parameter ID of the parameter data requested */ + u32 param_id; + + /* The header information for the parameter data */ + struct param_hdr_v1 param_hdr; +} __packed; + #define AFE_PORT_CMDRSP_GET_PARAM_V2 0x00010106 /* Payload of the #AFE_PORT_CMDRSP_GET_PARAM_V2 message, which @@ -3817,6 +3811,41 @@ struct afe_port_cmd_get_param_v2 { struct afe_port_cmdrsp_get_param_v2 { u32 status; + struct param_hdr_v1 param_hdr; + u8 param_data[0]; +} __packed; + +#define AFE_PORT_CMD_GET_PARAM_V3 0x000100FB +struct afe_port_cmd_get_param_v3 { + /* APR Header */ + struct apr_hdr apr_hdr; + + /* Port ID of the AFE port to configure. Port interface and direction + * (Rx or Tx) to configure. 
An even number represents the Rx direction, + * and an odd number represents the Tx direction. + */ + u16 port_id; + + /* Reserved. This field must be set to zero. */ + u16 reserved; + + /* The memory mapping header to be used when requesting outband */ + struct mem_mapping_hdr mem_hdr; + + /* The header information for the parameter data */ + struct param_hdr_v3 param_hdr; +} __packed; + +#define AFE_PORT_CMDRSP_GET_PARAM_V3 0x00010108 +struct afe_port_cmdrsp_get_param_v3 { + /* The status of the command */ + uint32_t status; + + /* The header information for the parameter data */ + struct param_hdr_v3 param_hdr; + + /* The parameter data to be filled when sent inband */ + u8 param_data[0]; } __packed; #define AFE_PARAM_ID_LPASS_CORE_SHARED_CLOCK_CONFIG 0x0001028C @@ -3838,13 +3867,6 @@ struct afe_param_id_lpass_core_shared_clk_cfg { */ } __packed; -struct afe_lpass_core_shared_clk_config_command { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - struct afe_param_id_lpass_core_shared_clk_cfg clk_cfg; -} __packed; - /* adsp_afe_service_commands.h */ #define ADSP_MEMORY_MAP_EBI_POOL 0 @@ -6484,59 +6506,33 @@ struct asm_stream_cmd_open_transcode_loopback_t { #define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09 #define ASM_STREAM_CMD_SET_PP_PARAMS_V2 0x00010DA1 +#define ASM_STREAM_CMD_SET_PP_PARAMS_V3 0x0001320D -struct asm_stream_cmd_set_pp_params_v2 { - u32 data_payload_addr_lsw; -/* LSW of parameter data payload address. Supported values: any. */ - u32 data_payload_addr_msw; -/* MSW of Parameter data payload address. Supported values: any. - * - Must be set to zero for in-band data. - * - In the case of 32 bit Shared memory address, msw field must be - * - set to zero. - * - In the case of 36 bit shared memory address, bit 31 to bit 4 of - * msw - * - * - must be set to zero. +/* + * Structure for the ASM Stream Set PP Params command. 
Parameter data must be + * pre-packed with the correct header for either V2 or V3 when sent in-band. + * Use q6core_pack_pp_params to pack the header and data correctly depending on + * Instance ID support. */ - u32 mem_map_handle; -/* Supported Values: Any. -* memory map handle returned by DSP through -* ASM_CMD_SHARED_MEM_MAP_REGIONS -* command. -* if mmhandle is NULL, the ParamData payloads are within the -* message payload (in-band). -* If mmhandle is non-NULL, the ParamData payloads begin at the -* address specified in the address msw and lsw (out-of-band). -*/ +struct asm_stream_cmd_set_pp_params { + /* APR Header */ + struct apr_hdr apr_hdr; - u32 data_payload_size; -/* Size in bytes of the variable payload accompanying the -message, or in shared memory. This field is used for parsing the -parameter payload. */ + /* The memory mapping header to be used when sending out of band */ + struct mem_mapping_hdr mem_hdr; -} __packed; - - -struct asm_stream_param_data_v2 { - u32 module_id; - /* Unique module ID. */ - - u32 param_id; - /* Unique parameter ID. */ - - u16 param_size; -/* Data size of the param_id/module_id combination. This is - * a multiple of 4 bytes. - */ - - u16 reserved; -/* Reserved for future enhancements. This field must be set to - * zero. - */ + /* The total size of the payload, including the parameter header */ + u32 payload_size; + /* The parameter data to be filled when sent inband. Parameter data + * must be pre-packed with parameter header and then copied here. Use + * q6core_pack_pp_params to pack the header and param data correctly. 
+ */ + u32 param_data[0]; } __packed; #define ASM_STREAM_CMD_GET_PP_PARAMS_V2 0x00010DA2 +#define ASM_STREAM_CMD_GET_PP_PARAMS_V3 0x0001320E struct asm_stream_cmd_get_pp_params_v2 { u32 data_payload_addr_lsw; @@ -6715,6 +6711,7 @@ struct asm_aac_dual_mono_mapping_param { } __packed; #define ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 0x00010DA4 +#define ASM_STREAM_CMDRSP_GET_PP_PARAMS_V3 0x0001320F struct asm_stream_cmdrsp_get_pp_params_v2 { u32 status; @@ -7490,12 +7487,6 @@ struct admx_mic_gain { /*< Clients must set this field to zero. */ } __packed; -struct adm_set_mic_gain_params { - struct adm_cmd_set_pp_params_v5 params; - struct adm_param_data_v5 data; - struct admx_mic_gain mic_gain_data; -} __packed; - /* end_addtogroup audio_pp_param_ids */ /* @ingroup audio_pp_module_ids @@ -7851,56 +7842,23 @@ struct adm_qensemble_param_set_new_angle { #define ADM_CMD_GET_PP_TOPO_MODULE_LIST 0x00010349 #define ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST 0x00010350 +#define ADM_CMD_GET_PP_TOPO_MODULE_LIST_V2 0x00010360 +#define ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST_V2 0x00010361 #define AUDPROC_PARAM_ID_ENABLE 0x00010904 - /* - * Payload of the ADM_CMD_GET_PP_TOPO_MODULE_LIST command. - */ -struct adm_cmd_get_pp_topo_module_list_t { - struct apr_hdr hdr; - /* Lower 32 bits of the 64-bit parameter data payload address. */ - uint32_t data_payload_addr_lsw; - /* - * Upper 32 bits of the 64-bit parameter data payload address. - * - * - * The size of the shared memory, if specified, must be large enough to - * contain the entire parameter data payload, including the module ID, - * parameter ID, parameter size, and parameter values. - */ - uint32_t data_payload_addr_msw; - /* - * Unique identifier for an address. - * - * This memory map handle is returned by the aDSP through the - * #ADM_CMD_SHARED_MEM_MAP_REGIONS command. 
- * - * @values - * - Non-NULL -- On acknowledgment, the parameter data payloads begin at - * the address specified (out-of-band) - * - NULL -- The acknowledgment's payload contains the parameter data - * (in-band) @tablebulletend - */ - uint32_t mem_map_handle; +/* + * Payload of the ADM_CMD_GET_PP_TOPO_MODULE_LIST command. + */ +struct adm_cmd_get_pp_topo_module_list { + struct apr_hdr apr_hdr; + + /* The memory mapping header to be used when requesting out of band */ + struct mem_mapping_hdr mem_hdr; + /* * Maximum data size of the list of modules. This * field is a multiple of 4 bytes. */ - uint16_t param_max_size; - /* This field must be set to zero. */ - uint16_t reserved; -} __packed; - -/* - * Payload of the ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST message, which returns - * module ids in response to an ADM_CMD_GET_PP_TOPO_MODULE_LIST command. - * Immediately following this structure is the acknowledgement <b>module id - * data variable payload</b> containing the pre/postprocessing module id - * values. For an in-band scenario, the variable payload depends on the size - * of the parameter. - */ -struct adm_cmd_rsp_get_pp_topo_module_list_t { - /* Status message (error code). */ - uint32_t status; + uint32_t param_max_size; } __packed; struct audproc_topology_module_id_info_t { @@ -7993,9 +7951,6 @@ struct audproc_topology_module_id_info_t { struct asm_volume_ctrl_master_gain { - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 param; - struct asm_stream_param_data_v2 data; uint16_t master_gain; /*< Linear gain in Q13 format. */ @@ -8006,10 +7961,6 @@ struct asm_volume_ctrl_master_gain { struct asm_volume_ctrl_lr_chan_gain { - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 param; - struct asm_stream_param_data_v2 data; - uint16_t l_chan_gain; /*< Linear gain in Q13 format for the left channel. 
*/ @@ -8021,6 +7972,7 @@ struct audproc_chmixer_param_coeff { uint32_t index; uint16_t num_output_channels; uint16_t num_input_channels; + uint32_t payload[0]; } __packed; @@ -8049,6 +8001,7 @@ struct audproc_volume_ctrl_channel_type_gain_pair { /* Payload of the AUDPROC_PARAM_ID_MULTICHANNEL_MUTE parameters used by * the Volume Control module. */ +#define ASM_MAX_CHANNELS 8 struct audproc_volume_ctrl_multichannel_gain { uint32_t num_channels; /* Number of channels for which mute configuration is provided. Any @@ -8056,7 +8009,8 @@ struct audproc_volume_ctrl_multichannel_gain { * provided are set to unmute. */ - struct audproc_volume_ctrl_channel_type_gain_pair *gain_data; + struct audproc_volume_ctrl_channel_type_gain_pair + gain_data[ASM_MAX_CHANNELS]; /* Array of channel type/mute setting pairs. */ } __packed; @@ -8070,9 +8024,6 @@ struct audproc_volume_ctrl_multichannel_gain { struct asm_volume_ctrl_mute_config { - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 param; - struct asm_stream_param_data_v2 data; uint32_t mute_flag; /*< Specifies whether mute is disabled (0) or enabled (nonzero).*/ @@ -8100,9 +8051,6 @@ struct asm_volume_ctrl_mute_config { * parameters used by the Volume Control module. */ struct asm_soft_step_volume_params { - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 param; - struct asm_stream_param_data_v2 data; uint32_t period; /*< Period in milliseconds. * Supported values: 0 to 15000 @@ -8132,9 +8080,6 @@ struct asm_soft_step_volume_params { struct asm_soft_pause_params { - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 param; - struct asm_stream_param_data_v2 data; uint32_t enable_flag; /*< Specifies whether soft pause is disabled (0) or enabled * (nonzero). 
@@ -8224,10 +8169,7 @@ struct asm_volume_ctrl_channeltype_gain_pair { struct asm_volume_ctrl_multichannel_gain { - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 param; - struct asm_stream_param_data_v2 data; - uint32_t num_channels; + uint32_t num_channels; /* * Number of channels for which gain values are provided. Any * channels present in the data for which gain is not provided are @@ -8252,9 +8194,6 @@ struct asm_volume_ctrl_multichannel_gain { struct asm_volume_ctrl_channelype_mute_pair { - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 param; - struct asm_stream_param_data_v2 data; uint8_t channelype; /*< Channel type for which the mute setting is to be applied. * Supported values: @@ -8303,9 +8242,6 @@ struct asm_volume_ctrl_channelype_mute_pair { struct asm_volume_ctrl_multichannel_mute { - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 param; - struct asm_stream_param_data_v2 data; uint32_t num_channels; /*< Number of channels for which mute configuration is * provided. Any channels present in the data for which mute @@ -8750,9 +8686,6 @@ struct asm_eq_per_band_params { } __packed; struct asm_eq_params { - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 param; - struct asm_stream_param_data_v2 data; uint32_t enable_flag; /*< Specifies whether the equalizer module is disabled (0) or enabled * (nonzero). @@ -8791,6 +8724,9 @@ struct asm_eq_params { #define VSS_ICOMMON_CMD_SET_PARAM_V2 0x0001133D #define VSS_ICOMMON_CMD_GET_PARAM_V2 0x0001133E #define VSS_ICOMMON_RSP_GET_PARAM 0x00011008 +#define VSS_ICOMMON_CMD_SET_PARAM_V3 0x00013245 +#define VSS_ICOMMON_CMD_GET_PARAM_V3 0x00013246 +#define VSS_ICOMMON_RSP_GET_PARAM_V3 0x00013247 /** ID of the Bass Boost module. 
This module supports the following parameter IDs: @@ -9174,15 +9110,13 @@ struct afe_sp_th_vi_ftm_params { } __packed; struct afe_sp_th_vi_get_param { - struct apr_hdr hdr; - struct afe_port_cmd_get_param_v2 get_param; - struct afe_port_param_data_v2 pdata; + struct param_hdr_v3 pdata; struct afe_sp_th_vi_ftm_params param; } __packed; struct afe_sp_th_vi_get_param_resp { uint32_t status; - struct afe_port_param_data_v2 pdata; + struct param_hdr_v3 pdata; struct afe_sp_th_vi_ftm_params param; } __packed; @@ -9248,15 +9182,13 @@ struct afe_sp_ex_vi_ftm_params { } __packed; struct afe_sp_ex_vi_get_param { - struct apr_hdr hdr; - struct afe_port_cmd_get_param_v2 get_param; - struct afe_port_param_data_v2 pdata; + struct param_hdr_v3 pdata; struct afe_sp_ex_vi_ftm_params param; } __packed; struct afe_sp_ex_vi_get_param_resp { uint32_t status; - struct afe_port_param_data_v2 pdata; + struct param_hdr_v3 pdata; struct afe_sp_ex_vi_ftm_params param; } __packed; @@ -9271,23 +9203,16 @@ union afe_spkr_prot_config { struct afe_sp_ex_vi_ftm_cfg ex_vi_ftm_cfg; } __packed; -struct afe_spkr_prot_config_command { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - union afe_spkr_prot_config prot_config; -} __packed; - struct afe_spkr_prot_get_vi_calib { struct apr_hdr hdr; - struct afe_port_cmd_get_param_v2 get_param; - struct afe_port_param_data_v2 pdata; + struct mem_mapping_hdr mem_hdr; + struct param_hdr_v3 pdata; struct asm_calib_res_cfg res_cfg; } __packed; struct afe_spkr_prot_calib_get_resp { uint32_t status; - struct afe_port_param_data_v2 pdata; + struct param_hdr_v3 pdata; struct asm_calib_res_cfg res_cfg; } __packed; @@ -9415,16 +9340,6 @@ struct srs_trumedia_params { #define ASM_STREAM_POSTPROC_TOPO_ID_DTS_HPX 0x00010DED #define ASM_STREAM_POSTPROC_TOPO_ID_HPX_PLUS 0x10015000 #define ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER 0x10015001 -struct asm_dts_eagle_param { - struct apr_hdr hdr; - struct 
asm_stream_cmd_set_pp_params_v2 param; - struct asm_stream_param_data_v2 data; -} __packed; - -struct asm_dts_eagle_param_get { - struct apr_hdr hdr; - struct asm_stream_cmd_get_pp_params_v2 param; -} __packed; /* Opcode to set BT address and license for aptx decoder */ #define APTX_DECODER_BT_ADDRESS 0x00013201 @@ -9532,6 +9447,7 @@ struct avcs_fwk_ver_info { #define LSM_SESSION_CMD_CLOSE_TX (0x00012A88) #define LSM_SESSION_CMD_SET_PARAMS (0x00012A83) #define LSM_SESSION_CMD_SET_PARAMS_V2 (0x00012A8F) +#define LSM_SESSION_CMD_SET_PARAMS_V3 (0x00012A92) #define LSM_SESSION_CMD_REGISTER_SOUND_MODEL (0x00012A84) #define LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL (0x00012A85) #define LSM_SESSION_CMD_START (0x00012A86) @@ -9578,6 +9494,7 @@ struct avcs_fwk_ver_info { /* Commands/Params to pass the codec/slimbus data to DSP */ #define AFE_SVC_CMD_SET_PARAM (0x000100f3) +#define AFE_SVC_CMD_SET_PARAM_V2 (0x000100fc) #define AFE_MODULE_CDC_DEV_CFG (0x00010234) #define AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG (0x00010235) #define AFE_PARAM_ID_CDC_REG_CFG (0x00010236) @@ -9962,13 +9879,6 @@ struct afe_clk_cfg { #define AFE_MODULE_CLOCK_SET 0x0001028F #define AFE_PARAM_ID_CLOCK_SET 0x00010290 -struct afe_lpass_clk_config_command { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - struct afe_clk_cfg clk_cfg; -} __packed; - enum afe_lpass_digital_clk_src { Q6AFE_LPASS_DIGITAL_ROOT_INVALID, Q6AFE_LPASS_DIGITAL_ROOT_PRI_MI2S_OSR, @@ -10004,14 +9914,6 @@ struct afe_digital_clk_cfg { u16 reserved; } __packed; - -struct afe_lpass_digital_clk_config_command { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - struct afe_digital_clk_cfg clk_cfg; -} __packed; - /* * Opcode for AFE to start DTMF. 
*/ @@ -10120,107 +10022,43 @@ struct afe_param_cdc_reg_cfg_data { struct afe_param_cdc_reg_cfg *reg_data; } __packed; -struct afe_svc_cmd_set_param { - uint32_t payload_size; - uint32_t payload_address_lsw; - uint32_t payload_address_msw; - uint32_t mem_map_handle; -} __packed; - -struct afe_svc_param_data { - uint32_t module_id; - uint32_t param_id; - uint16_t param_size; - uint16_t reserved; -} __packed; +struct afe_svc_cmd_set_param_v1 { + /* APR Header */ + struct apr_hdr apr_hdr; -struct afe_param_hw_mad_ctrl { - uint32_t minor_version; - uint16_t mad_type; - uint16_t mad_enable; -} __packed; - -struct afe_cmd_hw_mad_ctrl { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - struct afe_param_hw_mad_ctrl payload; -} __packed; - -struct afe_cmd_hw_mad_slimbus_slave_port_cfg { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - struct afe_param_slimbus_slave_port_cfg sb_port_cfg; -} __packed; - -struct afe_cmd_sw_mad_enable { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; -} __packed; - -struct afe_param_cdc_reg_cfg_payload { - struct afe_svc_param_data common; - struct afe_param_cdc_reg_cfg reg_cfg; -} __packed; + /* The total size of the payload, including param_hdr_v3 */ + uint32_t payload_size; -struct afe_lpass_clk_config_command_v2 { - struct apr_hdr hdr; - struct afe_svc_cmd_set_param param; - struct afe_svc_param_data pdata; - struct afe_clk_set clk_cfg; -} __packed; + /* The memory mapping header to be used when sending outband */ + struct mem_mapping_hdr mem_hdr; -/* - * reg_data's size can be up to AFE_MAX_CDC_REGISTERS_TO_CONFIG - */ -struct afe_svc_cmd_cdc_reg_cfg { - struct apr_hdr hdr; - struct afe_svc_cmd_set_param param; - struct afe_param_cdc_reg_cfg_payload reg_data[0]; + /* The parameter data to be filled when sent inband */ + u32 param_data[0]; } __packed; -struct 
afe_svc_cmd_init_cdc_reg_cfg { - struct apr_hdr hdr; - struct afe_svc_cmd_set_param param; - struct afe_port_param_data_v2 init; -} __packed; +struct afe_svc_cmd_set_param_v2 { + /* APR Header */ + struct apr_hdr apr_hdr; -struct afe_svc_cmd_sb_slave_cfg { - struct apr_hdr hdr; - struct afe_svc_cmd_set_param param; - struct afe_port_param_data_v2 pdata; - struct afe_param_cdc_slimbus_slave_cfg sb_slave_cfg; -} __packed; + /* The memory mapping header to be used when sending outband */ + struct mem_mapping_hdr mem_hdr; -struct afe_svc_cmd_cdc_reg_page_cfg { - struct apr_hdr hdr; - struct afe_svc_cmd_set_param param; - struct afe_port_param_data_v2 pdata; - struct afe_param_cdc_reg_page_cfg cdc_reg_page_cfg; -} __packed; + /* The total size of the payload, including param_hdr_v3 */ + u32 payload_size; -struct afe_svc_cmd_cdc_aanc_version { - struct apr_hdr hdr; - struct afe_svc_cmd_set_param param; - struct afe_port_param_data_v2 pdata; - struct afe_param_id_cdc_aanc_version version; + /* The parameter data to be filled when sent inband */ + u32 param_data[0]; } __packed; -struct afe_port_cmd_set_aanc_param { - struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; - struct afe_port_param_data_v2 pdata; - union { - struct afe_param_aanc_port_cfg aanc_port_cfg; - struct afe_mod_enable_param mod_enable; - } __packed data; +struct afe_param_hw_mad_ctrl { + uint32_t minor_version; + uint16_t mad_type; + uint16_t mad_enable; } __packed; struct afe_port_cmd_set_aanc_acdb_table { struct apr_hdr hdr; - struct afe_port_cmd_set_param_v2 param; + struct mem_mapping_hdr mem_hdr; } __packed; /* Dolby DAP topology */ @@ -10243,13 +10081,6 @@ struct afe_port_cmd_set_aanc_acdb_table { #define Q14_GAIN_ZERO_POINT_FIVE 0x2000 #define Q14_GAIN_UNITY 0x4000 -struct afe_svc_cmd_set_clip_bank_selection { - struct apr_hdr hdr; - struct afe_svc_cmd_set_param param; - struct afe_port_param_data_v2 pdata; - struct afe_param_id_clip_bank_sel bank_sel; -} __packed; - /* Ultrasound 
supported formats */ #define US_POINT_EPOS_FORMAT_V2 0x0001272D #define US_RAW_FORMAT_V2 0x0001272C @@ -10463,13 +10294,6 @@ union afe_port_group_config { struct afe_param_id_group_device_tdm_cfg tdm_cfg; } __packed; -struct afe_port_group_create { - struct apr_hdr hdr; - struct afe_svc_cmd_set_param param; - struct afe_port_param_data_v2 pdata; - union afe_port_group_config data; -} __packed; - /* ID of the parameter used by #AFE_MODULE_AUDIO_DEV_INTERFACE to specify * the timing statistics of the corresponding device interface. * Client can periodically query for the device time statistics to help adjust @@ -10559,16 +10383,9 @@ struct afe_param_id_dev_timing_stats { u32 ref_timer_abs_ts_msw; } __packed; -struct afe_av_dev_drift_get_param { - struct apr_hdr hdr; - struct afe_port_cmd_get_param_v2 get_param; - struct afe_port_param_data_v2 pdata; - struct afe_param_id_dev_timing_stats timing_stats; -} __packed; - struct afe_av_dev_drift_get_param_resp { uint32_t status; - struct afe_port_param_data_v2 pdata; + struct param_hdr_v3 pdata; struct afe_param_id_dev_timing_stats timing_stats; } __packed; @@ -10780,7 +10597,7 @@ union asm_session_mtmx_strtr_param_config { struct asm_mtmx_strtr_params { struct apr_hdr hdr; struct asm_session_cmd_set_mtmx_strstr_params_v2 param; - struct asm_stream_param_data_v2 data; + struct param_hdr_v1 data; union asm_session_mtmx_strtr_param_config config; } __packed; @@ -10890,7 +10707,7 @@ struct asm_mtmx_strtr_get_params { struct asm_mtmx_strtr_get_params_cmdrsp { uint32_t err_code; - struct asm_stream_param_data_v2 param_info; + struct param_hdr_v1 param_info; union asm_session_mtmx_strtr_data_type param_data; } __packed; @@ -10910,18 +10727,14 @@ enum { #define AUDPROC_PARAM_ID_COMPRESSED_MUTE 0x00010771 struct adm_set_compressed_device_mute { - struct adm_cmd_set_pp_params_v5 command; - struct adm_param_data_v5 params; - u32 mute_on; + u32 mute_on; } __packed; #define AUDPROC_MODULE_ID_COMPRESSED_LATENCY 0x0001076E #define 
AUDPROC_PARAM_ID_COMPRESSED_LATENCY 0x0001076F struct adm_set_compressed_device_latency { - struct adm_cmd_set_pp_params_v5 command; - struct adm_param_data_v5 params; - u32 latency; + u32 latency; } __packed; #define VOICEPROC_MODULE_ID_GENERIC_TX 0x00010EF6 @@ -10951,12 +10764,6 @@ struct adm_param_fluence_soundfocus_t { uint16_t reserved; } __packed; -struct adm_set_fluence_soundfocus_param { - struct adm_cmd_set_pp_params_v5 params; - struct adm_param_data_v5 data; - struct adm_param_fluence_soundfocus_t soundfocus_data; -} __packed; - struct adm_param_fluence_sourcetracking_t { uint8_t vad[MAX_SECTORS]; uint16_t doa_speech; @@ -10986,10 +10793,4 @@ struct admx_sec_primary_mic_ch { uint16_t reserved1; } __packed; - -struct adm_set_sec_primary_ch_params { - struct adm_cmd_set_pp_params_v5 params; - struct adm_param_data_v5 data; - struct admx_sec_primary_mic_ch sec_primary_mic_ch_data; -} __packed; #endif /*_APR_AUDIO_V2_H_ */ diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h index 65c42ee18914..84087de3d4d8 100644 --- a/include/sound/q6adm-v2.h +++ b/include/sound/q6adm-v2.h @@ -25,6 +25,8 @@ #define MAX_MODULES_IN_TOPO 16 #define ADM_GET_TOPO_MODULE_LIST_LENGTH\ ((MAX_MODULES_IN_TOPO + 1) * sizeof(uint32_t)) +#define ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH \ + ((MAX_MODULES_IN_TOPO + 1) * 2 * sizeof(uint32_t)) #define AUD_PROC_BLOCK_SIZE 4096 #define AUD_VOL_BLOCK_SIZE 4096 #define AUDIO_RX_CALIBRATION_SIZE (AUD_PROC_BLOCK_SIZE + \ @@ -101,12 +103,24 @@ void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate); int adm_get_params(int port_id, int copp_idx, uint32_t module_id, uint32_t param_id, uint32_t params_length, char *params); +int adm_get_pp_params(int port_id, int copp_idx, uint32_t client_id, + struct mem_mapping_hdr *mem_hdr, + struct param_hdr_v3 *param_hdr, u8 *returned_param_data); + int adm_send_params_v5(int port_id, int copp_idx, char *params, uint32_t params_length); int adm_dolby_dap_send_params(int port_id, int 
copp_idx, char *params, uint32_t params_length); +int adm_set_pp_params(int port_id, int copp_idx, + struct mem_mapping_hdr *mem_hdr, u8 *param_data, + u32 params_size); + +int adm_pack_and_set_one_pp_param(int port_id, int copp_idx, + struct param_hdr_v3 param_hdr, + u8 *param_data); + int adm_open(int port, int path, int rate, int mode, int topology, int perf_mode, uint16_t bits_per_sample, int app_type, int acdbdev_id); @@ -157,6 +171,10 @@ int adm_set_downmix_params(int port_id, int copp_idx, int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length, char *params); +int adm_get_pp_topo_module_list_v2(int port_id, int copp_idx, + int32_t param_length, + int32_t *returned_params); + int adm_set_volume(int port_id, int copp_idx, int volume); int adm_set_softvolume(int port_id, int copp_idx, @@ -169,6 +187,9 @@ int adm_send_set_multichannel_ec_primary_mic_ch(int port_id, int copp_idx, int adm_param_enable(int port_id, int copp_idx, int module_id, int enable); +int adm_param_enable_v2(int port_id, int copp_idx, + struct module_instance_info mod_inst_info, int enable); + int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode, int cal_type, char *params, int size); diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h index 9ddd02cac9ac..285d32e249b8 100644 --- a/include/sound/q6asm-v2.h +++ b/include/sound/q6asm-v2.h @@ -265,6 +265,17 @@ int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir int q6asm_audio_client_buf_free_contiguous(unsigned int dir, struct audio_client *ac); +int q6asm_set_pp_params(struct audio_client *ac, + struct mem_mapping_hdr *mem_hdr, u8 *param_data, + u32 param_size); + +int q6asm_pack_and_set_pp_param_in_band(struct audio_client *ac, + struct param_hdr_v3 param_hdr, + u8 *param_data); + +int q6asm_set_soft_volume_module_instance_ids(int instance, + struct param_hdr_v3 *param_hdr); + int q6asm_open_read(struct audio_client *ac, uint32_t format /*, uint16_t bits_per_sample*/); diff 
--git a/include/sound/q6lsm.h b/include/sound/q6lsm.h index 4805246766d6..c046cd468b49 100644 --- a/include/sound/q6lsm.h +++ b/include/sound/q6lsm.h @@ -112,31 +112,27 @@ struct lsm_custom_topologies { uint32_t buffer_size; } __packed; -struct lsm_param_size_reserved { - uint16_t param_size; - uint16_t reserved; -} __packed; - -union lsm_param_size { - uint32_t param_size; - struct lsm_param_size_reserved sr; +struct lsm_session_cmd_set_params_v2 { + struct apr_hdr apr_hdr; + uint32_t payload_size; + struct mem_mapping_hdr mem_hdr; + u32 param_data[0]; } __packed; -struct lsm_param_payload_common { - uint32_t module_id; - uint32_t param_id; - union lsm_param_size p_size; +struct lsm_session_cmd_set_params_v3 { + struct apr_hdr apr_hdr; + struct mem_mapping_hdr mem_hdr; + uint32_t payload_size; + u32 param_data[0]; } __packed; struct lsm_param_op_mode { - struct lsm_param_payload_common common; uint32_t minor_version; uint16_t mode; uint16_t reserved; } __packed; struct lsm_param_connect_to_port { - struct lsm_param_payload_common common; uint32_t minor_version; /* AFE port id that receives voice wake up data */ uint16_t port_id; @@ -144,20 +140,17 @@ struct lsm_param_connect_to_port { } __packed; struct lsm_param_poll_enable { - struct lsm_param_payload_common common; uint32_t minor_version; /* indicates to voice wakeup that HW MAD/SW polling is enabled or not */ uint32_t polling_enable; } __packed; struct lsm_param_fwk_mode_cfg { - struct lsm_param_payload_common common; uint32_t minor_version; uint32_t mode; } __packed; struct lsm_param_media_fmt { - struct lsm_param_payload_common common; uint32_t minor_version; uint32_t sample_rate; uint16_t num_channels; @@ -165,78 +158,23 @@ struct lsm_param_media_fmt { uint8_t channel_mapping[LSM_MAX_NUM_CHANNELS]; } __packed; -/* - * This param cannot be sent in this format. - * The actual number of confidence level values - * need to appended to this param payload. 
- */ -struct lsm_param_min_confidence_levels { - struct lsm_param_payload_common common; - uint8_t num_confidence_levels; -} __packed; - -struct lsm_set_params_hdr { - uint32_t data_payload_size; - uint32_t data_payload_addr_lsw; - uint32_t data_payload_addr_msw; - uint32_t mem_map_handle; -} __packed; - -struct lsm_cmd_set_params { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr param_hdr; -} __packed; - -struct lsm_cmd_set_params_conf { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr params_hdr; - struct lsm_param_min_confidence_levels conf_payload; -} __packed; - -struct lsm_cmd_set_params_opmode { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr params_hdr; - struct lsm_param_op_mode op_mode; -} __packed; - -struct lsm_cmd_set_connectport { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr params_hdr; - struct lsm_param_connect_to_port connect_to_port; -} __packed; - -struct lsm_cmd_poll_enable { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr params_hdr; - struct lsm_param_poll_enable poll_enable; +struct lsm_param_confidence_levels { + uint8_t num_confidence_levels; + uint8_t confidence_levels[0]; } __packed; struct lsm_param_epd_thres { - struct lsm_param_payload_common common; uint32_t minor_version; uint32_t epd_begin; uint32_t epd_end; } __packed; -struct lsm_cmd_set_epd_threshold { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr param_hdr; - struct lsm_param_epd_thres epd_thres; -} __packed; - struct lsm_param_gain { - struct lsm_param_payload_common common; uint32_t minor_version; uint16_t gain; uint16_t reserved; } __packed; -struct lsm_cmd_set_gain { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr param_hdr; - struct lsm_param_gain lsm_gain; -} __packed; - struct lsm_cmd_reg_snd_model { struct apr_hdr hdr; uint32_t model_size; @@ -245,31 +183,16 @@ struct lsm_cmd_reg_snd_model { uint32_t mem_map_handle; } __packed; -struct lsm_lab_enable { - struct lsm_param_payload_common common; +struct lsm_param_lab_enable { 
uint16_t enable; uint16_t reserved; } __packed; -struct lsm_params_lab_enable { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr params_hdr; - struct lsm_lab_enable lab_enable; -} __packed; - -struct lsm_lab_config { - struct lsm_param_payload_common common; +struct lsm_param_lab_config { uint32_t minor_version; uint32_t wake_up_latency_ms; } __packed; - -struct lsm_params_lab_config { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr params_hdr; - struct lsm_lab_config lab_config; -} __packed; - struct lsm_cmd_read { struct apr_hdr hdr; uint32_t buf_addr_lsw; @@ -291,19 +214,6 @@ struct lsm_cmd_read_done { uint32_t flags; } __packed; -struct lsm_cmd_set_fwk_mode_cfg { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr params_hdr; - struct lsm_param_fwk_mode_cfg fwk_mode_cfg; -} __packed; - -struct lsm_cmd_set_media_fmt { - struct apr_hdr msg_hdr; - struct lsm_set_params_hdr params_hdr; - struct lsm_param_media_fmt media_fmt; -} __packed; - - struct lsm_client *q6lsm_client_alloc(lsm_app_cb cb, void *priv); void q6lsm_client_free(struct lsm_client *client); int q6lsm_open(struct lsm_client *client, uint16_t app_id); diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h index feb58d455560..4b9ee3009aa0 100644 --- a/include/sound/seq_kernel.h +++ b/include/sound/seq_kernel.h @@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t; #define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200 /* max delivery path length */ -#define SNDRV_SEQ_MAX_HOPS 10 +/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */ +#define SNDRV_SEQ_MAX_HOPS 8 /* max size of event size */ #define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 1adf8739980c..8555321306fb 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -199,6 +199,7 @@ enum tcm_tmreq_table { TMR_LUN_RESET = 5, TMR_TARGET_WARM_RESET = 6, TMR_TARGET_COLD_RESET = 7, + TMR_UNKNOWN 
= 0xff, }; /* fabric independent task management response values */ diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 7063bbcca03b..589df6f73789 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -128,6 +128,18 @@ TRACE_DEFINE_ENUM(CP_TRIMMED); { CP_DISCARD, "Discard" }, \ { CP_UMOUNT | CP_TRIMMED, "Umount,Trimmed" }) +#define show_fsync_cpreason(type) \ + __print_symbolic(type, \ + { CP_NO_NEEDED, "no needed" }, \ + { CP_NON_REGULAR, "non regular" }, \ + { CP_HARDLINK, "hardlink" }, \ + { CP_SB_NEED_CP, "sb needs cp" }, \ + { CP_WRONG_PINO, "wrong pino" }, \ + { CP_NO_SPC_ROLL, "no space roll forward" }, \ + { CP_NODE_NEED_CP, "node needs cp" }, \ + { CP_FASTBOOT_MODE, "fastboot mode" }, \ + { CP_SPEC_LOG_NUM, "log type is 2" }) + struct victim_sel_policy; struct f2fs_map_blocks; @@ -202,14 +214,14 @@ DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter, TRACE_EVENT(f2fs_sync_file_exit, - TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret), + TP_PROTO(struct inode *inode, int cp_reason, int datasync, int ret), - TP_ARGS(inode, need_cp, datasync, ret), + TP_ARGS(inode, cp_reason, datasync, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) - __field(int, need_cp) + __field(int, cp_reason) __field(int, datasync) __field(int, ret) ), @@ -217,15 +229,15 @@ TRACE_EVENT(f2fs_sync_file_exit, TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; - __entry->need_cp = need_cp; + __entry->cp_reason = cp_reason; __entry->datasync = datasync; __entry->ret = ret; ), - TP_printk("dev = (%d,%d), ino = %lu, checkpoint is %s, " + TP_printk("dev = (%d,%d), ino = %lu, cp_reason: %s, " "datasync = %d, ret = %d", show_dev_ino(__entry), - __entry->need_cp ? 
"needed" : "not needed", + show_fsync_cpreason(__entry->cp_reason), __entry->datasync, __entry->ret) ); @@ -716,6 +728,91 @@ TRACE_EVENT(f2fs_get_victim, __entry->free) ); +TRACE_EVENT(f2fs_lookup_start, + + TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), + + TP_ARGS(dir, dentry, flags), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(const char *, name) + __field(unsigned int, flags) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->ino = dir->i_ino; + __entry->name = dentry->d_name.name; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u", + show_dev_ino(__entry), + __entry->name, + __entry->flags) +); + +TRACE_EVENT(f2fs_lookup_end, + + TP_PROTO(struct inode *dir, struct dentry *dentry, nid_t ino, + int err), + + TP_ARGS(dir, dentry, ino, err), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(const char *, name) + __field(nid_t, cino) + __field(int, err) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->ino = dir->i_ino; + __entry->name = dentry->d_name.name; + __entry->cino = ino; + __entry->err = err; + ), + + TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d", + show_dev_ino(__entry), + __entry->name, + __entry->cino, + __entry->err) +); + +TRACE_EVENT(f2fs_readdir, + + TP_PROTO(struct inode *dir, loff_t start_pos, loff_t end_pos, int err), + + TP_ARGS(dir, start_pos, end_pos, err), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, start) + __field(loff_t, end) + __field(int, err) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->ino = dir->i_ino; + __entry->start = start_pos; + __entry->end = end_pos; + __entry->err = err; + ), + + TP_printk("dev = (%d,%d), ino = %lu, start_pos:%llu, end_pos:%llu, err:%d", + show_dev_ino(__entry), + __entry->start, + __entry->end, + __entry->err) +); + TRACE_EVENT(f2fs_fallocate, TP_PROTO(struct inode 
*inode, int mode, @@ -1274,6 +1371,13 @@ DEFINE_EVENT(f2fs_discard, f2fs_issue_discard, TP_ARGS(dev, blkstart, blklen) ); +DEFINE_EVENT(f2fs_discard, f2fs_remove_discard, + + TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen), + + TP_ARGS(dev, blkstart, blklen) +); + TRACE_EVENT(f2fs_issue_reset_zone, TP_PROTO(struct block_device *dev, block_t blkstart), diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 739bcb89f602..cc0ebe6867a5 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -436,9 +436,9 @@ TRACE_EVENT(sched_update_task_ravg, TRACE_EVENT(sched_get_task_cpu_cycles, - TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time), + TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time, struct task_struct *p), - TP_ARGS(cpu, event, cycles, exec_time), + TP_ARGS(cpu, event, cycles, exec_time, p), TP_STRUCT__entry( __field(int, cpu ) @@ -448,6 +448,8 @@ TRACE_EVENT(sched_get_task_cpu_cycles, __field(u32, freq ) __field(u32, legacy_freq ) __field(u32, max_freq) + __field(pid_t, pid ) + __array(char, comm, TASK_COMM_LEN ) ), TP_fast_assign( @@ -458,12 +460,14 @@ TRACE_EVENT(sched_get_task_cpu_cycles, __entry->freq = cpu_cycles_to_freq(cycles, exec_time); __entry->legacy_freq = cpu_cur_freq(cpu); __entry->max_freq = cpu_max_freq(cpu); + __entry->pid = p->pid; + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); ), - TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u max_freq=%u", + TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u max_freq=%u task=%d (%s)", __entry->cpu, __entry->event, __entry->cycles, __entry->exec_time, __entry->freq, __entry->legacy_freq, - __entry->max_freq) + __entry->max_freq, __entry->pid, __entry->comm) ); TRACE_EVENT(sched_update_history, diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 5664ca07c9c7..a01a076ea060 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h 
@@ -455,20 +455,22 @@ TRACE_EVENT(svc_recv, TP_ARGS(rqst, status), TP_STRUCT__entry( - __field(struct sockaddr *, addr) __field(__be32, xid) __field(int, status) __field(unsigned long, flags) + __dynamic_array(unsigned char, addr, rqst->rq_addrlen) ), TP_fast_assign( - __entry->addr = (struct sockaddr *)&rqst->rq_addr; __entry->xid = status > 0 ? rqst->rq_xid : 0; __entry->status = status; __entry->flags = rqst->rq_flags; + memcpy(__get_dynamic_array(addr), + &rqst->rq_addr, rqst->rq_addrlen); ), - TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr, + TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", + (struct sockaddr *)__get_dynamic_array(addr), be32_to_cpu(__entry->xid), __entry->status, show_rqstp_flags(__entry->flags)) ); @@ -480,22 +482,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status, TP_ARGS(rqst, status), TP_STRUCT__entry( - __field(struct sockaddr *, addr) __field(__be32, xid) - __field(int, dropme) __field(int, status) __field(unsigned long, flags) + __dynamic_array(unsigned char, addr, rqst->rq_addrlen) ), TP_fast_assign( - __entry->addr = (struct sockaddr *)&rqst->rq_addr; __entry->xid = rqst->rq_xid; __entry->status = status; __entry->flags = rqst->rq_flags; + memcpy(__get_dynamic_array(addr), + &rqst->rq_addr, rqst->rq_addrlen); ), TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s", - __entry->addr, be32_to_cpu(__entry->xid), + (struct sockaddr *)__get_dynamic_array(addr), + be32_to_cpu(__entry->xid), __entry->status, show_rqstp_flags(__entry->flags)) ); diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 0630e0f64b9c..f693b5b5b7ab 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -80,6 +80,20 @@ extern "C" { #define DRM_MODE_FLAG_SUPPORTS_RGB (1<<20) #define DRM_MODE_FLAG_SUPPORTS_YUV (1<<21) +/* Picture aspect ratio options */ +#define DRM_MODE_PICTURE_ASPECT_NONE 0 +#define DRM_MODE_PICTURE_ASPECT_4_3 1 +#define DRM_MODE_PICTURE_ASPECT_16_9 2 + +/* Aspect ratio flag bitmask (4 
bits 27:24) */ +#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<24) +#define DRM_MODE_FLAG_PIC_AR_NONE \ + (DRM_MODE_PICTURE_ASPECT_NONE<<24) +#define DRM_MODE_FLAG_PIC_AR_4_3 \ + (DRM_MODE_PICTURE_ASPECT_4_3<<24) +#define DRM_MODE_FLAG_PIC_AR_16_9 \ + (DRM_MODE_PICTURE_ASPECT_16_9<<24) + /* DPMS flags */ /* bit compatible with the xorg definitions. */ #define DRM_MODE_DPMS_ON 0 @@ -94,11 +108,6 @@ extern "C" { #define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ #define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ -/* Picture aspect ratio options */ -#define DRM_MODE_PICTURE_ASPECT_NONE 0 -#define DRM_MODE_PICTURE_ASPECT_4_3 1 -#define DRM_MODE_PICTURE_ASPECT_16_9 2 - /* Dithering mode options */ #define DRM_MODE_DITHERING_OFF 0 #define DRM_MODE_DITHERING_ON 1 diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index 22b6ad31c706..8562b1cb776b 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -90,7 +90,7 @@ PTR_FIELD(PTR_GEN, 0, 8) #define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1) -#define PTR(gen, offset, dev) \ +#define MAKE_PTR(gen, offset, dev) \ ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen) /* Bkey utility code */ diff --git a/include/uapi/linux/habmm.h b/include/uapi/linux/habmm.h index 902bd35ee474..59b603a0fcf7 100644 --- a/include/uapi/linux/habmm.h +++ b/include/uapi/linux/habmm.h @@ -73,8 +73,9 @@ struct hab_unimport { #define MM_AUD_END 105 #define MM_CAM_START 200 -#define MM_CAM 201 -#define MM_CAM_END 202 +#define MM_CAM_1 201 +#define MM_CAM_2 202 +#define MM_CAM_END 203 #define MM_DISP_START 300 #define MM_DISP_1 301 @@ -102,7 +103,13 @@ struct hab_unimport { #define MM_QCPE_VM3 703 #define MM_QCPE_VM4 704 #define MM_QCPE_END 705 -#define MM_ID_MAX 706 + +#define MM_CLK_START 800 +#define MM_CLK_VM1 801 +#define MM_CLK_VM2 802 +#define MM_CLK_END 803 + +#define MM_ID_MAX 804 #define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE 0x00000000 #define 
HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU 0x00000001 @@ -110,6 +117,14 @@ struct hab_unimport { #define HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING 0x00000001 +/* + * Collect cross-VM stats: client provides stat-buffer large enough to allow 2 + * ets of a 2-uint64_t pair to collect seconds and nano-seconds at the + * beginning of the stat-buffer. Stats are collected when the stat-buffer leaves + * VM1, then enters VM2 + */ +#define HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT 0x00000002 + #define HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING 0x00000001 #define HABMM_EXP_MEM_TYPE_DMA 0x00000001 diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index 08f894d2ddbd..7b5e2aac86ac 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h @@ -165,6 +165,7 @@ enum IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL, IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL, IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN, + IPV4_DEVCONF_NF_IPV4_DEFRAG_SKIP, __IPV4_DEVCONF_MAX }; diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 7af20a136429..804c9b2bfce3 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -104,8 +104,8 @@ #define RDS_INFO_LAST 10010 struct rds_info_counter { - uint8_t name[32]; - uint64_t value; + __u8 name[32]; + __u64 value; } __attribute__((packed)); #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 @@ -115,35 +115,35 @@ struct rds_info_counter { #define TRANSNAMSIZ 16 struct rds_info_connection { - uint64_t next_tx_seq; - uint64_t next_rx_seq; + __u64 next_tx_seq; + __u64 next_rx_seq; __be32 laddr; __be32 faddr; - uint8_t transport[TRANSNAMSIZ]; /* null term ascii */ - uint8_t flags; + __u8 transport[TRANSNAMSIZ]; /* null term ascii */ + __u8 flags; } __attribute__((packed)); #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 struct rds_info_message { - uint64_t seq; - uint32_t len; + __u64 seq; + __u32 len; __be32 laddr; __be32 faddr; __be16 lport; __be16 fport; - uint8_t flags; + __u8 flags; } 
__attribute__((packed)); struct rds_info_socket { - uint32_t sndbuf; + __u32 sndbuf; __be32 bound_addr; __be32 connected_addr; __be16 bound_port; __be16 connected_port; - uint32_t rcvbuf; - uint64_t inum; + __u32 rcvbuf; + __u64 inum; } __attribute__((packed)); struct rds_info_tcp_socket { @@ -151,25 +151,25 @@ struct rds_info_tcp_socket { __be16 local_port; __be32 peer_addr; __be16 peer_port; - uint64_t hdr_rem; - uint64_t data_rem; - uint32_t last_sent_nxt; - uint32_t last_expected_una; - uint32_t last_seen_una; + __u64 hdr_rem; + __u64 data_rem; + __u32 last_sent_nxt; + __u32 last_expected_una; + __u32 last_seen_una; } __attribute__((packed)); #define RDS_IB_GID_LEN 16 struct rds_info_rdma_connection { __be32 src_addr; __be32 dst_addr; - uint8_t src_gid[RDS_IB_GID_LEN]; - uint8_t dst_gid[RDS_IB_GID_LEN]; + __u8 src_gid[RDS_IB_GID_LEN]; + __u8 dst_gid[RDS_IB_GID_LEN]; - uint32_t max_send_wr; - uint32_t max_recv_wr; - uint32_t max_send_sge; - uint32_t rdma_mr_max; - uint32_t rdma_mr_size; + __u32 max_send_wr; + __u32 max_recv_wr; + __u32 max_send_sge; + __u32 rdma_mr_max; + __u32 rdma_mr_size; }; /* @@ -210,70 +210,70 @@ struct rds_info_rdma_connection { * (so that the application does not have to worry about * alignment). 
*/ -typedef uint64_t rds_rdma_cookie_t; +typedef __u64 rds_rdma_cookie_t; struct rds_iovec { - uint64_t addr; - uint64_t bytes; + __u64 addr; + __u64 bytes; }; struct rds_get_mr_args { struct rds_iovec vec; - uint64_t cookie_addr; - uint64_t flags; + __u64 cookie_addr; + __u64 flags; }; struct rds_get_mr_for_dest_args { struct __kernel_sockaddr_storage dest_addr; struct rds_iovec vec; - uint64_t cookie_addr; - uint64_t flags; + __u64 cookie_addr; + __u64 flags; }; struct rds_free_mr_args { rds_rdma_cookie_t cookie; - uint64_t flags; + __u64 flags; }; struct rds_rdma_args { rds_rdma_cookie_t cookie; struct rds_iovec remote_vec; - uint64_t local_vec_addr; - uint64_t nr_local; - uint64_t flags; - uint64_t user_token; + __u64 local_vec_addr; + __u64 nr_local; + __u64 flags; + __u64 user_token; }; struct rds_atomic_args { rds_rdma_cookie_t cookie; - uint64_t local_addr; - uint64_t remote_addr; + __u64 local_addr; + __u64 remote_addr; union { struct { - uint64_t compare; - uint64_t swap; + __u64 compare; + __u64 swap; } cswp; struct { - uint64_t add; + __u64 add; } fadd; struct { - uint64_t compare; - uint64_t swap; - uint64_t compare_mask; - uint64_t swap_mask; + __u64 compare; + __u64 swap; + __u64 compare_mask; + __u64 swap_mask; } m_cswp; struct { - uint64_t add; - uint64_t nocarry_mask; + __u64 add; + __u64 nocarry_mask; } m_fadd; }; - uint64_t flags; - uint64_t user_token; + __u64 flags; + __u64 user_token; }; struct rds_rdma_notify { - uint64_t user_token; - int32_t status; + __u64 user_token; + __s32 status; }; #define RDS_RDMA_SUCCESS 0 diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 01eb22ca6b3d..47e0de1df362 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -483,6 +483,7 @@ enum NET_IPV4_CONF_PROMOTE_SECONDARIES=20, NET_IPV4_CONF_ARP_ACCEPT=21, NET_IPV4_CONF_ARP_NOTIFY=22, + NET_IPV4_CONF_NF_IPV4_DEFRAG_SKIP = 23, }; /* /proc/sys/net/ipv4/netfilter */ diff --git a/include/uapi/linux/tee.h 
b/include/uapi/linux/tee.h new file mode 100644 index 000000000000..370d8845ab21 --- /dev/null +++ b/include/uapi/linux/tee.h @@ -0,0 +1,346 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __TEE_H +#define __TEE_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/* + * This file describes the API provided by a TEE driver to user space. + * + * Each TEE driver defines a TEE specific protocol which is used for the + * data passed back and forth using TEE_IOC_CMD. 
+ */ + +/* Helpers to make the ioctl defines */ +#define TEE_IOC_MAGIC 0xa4 +#define TEE_IOC_BASE 0 + +/* Flags relating to shared memory */ +#define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */ +#define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */ + +#define TEE_MAX_ARG_SIZE 1024 + +#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */ + +/* + * TEE Implementation ID + */ +#define TEE_IMPL_ID_OPTEE 1 + +/* + * OP-TEE specific capabilities + */ +#define TEE_OPTEE_CAP_TZ (1 << 0) + +/** + * struct tee_ioctl_version_data - TEE version + * @impl_id: [out] TEE implementation id + * @impl_caps: [out] Implementation specific capabilities + * @gen_caps: [out] Generic capabilities, defined by TEE_GEN_CAPS_* above + * + * Identifies the TEE implementation, @impl_id is one of TEE_IMPL_ID_* above. + * @impl_caps is implementation specific, for example TEE_OPTEE_CAP_* + * is valid when @impl_id == TEE_IMPL_ID_OPTEE. + */ +struct tee_ioctl_version_data { + __u32 impl_id; + __u32 impl_caps; + __u32 gen_caps; +}; + +/** + * TEE_IOC_VERSION - query version of TEE + * + * Takes a tee_ioctl_version_data struct and returns with the TEE version + * data filled in. + */ +#define TEE_IOC_VERSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 0, \ + struct tee_ioctl_version_data) + +/** + * struct tee_ioctl_shm_alloc_data - Shared memory allocate argument + * @size: [in/out] Size of shared memory to allocate + * @flags: [in/out] Flags to/from allocation. + * @id: [out] Identifier of the shared memory + * + * The flags field should currently be zero as input. Updated by the call + * with actual flags as defined by TEE_IOCTL_SHM_* above. + * This structure is used as argument for TEE_IOC_SHM_ALLOC below. + */ +struct tee_ioctl_shm_alloc_data { + __u64 size; + __u32 flags; + __s32 id; +}; + +/** + * TEE_IOC_SHM_ALLOC - allocate shared memory + * + * Allocates shared memory between the user space process and secure OS. 
+ * + * Returns a file descriptor on success or < 0 on failure + * + * The returned file descriptor is used to map the shared memory into user + * space. The shared memory is freed when the descriptor is closed and the + * memory is unmapped. + */ +#define TEE_IOC_SHM_ALLOC _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 1, \ + struct tee_ioctl_shm_alloc_data) + +/** + * struct tee_ioctl_buf_data - Variable sized buffer + * @buf_ptr: [in] A __user pointer to a buffer + * @buf_len: [in] Length of the buffer above + * + * Used as argument for TEE_IOC_OPEN_SESSION, TEE_IOC_INVOKE, + * TEE_IOC_SUPPL_RECV, and TEE_IOC_SUPPL_SEND below. + */ +struct tee_ioctl_buf_data { + __u64 buf_ptr; + __u64 buf_len; +}; + +/* + * Attributes for struct tee_ioctl_param, selects field in the union + */ +#define TEE_IOCTL_PARAM_ATTR_TYPE_NONE 0 /* parameter not used */ + +/* + * These defines value parameters (struct tee_ioctl_param_value) + */ +#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT 1 +#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT 2 +#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT 3 /* input and output */ + +/* + * These defines shared memory reference parameters (struct + * tee_ioctl_param_memref) + */ +#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT 5 +#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT 6 +#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT 7 /* input and output */ + +/* + * Mask for the type part of the attribute, leaves room for more types + */ +#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff + +/* + * Matches TEEC_LOGIN_* in GP TEE Client API + * Are only defined for GP compliant TEEs + */ +#define TEE_IOCTL_LOGIN_PUBLIC 0 +#define TEE_IOCTL_LOGIN_USER 1 +#define TEE_IOCTL_LOGIN_GROUP 2 +#define TEE_IOCTL_LOGIN_APPLICATION 4 +#define TEE_IOCTL_LOGIN_USER_APPLICATION 5 +#define TEE_IOCTL_LOGIN_GROUP_APPLICATION 6 + +/** + * struct tee_ioctl_param - parameter + * @attr: attributes + * @a: if a memref, offset into the shared memory object, else a value parameter + * @b: if a memref, size 
of the buffer, else a value parameter + * @c: if a memref, shared memory identifier, else a value parameter + * + * @attr & TEE_PARAM_ATTR_TYPE_MASK indicates if memref or value is used in + * the union. TEE_PARAM_ATTR_TYPE_VALUE_* indicates value and + * TEE_PARAM_ATTR_TYPE_MEMREF_* indicates memref. TEE_PARAM_ATTR_TYPE_NONE + * indicates that none of the members are used. + * + * Shared memory is allocated with TEE_IOC_SHM_ALLOC which returns an + * identifier representing the shared memory object. A memref can reference + * a part of a shared memory by specifying an offset (@a) and size (@b) of + * the object. To supply the entire shared memory object set the offset + * (@a) to 0 and size (@b) to the previously returned size of the object. + */ +struct tee_ioctl_param { + __u64 attr; + __u64 a; + __u64 b; + __u64 c; +}; + +#define TEE_IOCTL_UUID_LEN 16 + +/** + * struct tee_ioctl_open_session_arg - Open session argument + * @uuid: [in] UUID of the Trusted Application + * @clnt_uuid: [in] UUID of client + * @clnt_login: [in] Login class of client, TEE_IOCTL_LOGIN_* above + * @cancel_id: [in] Cancellation id, a unique value to identify this request + * @session: [out] Session id + * @ret: [out] return value + * @ret_origin [out] origin of the return value + * @num_params [in] number of parameters following this struct + */ +struct tee_ioctl_open_session_arg { + __u8 uuid[TEE_IOCTL_UUID_LEN]; + __u8 clnt_uuid[TEE_IOCTL_UUID_LEN]; + __u32 clnt_login; + __u32 cancel_id; + __u32 session; + __u32 ret; + __u32 ret_origin; + __u32 num_params; + /* num_params tells the actual number of element in params */ + struct tee_ioctl_param params[]; +}; + +/** + * TEE_IOC_OPEN_SESSION - opens a session to a Trusted Application + * + * Takes a struct tee_ioctl_buf_data which contains a struct + * tee_ioctl_open_session_arg followed by any array of struct + * tee_ioctl_param + */ +#define TEE_IOC_OPEN_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 2, \ + struct tee_ioctl_buf_data) + 
+/** + * struct tee_ioctl_invoke_func_arg - Invokes a function in a Trusted + * Application + * @func: [in] Trusted Application function, specific to the TA + * @session: [in] Session id + * @cancel_id: [in] Cancellation id, a unique value to identify this request + * @ret: [out] return value + * @ret_origin [out] origin of the return value + * @num_params [in] number of parameters following this struct + */ +struct tee_ioctl_invoke_arg { + __u32 func; + __u32 session; + __u32 cancel_id; + __u32 ret; + __u32 ret_origin; + __u32 num_params; + /* num_params tells the actual number of element in params */ + struct tee_ioctl_param params[]; +}; + +/** + * TEE_IOC_INVOKE - Invokes a function in a Trusted Application + * + * Takes a struct tee_ioctl_buf_data which contains a struct + * tee_invoke_func_arg followed by any array of struct tee_param + */ +#define TEE_IOC_INVOKE _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 3, \ + struct tee_ioctl_buf_data) + +/** + * struct tee_ioctl_cancel_arg - Cancels an open session or invoke ioctl + * @cancel_id: [in] Cancellation id, a unique value to identify this request + * @session: [in] Session id, if the session is opened, else set to 0 + */ +struct tee_ioctl_cancel_arg { + __u32 cancel_id; + __u32 session; +}; + +/** + * TEE_IOC_CANCEL - Cancels an open session or invoke + */ +#define TEE_IOC_CANCEL _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 4, \ + struct tee_ioctl_cancel_arg) + +/** + * struct tee_ioctl_close_session_arg - Closes an open session + * @session: [in] Session id + */ +struct tee_ioctl_close_session_arg { + __u32 session; +}; + +/** + * TEE_IOC_CLOSE_SESSION - Closes a session + */ +#define TEE_IOC_CLOSE_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 5, \ + struct tee_ioctl_close_session_arg) + +/** + * struct tee_iocl_supp_recv_arg - Receive a request for a supplicant function + * @func: [in] supplicant function + * @num_params [in/out] number of parameters following this struct + * + * @num_params is the number of params that 
tee-supplicant has room to + * receive when input, @num_params is the number of actual params + * tee-supplicant receives when output. + */ +struct tee_iocl_supp_recv_arg { + __u32 func; + __u32 num_params; + /* num_params tells the actual number of element in params */ + struct tee_ioctl_param params[]; +}; + +/** + * TEE_IOC_SUPPL_RECV - Receive a request for a supplicant function + * + * Takes a struct tee_ioctl_buf_data which contains a struct + * tee_iocl_supp_recv_arg followed by any array of struct tee_param + */ +#define TEE_IOC_SUPPL_RECV _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 6, \ + struct tee_ioctl_buf_data) + +/** + * struct tee_iocl_supp_send_arg - Send a response to a received request + * @ret: [out] return value + * @num_params [in] number of parameters following this struct + */ +struct tee_iocl_supp_send_arg { + __u32 ret; + __u32 num_params; + /* num_params tells the actual number of element in params */ + struct tee_ioctl_param params[]; +}; + +/** + * TEE_IOC_SUPPL_SEND - Receive a request for a supplicant function + * + * Takes a struct tee_ioctl_buf_data which contains a struct + * tee_iocl_supp_send_arg followed by any array of struct tee_param + */ +#define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \ + struct tee_ioctl_buf_data) + +/* + * Five syscalls are used when communicating with the TEE driver. 
+ * open(): opens the device associated with the driver + * ioctl(): as described above operating on the file descriptor from open() + * close(): two cases + * - closes the device file descriptor + * - closes a file descriptor connected to allocated shared memory + * mmap(): maps shared memory into user space using information from struct + * tee_ioctl_shm_alloc_data + * munmap(): unmaps previously shared memory + */ + +#endif /*__TEE_H*/ diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index b6c14b1ebdaf..7778723e4405 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -812,6 +812,8 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */ __u8 bReserved; } __attribute__((packed)); +#define USB_DT_USB_WIRELESS_CAP_SIZE 11 + /* USB 2.0 Extension descriptor */ #define USB_CAP_TYPE_EXT 2 @@ -896,6 +898,17 @@ struct usb_ssp_cap_descriptor { } __attribute__((packed)); /* + * Precision time measurement capability descriptor: advertised by devices and + * hubs that support PTM + */ +#define USB_PTM_CAP_TYPE 0xb +struct usb_ptm_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +} __attribute__((packed)); + +/* * Configuration Summary descriptors: Defines a list of functions in the * configuration. This descriptor may be used by Host software to decide * which Configuration to use to obtain the desired functionality. @@ -919,6 +932,12 @@ struct usb_config_summary_descriptor { struct function_class_info cs_info[]; } __attribute__((packed)); +/* + * The size of the descriptor for the Sublink Speed Attribute Count + * (SSAC) specified in bmAttributes[4:0]. 
+ */ +#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4) + /*-------------------------------------------------------------------------*/ /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with @@ -1014,6 +1033,7 @@ enum usb3_link_state { USB3_LPM_U3 }; +#define USB_DT_USB_PTM_ID_SIZE 3 /* * A U1 timeout of 0x0 means the parent hub will reject any transitions to U1. * 0xff means the parent hub will accept transitions to U1, but will not diff --git a/include/uapi/sound/audio_effects.h b/include/uapi/sound/audio_effects.h index 6565acff4073..147e877db71e 100644 --- a/include/uapi/sound/audio_effects.h +++ b/include/uapi/sound/audio_effects.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -159,8 +159,12 @@ #define PBE_ENABLE_PARAM_LEN 1 #define PBE_CONFIG_PARAM_LEN 28 +/* Command Payload length and size for Non-IID commands */ #define COMMAND_PAYLOAD_LEN 3 #define COMMAND_PAYLOAD_SZ (COMMAND_PAYLOAD_LEN * sizeof(uint32_t)) +/* Command Payload length and size for IID commands */ +#define COMMAND_IID_PAYLOAD_LEN 4 +#define COMMAND_IID_PAYLOAD_SZ (COMMAND_IID_PAYLOAD_LEN * sizeof(uint32_t)) #define MAX_INBAND_PARAM_SZ 4096 #define Q27_UNITY (1 << 27) #define Q8_UNITY (1 << 8) diff --git a/init/initramfs.c b/init/initramfs.c index f8ce812ba43e..52059169f64d 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -621,8 +621,11 @@ static int __init populate_rootfs(void) { char *err; - if (do_skip_initramfs) + if (do_skip_initramfs) { + if (initrd_start) + free_initrd(); return default_rootfs(); + } err = unpack_to_rootfs(__initramfs_start, __initramfs_size); if (err) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1d91c012b5d8..2370e7631728 100644 --- a/kernel/sched/core.c +++ 
b/kernel/sched/core.c @@ -2325,11 +2325,11 @@ void sched_exit(struct task_struct *p) reset_task_stats(p); p->ravg.mark_start = wallclock; p->ravg.sum_history[0] = EXITING_TASK_MARKER; - free_task_load_ptrs(p); enqueue_task(rq, p, 0); clear_ed_task(p, rq); task_rq_unlock(rq, p, &flags); + free_task_load_ptrs(p); } #endif /* CONFIG_SCHED_HMP */ @@ -4914,6 +4914,15 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) raw_spin_lock_irqsave(&p->pi_lock, flags); cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); + + /* + * The userspace tasks are forbidden to run on + * isolated CPUs. So exclude isolated CPUs from + * the getaffinity. + */ + if (!(p->flags & PF_KTHREAD)) + cpumask_andnot(mask, mask, cpu_isolated_mask); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); out_unlock: @@ -6632,6 +6641,12 @@ static int init_rootdomain(struct root_domain *rd) if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) goto free_dlo_mask; +#ifdef HAVE_RT_PUSH_IPI + rd->rto_cpu = -1; + raw_spin_lock_init(&rd->rto_lock); + init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); +#endif + init_dl_bw(&rd->dl_bw); if (cpudl_init(&rd->cpudl) != 0) goto free_dlo_mask; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e515311aa93c..6fc5de10673e 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3657,6 +3657,68 @@ static inline int migration_needed(struct task_struct *p, int cpu) return 0; } +static inline int +kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu) +{ + unsigned long flags; + int rc = 0; + + /* Invoke active balance to force migrate currently running task */ + raw_spin_lock_irqsave(&rq->lock, flags); + if (!rq->active_balance) { + rq->active_balance = 1; + rq->push_cpu = new_cpu; + get_task_struct(p); + rq->push_task = p; + rc = 1; + } + raw_spin_unlock_irqrestore(&rq->lock, flags); + + return rc; +} + +static DEFINE_RAW_SPINLOCK(migration_lock); + +static bool do_migration(int reason, int new_cpu, int cpu) +{ + if ((reason == 
UP_MIGRATION || reason == DOWN_MIGRATION) + && same_cluster(new_cpu, cpu)) + return false; + + /* Inter cluster high irqload migrations are OK */ + return new_cpu != cpu; +} + +/* + * Check if currently running task should be migrated to a better cpu. + * + * Todo: Effect this via changes to nohz_balancer_kick() and load balance? + */ +void check_for_migration(struct rq *rq, struct task_struct *p) +{ + int cpu = cpu_of(rq), new_cpu; + int active_balance = 0, reason; + + reason = migration_needed(p, cpu); + if (!reason) + return; + + raw_spin_lock(&migration_lock); + new_cpu = select_best_cpu(p, cpu, reason, 0); + + if (do_migration(reason, new_cpu, cpu)) { + active_balance = kick_active_balance(rq, p, new_cpu); + if (active_balance) + mark_reserved(new_cpu); + } + + raw_spin_unlock(&migration_lock); + + if (active_balance) + stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq, + &rq->active_balance_work); +} + #ifdef CONFIG_CFS_BANDWIDTH static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) @@ -11436,47 +11498,6 @@ static void rq_offline_fair(struct rq *rq) unthrottle_offline_cfs_rqs(rq); } -static inline int -kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu) -{ - int rc = 0; - - /* Invoke active balance to force migrate currently running task */ - raw_spin_lock(&rq->lock); - if (!rq->active_balance) { - rq->active_balance = 1; - rq->push_cpu = new_cpu; - get_task_struct(p); - rq->push_task = p; - rc = 1; - } - raw_spin_unlock(&rq->lock); - - return rc; -} - -void check_for_migration(struct rq *rq, struct task_struct *p) -{ - int new_cpu; - int active_balance; - int cpu = task_cpu(p); - - if (rq->misfit_task) { - if (rq->curr->state != TASK_RUNNING || - rq->curr->nr_cpus_allowed == 1) - return; - - new_cpu = select_energy_cpu_brute(p, cpu, 0); - if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) { - active_balance = kick_active_balance(rq, p, new_cpu); - if (active_balance) - stop_one_cpu_nowait(cpu, - active_load_balance_cpu_stop, 
- rq, &rq->active_balance_work); - } - } -} - #endif /* CONFIG_SMP */ /* diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c index ae6876e62c0f..ea066ab8376b 100644 --- a/kernel/sched/hmp.c +++ b/kernel/sched/hmp.c @@ -1526,6 +1526,10 @@ unsigned int cpu_temp(int cpu) return 0; } +/* + * kfree() may wakeup kswapd. So this function should NOT be called + * with any CPU's rq->lock acquired. + */ void free_task_load_ptrs(struct task_struct *p) { kfree(p->ravg.curr_window_cpu); @@ -2608,7 +2612,8 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event, p->cpu_cycles = cur_cycles; - trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time); + trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, + rq->cc.time, p); } static int diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index af6a7f424d94..c290db7f289a 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -68,10 +68,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) raw_spin_unlock(&rt_b->rt_runtime_lock); } -#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI) -static void push_irq_work_func(struct irq_work *work); -#endif - void init_rt_rq(struct rt_rq *rt_rq) { struct rt_prio_array *array; @@ -91,13 +87,6 @@ void init_rt_rq(struct rt_rq *rt_rq) rt_rq->rt_nr_migratory = 0; rt_rq->overloaded = 0; plist_head_init(&rt_rq->pushable_tasks); - -#ifdef HAVE_RT_PUSH_IPI - rt_rq->push_flags = 0; - rt_rq->push_cpu = nr_cpu_ids; - raw_spin_lock_init(&rt_rq->push_lock); - init_irq_work(&rt_rq->push_work, push_irq_work_func); -#endif #endif /* CONFIG_SMP */ /* We start is dequeued state, because no RT tasks are queued */ rt_rq->rt_queued = 0; @@ -2104,160 +2093,166 @@ static void push_rt_tasks(struct rq *rq) } #ifdef HAVE_RT_PUSH_IPI + /* - * The search for the next cpu always starts at rq->cpu and ends - * when we reach rq->cpu again. It will never return rq->cpu. - * This returns the next cpu to check, or nr_cpu_ids if the loop - * is complete. 
+ * When a high priority task schedules out from a CPU and a lower priority + * task is scheduled in, a check is made to see if there's any RT tasks + * on other CPUs that are waiting to run because a higher priority RT task + * is currently running on its CPU. In this case, the CPU with multiple RT + * tasks queued on it (overloaded) needs to be notified that a CPU has opened + * up that may be able to run one of its non-running queued RT tasks. + * + * All CPUs with overloaded RT tasks need to be notified as there is currently + * no way to know which of these CPUs have the highest priority task waiting + * to run. Instead of trying to take a spinlock on each of these CPUs, + * which has shown to cause large latency when done on machines with many + * CPUs, sending an IPI to the CPUs to have them push off the overloaded + * RT tasks waiting to run. + * + * Just sending an IPI to each of the CPUs is also an issue, as on large + * count CPU machines, this can cause an IPI storm on a CPU, especially + * if its the only CPU with multiple RT tasks queued, and a large number + * of CPUs scheduling a lower priority task at the same time. + * + * Each root domain has its own irq work function that can iterate over + * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT + * tassk must be checked if there's one or many CPUs that are lowering + * their priority, there's a single irq work iterator that will try to + * push off RT tasks that are waiting to run. + * + * When a CPU schedules a lower priority task, it will kick off the + * irq work iterator that will jump to each CPU with overloaded RT tasks. + * As it only takes the first CPU that schedules a lower priority task + * to start the process, the rto_start variable is incremented and if + * the atomic result is one, then that CPU will try to take the rto_lock. + * This prevents high contention on the lock as the process handles all + * CPUs scheduling lower priority tasks. 
+ * + * All CPUs that are scheduling a lower priority task will increment the + * rt_loop_next variable. This will make sure that the irq work iterator + * checks all RT overloaded CPUs whenever a CPU schedules a new lower + * priority task, even if the iterator is in the middle of a scan. Incrementing + * the rt_loop_next will cause the iterator to perform another scan. * - * rq->rt.push_cpu holds the last cpu returned by this function, - * or if this is the first instance, it must hold rq->cpu. */ static int rto_next_cpu(struct rq *rq) { - int prev_cpu = rq->rt.push_cpu; + struct root_domain *rd = rq->rd; + int next; int cpu; - cpu = cpumask_next(prev_cpu, rq->rd->rto_mask); - /* - * If the previous cpu is less than the rq's CPU, then it already - * passed the end of the mask, and has started from the beginning. - * We end if the next CPU is greater or equal to rq's CPU. + * When starting the IPI RT pushing, the rto_cpu is set to -1, + * rt_next_cpu() will simply return the first CPU found in + * the rto_mask. + * + * If rto_next_cpu() is called with rto_cpu is a valid cpu, it + * will return the next CPU found in the rto_mask. + * + * If there are no more CPUs left in the rto_mask, then a check is made + * against rto_loop and rto_loop_next. rto_loop is only updated with + * the rto_lock held, but any CPU may increment the rto_loop_next + * without any locking. */ - if (prev_cpu < rq->cpu) { - if (cpu >= rq->cpu) - return nr_cpu_ids; + for (;;) { - } else if (cpu >= nr_cpu_ids) { - /* - * We passed the end of the mask, start at the beginning. - * If the result is greater or equal to the rq's CPU, then - * the loop is finished. 
- */ - cpu = cpumask_first(rq->rd->rto_mask); - if (cpu >= rq->cpu) - return nr_cpu_ids; - } - rq->rt.push_cpu = cpu; + /* When rto_cpu is -1 this acts like cpumask_first() */ + cpu = cpumask_next(rd->rto_cpu, rd->rto_mask); - /* Return cpu to let the caller know if the loop is finished or not */ - return cpu; -} + rd->rto_cpu = cpu; -static int find_next_push_cpu(struct rq *rq) -{ - struct rq *next_rq; - int cpu; + if (cpu < nr_cpu_ids) + return cpu; - while (1) { - cpu = rto_next_cpu(rq); - if (cpu >= nr_cpu_ids) - break; - next_rq = cpu_rq(cpu); + rd->rto_cpu = -1; - /* Make sure the next rq can push to this rq */ - if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr) + /* + * ACQUIRE ensures we see the @rto_mask changes + * made prior to the @next value observed. + * + * Matches WMB in rt_set_overload(). + */ + next = atomic_read_acquire(&rd->rto_loop_next); + + if (rd->rto_loop == next) break; + + rd->rto_loop = next; } - return cpu; + return -1; +} + +static inline bool rto_start_trylock(atomic_t *v) +{ + return !atomic_cmpxchg_acquire(v, 0, 1); } -#define RT_PUSH_IPI_EXECUTING 1 -#define RT_PUSH_IPI_RESTART 2 +static inline void rto_start_unlock(atomic_t *v) +{ + atomic_set_release(v, 0); +} static void tell_cpu_to_push(struct rq *rq) { - int cpu; + int cpu = -1; - if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) { - raw_spin_lock(&rq->rt.push_lock); - /* Make sure it's still executing */ - if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) { - /* - * Tell the IPI to restart the loop as things have - * changed since it started. 
- */ - rq->rt.push_flags |= RT_PUSH_IPI_RESTART; - raw_spin_unlock(&rq->rt.push_lock); - return; - } - raw_spin_unlock(&rq->rt.push_lock); - } + /* Keep the loop going if the IPI is currently active */ + atomic_inc(&rq->rd->rto_loop_next); - /* When here, there's no IPI going around */ - - rq->rt.push_cpu = rq->cpu; - cpu = find_next_push_cpu(rq); - if (cpu >= nr_cpu_ids) + /* Only one CPU can initiate a loop at a time */ + if (!rto_start_trylock(&rq->rd->rto_loop_start)) return; - rq->rt.push_flags = RT_PUSH_IPI_EXECUTING; + raw_spin_lock(&rq->rd->rto_lock); + + /* + * The rto_cpu is updated under the lock, if it has a valid cpu + * then the IPI is still running and will continue due to the + * update to loop_next, and nothing needs to be done here. + * Otherwise it is finishing up and an ipi needs to be sent. + */ + if (rq->rd->rto_cpu < 0) + cpu = rto_next_cpu(rq); + + raw_spin_unlock(&rq->rd->rto_lock); - irq_work_queue_on(&rq->rt.push_work, cpu); + rto_start_unlock(&rq->rd->rto_loop_start); + + if (cpu >= 0) + irq_work_queue_on(&rq->rd->rto_push_work, cpu); } /* Called from hardirq context */ -static void try_to_push_tasks(void *arg) +void rto_push_irq_work_func(struct irq_work *work) { - struct rt_rq *rt_rq = arg; - struct rq *rq, *src_rq; - int this_cpu; + struct rq *rq; int cpu; - this_cpu = rt_rq->push_cpu; + rq = this_rq(); - /* Paranoid check */ - BUG_ON(this_cpu != smp_processor_id()); - - rq = cpu_rq(this_cpu); - src_rq = rq_of_rt_rq(rt_rq); - -again: + /* + * We do not need to grab the lock to check for has_pushable_tasks. + * When it gets updated, a check is made if a push is possible. + */ if (has_pushable_tasks(rq)) { raw_spin_lock(&rq->lock); - push_rt_task(rq); + push_rt_tasks(rq); raw_spin_unlock(&rq->lock); } - /* Pass the IPI to the next rt overloaded queue */ - raw_spin_lock(&rt_rq->push_lock); - /* - * If the source queue changed since the IPI went out, - * we need to restart the search from that CPU again. 
- */ - if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) { - rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART; - rt_rq->push_cpu = src_rq->cpu; - } + raw_spin_lock(&rq->rd->rto_lock); - cpu = find_next_push_cpu(src_rq); + /* Pass the IPI to the next rt overloaded queue */ + cpu = rto_next_cpu(rq); - if (cpu >= nr_cpu_ids) - rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING; - raw_spin_unlock(&rt_rq->push_lock); + raw_spin_unlock(&rq->rd->rto_lock); - if (cpu >= nr_cpu_ids) + if (cpu < 0) return; - /* - * It is possible that a restart caused this CPU to be - * chosen again. Don't bother with an IPI, just see if we - * have more to push. - */ - if (unlikely(cpu == rq->cpu)) - goto again; - /* Try the next RT overloaded CPU */ - irq_work_queue_on(&rt_rq->push_work, cpu); -} - -static void push_irq_work_func(struct irq_work *work) -{ - struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work); - - try_to_push_tasks(rt_rq); + irq_work_queue_on(&rq->rd->rto_push_work, cpu); } #endif /* HAVE_RT_PUSH_IPI */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 7426ae4dced3..ca2294d06f44 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -32,10 +32,8 @@ extern long calc_load_fold_active(struct rq *this_rq); #ifdef CONFIG_SMP extern void update_cpu_load_active(struct rq *this_rq); -extern void check_for_migration(struct rq *rq, struct task_struct *p); #else static inline void update_cpu_load_active(struct rq *this_rq) { } -static inline void check_for_migration(struct rq *rq, struct task_struct *p) { } #endif /* @@ -534,7 +532,7 @@ static inline int rt_bandwidth_enabled(void) } /* RT IPI pull logic requires IRQ_WORK */ -#ifdef CONFIG_IRQ_WORK +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) # define HAVE_RT_PUSH_IPI #endif @@ -555,12 +553,6 @@ struct rt_rq { unsigned long rt_nr_total; int overloaded; struct plist_head pushable_tasks; -#ifdef HAVE_RT_PUSH_IPI - int push_flags; - int push_cpu; - struct irq_work push_work; - raw_spinlock_t push_lock; -#endif 
#endif /* CONFIG_SMP */ int rt_queued; @@ -653,6 +645,19 @@ struct root_domain { struct dl_bw dl_bw; struct cpudl cpudl; +#ifdef HAVE_RT_PUSH_IPI + /* + * For IPI pull requests, loop across the rto_mask. + */ + struct irq_work rto_push_work; + raw_spinlock_t rto_lock; + /* These are only updated and read within rto_lock */ + int rto_loop; + int rto_cpu; + /* These atomics are updated outside of a lock */ + atomic_t rto_loop_next; + atomic_t rto_loop_start; +#endif /* * The "RT overload" flag: it gets set if a CPU has more than * one runnable RT task. @@ -669,6 +674,9 @@ struct root_domain { extern struct root_domain def_root_domain; +#ifdef HAVE_RT_PUSH_IPI +extern void rto_push_irq_work_func(struct irq_work *work); +#endif #endif /* CONFIG_SMP */ /* @@ -1249,7 +1257,7 @@ static inline int cpu_min_power_cost(int cpu) return cpu_rq(cpu)->cluster->min_power_cost; } -static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period) +static inline u32 cpu_cycles_to_freq(u64 cycles, u64 period) { return div64_u64(cycles, period); } @@ -1449,6 +1457,7 @@ static inline bool is_short_burst_task(struct task_struct *p) p->ravg.avg_sleep_time > sysctl_sched_short_sleep; } +extern void check_for_migration(struct rq *rq, struct task_struct *p); extern void pre_big_task_count_change(const struct cpumask *cpus); extern void post_big_task_count_change(const struct cpumask *cpus); extern void set_hmp_defaults(void); @@ -1708,6 +1717,7 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu) return 1; } +static inline void check_for_migration(struct rq *rq, struct task_struct *p) { } static inline void pre_big_task_count_change(void) { } static inline void post_big_task_count_change(void) { } static inline void set_hmp_defaults(void) { } diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 4a816bab38a2..d7612fcba10a 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -255,6 +255,7 @@ static const struct bin_table bin_net_ipv4_conf_vars_table[] = { { 
CTL_INT, NET_IPV4_CONF_NOPOLICY, "disable_policy" }, { CTL_INT, NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" }, { CTL_INT, NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, + { CTL_INT, NET_IPV4_CONF_NF_IPV4_DEFRAG_SKIP, "nf_ipv4_defrag_skip" }, {} }; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 738f3467d169..fc86fdcce932 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -70,6 +70,10 @@ static inline void tk_normalize_xtime(struct timekeeper *tk) tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift; tk->xtime_sec++; } + while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) { + tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift; + tk->raw_sec++; + } } static inline struct timespec64 tk_xtime(struct timekeeper *tk) @@ -277,18 +281,19 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) /* Go back from cycles -> shifted ns */ tk->xtime_interval = (u64) interval * clock->mult; tk->xtime_remainder = ntpinterval - tk->xtime_interval; - tk->raw_interval = - ((u64) interval * clock->mult) >> clock->shift; + tk->raw_interval = interval * clock->mult; /* if changing clocks, convert xtime_nsec shift units */ if (old_clock) { int shift_change = clock->shift - old_clock->shift; - if (shift_change < 0) + if (shift_change < 0) { tk->tkr_mono.xtime_nsec >>= -shift_change; - else + tk->tkr_raw.xtime_nsec >>= -shift_change; + } else { tk->tkr_mono.xtime_nsec <<= shift_change; + tk->tkr_raw.xtime_nsec <<= shift_change; + } } - tk->tkr_raw.xtime_nsec = 0; tk->tkr_mono.shift = clock->shift; tk->tkr_raw.shift = clock->shift; @@ -617,9 +622,6 @@ static inline void tk_update_ktime_data(struct timekeeper *tk) nsec = (u32) tk->wall_to_monotonic.tv_nsec; tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec); - /* Update the monotonic raw base */ - tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time); - /* * The sum of the nanoseconds 
portions of xtime and * wall_to_monotonic can be greater/equal one second. Take @@ -629,6 +631,11 @@ static inline void tk_update_ktime_data(struct timekeeper *tk) if (nsec >= NSEC_PER_SEC) seconds++; tk->ktime_sec = seconds; + + /* Update the monotonic raw base */ + seconds = tk->raw_sec; + nsec = (u32)(tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift); + tk->tkr_raw.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec); } /* must hold timekeeper_lock */ @@ -670,7 +677,6 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) static void timekeeping_forward_now(struct timekeeper *tk) { cycle_t cycle_now, delta; - s64 nsec; cycle_now = tk_clock_read(&tk->tkr_mono); delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); @@ -682,10 +688,13 @@ static void timekeeping_forward_now(struct timekeeper *tk) /* If arch requires, add in get_arch_timeoffset() */ tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift; - tk_normalize_xtime(tk); - nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift); - timespec64_add_ns(&tk->raw_time, nsec); + tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult; + + /* If arch requires, add in get_arch_timeoffset() */ + tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift; + + tk_normalize_xtime(tk); } /** @@ -1179,19 +1188,18 @@ int timekeeping_notify(struct clocksource *clock) void getrawmonotonic64(struct timespec64 *ts) { struct timekeeper *tk = &tk_core.timekeeper; - struct timespec64 ts64; unsigned long seq; s64 nsecs; do { seq = read_seqcount_begin(&tk_core.seq); + ts->tv_sec = tk->raw_sec; nsecs = timekeeping_get_ns(&tk->tkr_raw); - ts64 = tk->raw_time; } while (read_seqcount_retry(&tk_core.seq, seq)); - timespec64_add_ns(&ts64, nsecs); - *ts = ts64; + ts->tv_nsec = 0; + timespec64_add_ns(ts, nsecs); } EXPORT_SYMBOL(getrawmonotonic64); @@ -1315,8 +1323,7 @@ void __init timekeeping_init(void) tk_setup_internals(tk, clock); tk_set_xtime(tk, 
&now); - tk->raw_time.tv_sec = 0; - tk->raw_time.tv_nsec = 0; + tk->raw_sec = 0; if (boot.tv_sec == 0 && boot.tv_nsec == 0) boot = tk_xtime(tk); @@ -1796,7 +1803,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, unsigned int *clock_set) { cycle_t interval = tk->cycle_interval << shift; - u64 raw_nsecs; + u64 snsec_per_sec; /* If the offset is smaller than a shifted interval, do nothing */ if (offset < interval) @@ -1811,14 +1818,12 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, *clock_set |= accumulate_nsecs_to_secs(tk); /* Accumulate raw time */ - raw_nsecs = (u64)tk->raw_interval << shift; - raw_nsecs += tk->raw_time.tv_nsec; - if (raw_nsecs >= NSEC_PER_SEC) { - u64 raw_secs = raw_nsecs; - raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); - tk->raw_time.tv_sec += raw_secs; + tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; + snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; + while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { + tk->tkr_raw.xtime_nsec -= snsec_per_sec; + tk->raw_sec++; } - tk->raw_time.tv_nsec = raw_nsecs; /* Accumulate error between NTP and clock interval */ tk->ntp_error += tk->ntp_tick << shift; diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h index 45215870ac6c..3fa9c146fccb 100644 --- a/kernel/workqueue_internal.h +++ b/kernel/workqueue_internal.h @@ -9,6 +9,7 @@ #include <linux/workqueue.h> #include <linux/kthread.h> +#include <linux/preempt.h> struct worker_pool; @@ -59,7 +60,7 @@ struct worker { */ static inline struct worker *current_wq_worker(void) { - if (current->flags & PF_WQ_WORKER) + if (in_task() && (current->flags & PF_WQ_WORKER)) return kthread_data(current); return NULL; } diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index faa2a3f017f8..4fa2e54b3f59 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -227,7 +227,7 @@ next_op: hdr = 2; /* Extract a tag from the data */ - if (unlikely(dp >= datalen - 1)) + if 
(unlikely(datalen - dp < 2)) goto data_overrun_error; tag = data[dp++]; if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) @@ -273,7 +273,7 @@ next_op: int n = len - 0x80; if (unlikely(n > 2)) goto length_too_long; - if (unlikely(dp >= datalen - n)) + if (unlikely(n > datalen - dp)) goto data_overrun_error; hdr += n; for (len = 0; n > 0; n--) { diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c index e24388a863a7..468fb7cd1221 100644 --- a/lib/mpi/mpi-pow.c +++ b/lib/mpi/mpi-pow.c @@ -26,6 +26,7 @@ * however I decided to publish this code under the plain GPL. */ +#include <linux/sched.h> #include <linux/string.h> #include "mpi-internal.h" #include "longlong.h" @@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) } e <<= 1; c--; + cond_resched(); } i--; diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 86374c1c49a4..841191061816 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -65,14 +65,19 @@ static ssize_t trigger_request_store(struct device *dev, release_firmware(test_firmware); test_firmware = NULL; rc = request_firmware(&test_firmware, name, dev); - if (rc) + if (rc) { pr_info("load of '%s' failed: %d\n", name, rc); - pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0); + goto out; + } + pr_info("loaded: %zu\n", test_firmware->size); + rc = count; + +out: mutex_unlock(&test_fw_mutex); kfree(name); - return count; + return rc; } static DEVICE_ATTR_WO(trigger_request); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 6c6f5ccfcda1..8f3769ec8575 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1304,17 +1304,11 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, VM_BUG_ON_PAGE(!PageHead(page), page); if (flags & FOLL_TOUCH) { pmd_t _pmd; - /* - * We should set the dirty bit only for FOLL_WRITE but - * for now the dirty bit in the pmd is meaningless. 
- * And if the dirty bit will become meaningful and - * we'll only set it with FOLL_WRITE, an atomic - * set_bit will be required on the pmd to set the - * young bit, instead of the current set_pmd_at. - */ - _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); + _pmd = pmd_mkyoung(*pmd); + if (flags & FOLL_WRITE) + _pmd = pmd_mkdirty(_pmd); if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, - pmd, _pmd, 1)) + pmd, _pmd, flags & FOLL_WRITE)) update_mmu_cache_pmd(vma, addr, pmd); } if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { diff --git a/mm/madvise.c b/mm/madvise.c index c154e1076303..b04f2d26cdb8 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -223,15 +223,14 @@ static long madvise_willneed(struct vm_area_struct *vma, { struct file *file = vma->vm_file; + *prev = vma; #ifdef CONFIG_SWAP if (!file) { - *prev = vma; force_swapin_readahead(vma, start, end); return 0; } if (shmem_mapping(file->f_mapping)) { - *prev = vma; force_shm_swapin_readahead(vma, start, end, file->f_mapping); return 0; @@ -246,7 +245,6 @@ static long madvise_willneed(struct vm_area_struct *vma, return 0; } - *prev = vma; start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; if (end > vma->vm_end) end = vma->vm_end; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 915c60258935..2ea77b967709 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -286,28 +286,37 @@ EXPORT_SYMBOL(nr_online_nodes); int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT + +/* + * Determine how many pages need to be initialized durig early boot + * (non-deferred initialization). + * The value of first_deferred_pfn will be set later, once non-deferred pages + * are initialized, but for now set it ULONG_MAX. 
+ */ static inline void reset_deferred_meminit(pg_data_t *pgdat) { - unsigned long max_initialise; - unsigned long reserved_lowmem; + phys_addr_t start_addr, end_addr; + unsigned long max_pgcnt; + unsigned long reserved; /* * Initialise at least 2G of a node but also take into account that * two large system hashes that can take up 1GB for 0.25TB/node. */ - max_initialise = max(2UL << (30 - PAGE_SHIFT), - (pgdat->node_spanned_pages >> 8)); + max_pgcnt = max(2UL << (30 - PAGE_SHIFT), + (pgdat->node_spanned_pages >> 8)); /* * Compensate the all the memblock reservations (e.g. crash kernel) * from the initial estimation to make sure we will initialize enough * memory to boot. */ - reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn, - pgdat->node_start_pfn + max_initialise); - max_initialise += reserved_lowmem; + start_addr = PFN_PHYS(pgdat->node_start_pfn); + end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt); + reserved = memblock_reserved_memory_within(start_addr, end_addr); + max_pgcnt += PHYS_PFN(reserved); - pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages); + pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages); pgdat->first_deferred_pfn = ULONG_MAX; } @@ -343,7 +352,7 @@ static inline bool update_defer_init(pg_data_t *pgdat, return true; /* Initialise at least 2G of the highest zone */ (*nr_initialised)++; - if ((*nr_initialised > pgdat->static_init_size) && + if ((*nr_initialised > pgdat->static_init_pgcnt) && (pfn & (PAGES_PER_SECTION - 1)) == 0) { pgdat->first_deferred_pfn = pfn; return false; diff --git a/mm/page_ext.c b/mm/page_ext.c index 916accfec86a..f02ad1cc7d24 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -103,7 +103,6 @@ struct page_ext *lookup_page_ext(struct page *page) struct page_ext *base; base = NODE_DATA(page_to_nid(page))->node_page_ext; -#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING) /* * The sanity checks the page allocator does upon freeing a * page can 
reach here before the page_ext arrays are @@ -115,7 +114,6 @@ struct page_ext *lookup_page_ext(struct page *page) */ if (unlikely(!base)) return NULL; -#endif offset = pfn - round_down(node_start_pfn(page_to_nid(page)), MAX_ORDER_NR_PAGES); return base + offset; @@ -180,7 +178,6 @@ struct page_ext *lookup_page_ext(struct page *page) { unsigned long pfn = page_to_pfn(page); struct mem_section *section = __pfn_to_section(pfn); -#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING) /* * The sanity checks the page allocator does upon freeing a * page can reach here before the page_ext arrays are @@ -192,7 +189,6 @@ struct page_ext *lookup_page_ext(struct page *page) */ if (!section->page_ext) return NULL; -#endif return section->page_ext + pfn; } diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 29f2f8b853ae..c2cbd2620169 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -142,8 +142,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end, do { next = hugetlb_entry_end(h, addr, end); pte = huge_pte_offset(walk->mm, addr & hmask); - if (pte && walk->hugetlb_entry) + + if (pte) err = walk->hugetlb_entry(pte, hmask, addr, next, walk); + else if (walk->pte_hole) + err = walk->pte_hole(addr, next, walk); + if (err) break; } while (addr = next, addr != end); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 5e4199d5a388..01abb6431fd9 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -376,6 +376,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, dev->name); vlan_vid_add(dev, htons(ETH_P_8021Q), 0); } + if (event == NETDEV_DOWN && + (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + vlan_vid_del(dev, htons(ETH_P_8021Q), 0); vlan_info = rtnl_dereference(dev->vlan_info); if (!vlan_info) @@ -423,9 +426,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, struct net_device *tmp; LIST_HEAD(close_list); - if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) - vlan_vid_del(dev, 
htons(ETH_P_8021Q), 0); - /* Put all VLANs for this dev in the down state too. */ vlan_group_for_each_dev(grp, i, vlandev) { flgs = vlandev->flags; diff --git a/net/9p/client.c b/net/9p/client.c index f5feac4ff4ec..3ff26eb1ea20 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -749,8 +749,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) } again: /* Wait for the response */ - err = wait_event_interruptible(*req->wq, - req->status >= REQ_STATUS_RCVD); + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD); /* * Make sure our req is coherent with regard to updates in other diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 6e70ddb158b4..2ddeecca5b12 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -290,8 +290,8 @@ req_retry: if (err == -ENOSPC) { chan->ring_bufs_avail = 0; spin_unlock_irqrestore(&chan->lock, flags); - err = wait_event_interruptible(*chan->vc_wq, - chan->ring_bufs_avail); + err = wait_event_killable(*chan->vc_wq, + chan->ring_bufs_avail); if (err == -ERESTARTSYS) return err; @@ -331,7 +331,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan, * Other zc request to finish here */ if (atomic_read(&vp_pinned) >= chan->p9_max_pages) { - err = wait_event_interruptible(vp_wq, + err = wait_event_killable(vp_wq, (atomic_read(&vp_pinned) < chan->p9_max_pages)); if (err == -ERESTARTSYS) return err; @@ -475,8 +475,8 @@ req_retry_pinned: if (err == -ENOSPC) { chan->ring_bufs_avail = 0; spin_unlock_irqrestore(&chan->lock, flags); - err = wait_event_interruptible(*chan->vc_wq, - chan->ring_bufs_avail); + err = wait_event_killable(*chan->vc_wq, + chan->ring_bufs_avail); if (err == -ERESTARTSYS) goto err_out; @@ -493,8 +493,7 @@ req_retry_pinned: virtqueue_kick(chan->vq); spin_unlock_irqrestore(&chan->lock, flags); p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); - err = wait_event_interruptible(*req->wq, - req->status >= REQ_STATUS_RCVD); + err = wait_event_killable(*req->wq, 
req->status >= REQ_STATUS_RCVD); /* * Non kernel buffers are pinned, unpin them */ diff --git a/net/core/dev.c b/net/core/dev.c index 8fc44bf48b9c..b26aaa5729d9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1113,9 +1113,8 @@ static int dev_alloc_name_ns(struct net *net, return ret; } -static int dev_get_valid_name(struct net *net, - struct net_device *dev, - const char *name) +int dev_get_valid_name(struct net *net, struct net_device *dev, + const char *name) { BUG_ON(!net); @@ -1131,6 +1130,7 @@ static int dev_get_valid_name(struct net *net, return 0; } +EXPORT_SYMBOL(dev_get_valid_name); /** * dev_change_name - change name of a device diff --git a/net/core/skbuff.c b/net/core/skbuff.c index fedcee8263b6..c3ec257493bc 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4255,6 +4255,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) if (!xnet) return; + ipvs_reset(skb); skb_orphan(skb); skb->mark = 0; } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index e217f17997a4..6eb2bbf9873b 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, sk_daddr_set(newsk, ireq->ir_rmt_addr); sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); newinet->inet_saddr = ireq->ir_loc_addr; - newinet->inet_opt = ireq->opt; - ireq->opt = NULL; + RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; newinet->inet_id = jiffies; @@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); - + if (*own_req) + ireq->ireq_opt = NULL; + else + newinet->inet_opt = NULL; return newsk; exit_overflow: @@ -441,6 +443,7 @@ exit: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; put_and_exit: + newinet->inet_opt = NULL; inet_csk_prepare_forced_close(newsk); 
dccp_done(newsk); goto exit; @@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req ireq->ir_rmt_addr); err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, ireq->ir_rmt_addr, - ireq->opt); + ireq_opt_deref(ireq)); err = net_xmit_eval(err); } @@ -546,7 +549,7 @@ out: static void dccp_v4_reqsk_destructor(struct request_sock *req) { dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); - kfree(inet_rsk(req)->opt); + kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1)); } void dccp_syn_ack_timeout(const struct request_sock *req) diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index ff7736f7ff42..fc0c09e770e6 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -1,12 +1,13 @@ config HAVE_NET_DSA def_bool y - depends on NETDEVICES && !S390 + depends on INET && NETDEVICES && !S390 # Drivers must select NET_DSA and the appropriate tagging format config NET_DSA tristate "Distributed Switch Architecture" - depends on HAVE_NET_DSA && NET_SWITCHDEV + depends on HAVE_NET_DSA + select NET_SWITCHDEV select PHYLIB ---help--- Say Y if you want to enable support for the hardware switches supported diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index f2a71025a770..22377c8ff14b 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -270,6 +270,9 @@ static void ah_input_done(struct crypto_async_request *base, int err) int ihl = ip_hdrlen(skb); int ah_hlen = (ah->hdrlen + 2) << 2; + if (err) + goto out; + work_iph = AH_SKB_CB(skb)->tmp; auth_data = ah_tmp_auth(work_iph, ihl); icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 6cc3e1d602fb..5f3b81941a6f 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -2012,7 +2012,7 @@ int cipso_v4_req_setattr(struct request_sock *req, buf = NULL; req_inet = inet_rsk(req); - opt = xchg(&req_inet->opt, opt); + opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt); if (opt) kfree_rcu(opt, rcu); @@ 
-2034,11 +2034,13 @@ req_setattr_failure: * values on failure. * */ -static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) +static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr) { + struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1); int hdr_delta = 0; - struct ip_options_rcu *opt = *opt_ptr; + if (!opt || opt->opt.cipso == 0) + return 0; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; @@ -2100,14 +2102,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) */ void cipso_v4_sock_delattr(struct sock *sk) { - int hdr_delta; - struct ip_options_rcu *opt; struct inet_sock *sk_inet; + int hdr_delta; sk_inet = inet_sk(sk); - opt = rcu_dereference_protected(sk_inet->inet_opt, 1); - if (!opt || opt->opt.cipso == 0) - return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { @@ -2127,15 +2125,7 @@ void cipso_v4_sock_delattr(struct sock *sk) */ void cipso_v4_req_delattr(struct request_sock *req) { - struct ip_options_rcu *opt; - struct inet_request_sock *req_inet; - - req_inet = inet_rsk(req); - opt = req_inet->opt; - if (!opt || opt->opt.cipso == 0) - return; - - cipso_v4_delopt(&req_inet->opt); + cipso_v4_delopt(&inet_rsk(req)->ireq_opt); } /** diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 0212591b0077..1110e70e0ec6 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -2196,6 +2196,8 @@ static struct devinet_sysctl_table { "promote_secondaries"), DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET, "route_localnet"), + DEVINET_SYSCTL_RW_ENTRY(NF_IPV4_DEFRAG_SKIP, + "nf_ipv4_defrag_skip"), }, }; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 81fcff83d309..6640547df8f5 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -419,9 +419,11 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk, { const struct inet_request_sock *ireq = inet_rsk(req); 
struct net *net = read_pnet(&ireq->ireq_net); - struct ip_options_rcu *opt = ireq->opt; + struct ip_options_rcu *opt; struct rtable *rt; + opt = ireq_opt_deref(ireq); + flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), @@ -455,10 +457,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, struct flowi4 *fl4; struct rtable *rt; + opt = rcu_dereference(ireq->ireq_opt); fl4 = &newinet->cork.fl.u.ip4; - rcu_read_lock(); - opt = rcu_dereference(newinet->inet_opt); flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), @@ -471,13 +472,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, goto no_route; if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) goto route_err; - rcu_read_unlock(); return &rt->dst; route_err: ip_rt_put(rt); no_route: - rcu_read_unlock(); IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); return NULL; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f300d1cbfa91..097a1243c16c 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -808,6 +808,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, { struct ip_mreqn mreq; struct net_device *dev = NULL; + int midx; if (sk->sk_type == SOCK_STREAM) goto e_inval; @@ -852,11 +853,15 @@ static int do_ip_setsockopt(struct sock *sk, int level, err = -EADDRNOTAVAIL; if (!dev) break; + + midx = l3mdev_master_ifindex(dev); + dev_put(dev); err = -EINVAL; if (sk->sk_bound_dev_if && - mreq.imr_ifindex != sk->sk_bound_dev_if) + mreq.imr_ifindex != sk->sk_bound_dev_if && + (!midx || midx != sk->sk_bound_dev_if)) break; inet->mc_index = mreq.imr_ifindex; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index a09fb0dec725..486b283a6cd1 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -129,42 +129,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly; static int 
ipip_err(struct sk_buff *skb, u32 info) { -/* All the routers (except for Linux) return only - 8 bytes of packet payload. It means, that precise relaying of - ICMP in the real Internet is absolutely infeasible. - */ + /* All the routers (except for Linux) return only + 8 bytes of packet payload. It means, that precise relaying of + ICMP in the real Internet is absolutely infeasible. + */ struct net *net = dev_net(skb->dev); struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); const struct iphdr *iph = (const struct iphdr *)skb->data; - struct ip_tunnel *t; - int err; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; + struct ip_tunnel *t; + int err = 0; + + switch (type) { + case ICMP_DEST_UNREACH: + switch (code) { + case ICMP_SR_FAILED: + /* Impossible event. */ + goto out; + default: + /* All others are translated to HOST_UNREACH. + * rfc2003 contains "deep thoughts" about NET_UNREACH, + * I believe they are just ether pollution. --ANK + */ + break; + } + break; + + case ICMP_TIME_EXCEEDED: + if (code != ICMP_EXC_TTL) + goto out; + break; + + case ICMP_REDIRECT: + break; + + default: + goto out; + } - err = -ENOENT; t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, iph->daddr, iph->saddr, 0); - if (!t) + if (!t) { + err = -ENOENT; goto out; + } if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { - ipv4_update_pmtu(skb, dev_net(skb->dev), info, - t->parms.link, 0, IPPROTO_IPIP, 0); - err = 0; + ipv4_update_pmtu(skb, net, info, t->parms.link, 0, + iph->protocol, 0); goto out; } if (type == ICMP_REDIRECT) { - ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, - IPPROTO_IPIP, 0); - err = 0; + ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0); goto out; } - if (t->parms.iph.daddr == 0) + if (t->parms.iph.daddr == 0) { + err = -ENOENT; goto out; + } - err = 0; if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) goto out; diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c 
b/net/ipv4/netfilter/nf_defrag_ipv4.c index a04dee536b8e..39455484bd13 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -11,6 +11,7 @@ #include <linux/netfilter.h> #include <linux/module.h> #include <linux/skbuff.h> +#include <linux/inetdevice.h> #include <net/route.h> #include <net/ip.h> @@ -80,8 +81,13 @@ static unsigned int ipv4_conntrack_defrag(void *priv, #endif /* Gather fragments. */ if (ip_is_fragment(ip_hdr(skb))) { - enum ip_defrag_users user = - nf_ct_defrag_user(state->hook, skb); + enum ip_defrag_users user; + + if (skb->dev && + IN_DEV_NF_IPV4_DEFRAG_SKIP(__in_dev_get_rcu(skb->dev))) + return NF_ACCEPT; + + user = nf_ct_defrag_user(state->hook, skb); if (nf_ct_ipv4_gather_frags(state->net, skb, user)) return NF_STOLEN; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index a2e1142145df..57bbcd5b650a 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -357,7 +357,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) */ - ireq->opt = tcp_v4_save_options(skb); + RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb)); if (security_inet_conn_request(sk, skb, req)) { reqsk_free(req); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0047b151e8e8..277e502ff253 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4943,7 +4943,7 @@ static void tcp_check_space(struct sock *sk) if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); /* pairs with tcp_poll() */ - smp_mb__after_atomic(); + smp_mb(); if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) tcp_new_space(sk); @@ -6107,7 +6107,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, struct inet_request_sock *ireq = inet_rsk(req); kmemcheck_annotate_bitfield(ireq, flags); - ireq->opt = NULL; + ireq->ireq_opt 
= NULL; atomic64_set(&ireq->ir_cookie, 0); ireq->ireq_state = TCP_NEW_SYN_RECV; write_pnet(&ireq->ireq_net, sock_net(sk_listener)); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3845ab04a9b4..30d4e38a6241 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -860,7 +860,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, ireq->ir_rmt_addr, - ireq->opt); + ireq_opt_deref(ireq)); err = net_xmit_eval(err); } @@ -872,7 +872,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, */ static void tcp_v4_reqsk_destructor(struct request_sock *req) { - kfree(inet_rsk(req)->opt); + kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1)); } @@ -1201,7 +1201,7 @@ static void tcp_v4_init_req(struct request_sock *req, sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); ireq->no_srccheck = inet_sk(sk_listener)->transparent; - ireq->opt = tcp_v4_save_options(skb); + RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb)); } static struct dst_entry *tcp_v4_route_req(const struct sock *sk, @@ -1297,10 +1297,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, ireq = inet_rsk(req); sk_daddr_set(newsk, ireq->ir_rmt_addr); sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); - newinet->inet_saddr = ireq->ir_loc_addr; - inet_opt = ireq->opt; - rcu_assign_pointer(newinet->inet_opt, inet_opt); - ireq->opt = NULL; + newinet->inet_saddr = ireq->ir_loc_addr; + inet_opt = rcu_dereference(ireq->ireq_opt); + RCU_INIT_POINTER(newinet->inet_opt, inet_opt); newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; newinet->rcv_tos = ip_hdr(skb)->tos; @@ -1348,9 +1347,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); - if 
(*own_req) + if (likely(*own_req)) { tcp_move_syn(newtp, req); - + ireq->ireq_opt = NULL; + } else { + newinet->inet_opt = NULL; + } return newsk; exit_overflow: @@ -1361,6 +1363,7 @@ exit: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; put_and_exit: + newinet->inet_opt = NULL; inet_csk_prepare_forced_close(newsk); tcp_done(newsk); goto exit; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 4e88f93f71c8..7d82c172db78 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1951,6 +1951,7 @@ static int tcp_mtu_probe(struct sock *sk) nskb->ip_summed = skb->ip_summed; tcp_insert_write_queue_before(nskb, skb, sk); + tcp_highest_sack_replace(sk, skb, nskb); len = 0; tcp_for_write_queue_from_safe(skb, next, sk) { @@ -2464,7 +2465,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); - tcp_highest_sack_combine(sk, next_skb, skb); + tcp_highest_sack_replace(sk, next_skb, skb); tcp_unlink_write_queue(next_skb, sk); @@ -3017,13 +3018,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, tcp_ecn_make_synack(req, th); th->source = htons(ireq->ir_num); th->dest = ireq->ir_rmt_port; - /* Setting of flags are superfluous here for callers (and ECE is - * not even correctly set) - */ - tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, - TCPHDR_SYN | TCPHDR_ACK); - - th->seq = htonl(TCP_SKB_CB(skb)->seq); + skb->ip_summed = CHECKSUM_PARTIAL; + th->seq = htonl(tcp_rsk(req)->snt_isn); /* XXX data is queued and acked as is. 
No buffer/window check */ th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index dc2db4f7b182..f3a0a9c0f61e 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, } opt_space->dst1opt = fopt->dst1opt; opt_space->opt_flen = fopt->opt_flen; + opt_space->tot_len = fopt->tot_len; return opt_space; } EXPORT_SYMBOL_GPL(fl6_merge_options); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 6150a038711b..9d1a54de33f2 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -409,13 +409,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, case ICMPV6_DEST_UNREACH: net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", t->parms.name); - break; + if (code != ICMPV6_PORT_UNREACH) + break; + return; case ICMPV6_TIME_EXCEED: if (code == ICMPV6_EXC_HOPLIMIT) { net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", t->parms.name); + break; } - break; + return; case ICMPV6_PARAMPROB: teli = 0; if (code == ICMPV6_HDR_FIELD) @@ -431,13 +434,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", t->parms.name); } - break; + return; case ICMPV6_PKT_TOOBIG: mtu = be32_to_cpu(info) - offset; if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; - break; + return; } if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index e22339fad10b..71624cf26832 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1201,11 +1201,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, if (WARN_ON(v6_cork->opt)) return -EINVAL; - v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation); + v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation); if 
(unlikely(!v6_cork->opt)) return -ENOBUFS; - v6_cork->opt->tot_len = opt->tot_len; + v6_cork->opt->tot_len = sizeof(*opt); v6_cork->opt->opt_flen = opt->opt_flen; v6_cork->opt->opt_nflen = opt->opt_nflen; diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index f615f982961a..1831fb108ad1 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev) struct vti6_net *ip6n = net_generic(net, vti6_net_id); int err; + dev->rtnl_link_ops = &vti6_link_ops; err = register_netdevice(dev); if (err < 0) goto out; strcpy(t->parms.name, dev->name); - dev->rtnl_link_ops = &vti6_link_ops; dev_hold(dev); vti6_tnl_link(ip6n, t); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4449ad1f8114..a4a30d2ca66f 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -583,16 +583,24 @@ done: if (val) { struct net_device *dev; + int midx; - if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) - goto e_inval; + rcu_read_lock(); - dev = dev_get_by_index(net, val); + dev = dev_get_by_index_rcu(net, val); if (!dev) { + rcu_read_unlock(); retv = -ENODEV; break; } - dev_put(dev); + midx = l3mdev_master_ifindex_rcu(dev); + + rcu_read_unlock(); + + if (sk->sk_bound_dev_if && + sk->sk_bound_dev_if != val && + (!midx || midx != sk->sk_bound_dev_if)) + goto e_inval; } np->mcast_oif = val; retv = 0; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 5c710f78163e..e367ce026db3 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3380,7 +3380,11 @@ static int ip6_route_dev_notify(struct notifier_block *this, net->ipv6.ip6_blk_hole_entry->dst.dev = dev; net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); #endif - } else if (event == NETDEV_UNREGISTER) { + } else if (event == NETDEV_UNREGISTER && + dev->reg_state != NETREG_UNREGISTERED) { + /* NETDEV_UNREGISTER could be fired for multiple times by + * netdev_wait_allrefs(). Make sure we only call this once. 
+ */ in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 8ab9c5d74416..67f2e72723b2 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1015,6 +1015,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, session->name, cmd, arg); sk = ps->sock; + if (!sk) + return -EBADR; + sock_hold(sk); switch (cmd) { diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 67fede656ea5..424aca76a192 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -682,7 +682,6 @@ struct ieee80211_if_mesh { const struct ieee80211_mesh_sync_ops *sync_ops; s64 sync_offset_clockdrift_max; spinlock_t sync_offset_lock; - bool adjusting_tbtt; /* mesh power save */ enum nl80211_mesh_power_mode nonpeer_pm; int ps_peers_light_sleep; diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 44388d6a1d8e..4a72c0d1e56f 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -4,6 +4,7 @@ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -18,6 +19,7 @@ #include <linux/slab.h> #include <linux/export.h> #include <net/mac80211.h> +#include <crypto/algapi.h> #include <asm/unaligned.h> #include "ieee80211_i.h" #include "driver-ops.h" @@ -606,6 +608,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key) ieee80211_key_free_common(key); } +static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata, + struct ieee80211_key *old, + struct ieee80211_key *new) +{ + u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP]; + u8 *tk_old, *tk_new; + + if (!old || new->conf.keylen != 
old->conf.keylen) + return false; + + tk_old = old->conf.key; + tk_new = new->conf.key; + + /* + * In station mode, don't compare the TX MIC key, as it's never used + * and offloaded rekeying may not care to send it to the host. This + * is the case in iwlwifi, for example. + */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + new->conf.cipher == WLAN_CIPHER_SUITE_TKIP && + new->conf.keylen == WLAN_KEY_LEN_TKIP && + !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP); + memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP); + memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); + memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); + tk_old = tkip_old; + tk_new = tkip_new; + } + + return !crypto_memneq(tk_old, tk_new, new->conf.keylen); +} + int ieee80211_key_link(struct ieee80211_key *key, struct ieee80211_sub_if_data *sdata, struct sta_info *sta) @@ -617,9 +652,6 @@ int ieee80211_key_link(struct ieee80211_key *key, pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; idx = key->conf.keyidx; - key->local = sdata->local; - key->sdata = sdata; - key->sta = sta; mutex_lock(&sdata->local->key_mtx); @@ -630,6 +662,20 @@ int ieee80211_key_link(struct ieee80211_key *key, else old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); + /* + * Silently accept key re-installation without really installing the + * new version of the key to avoid nonce reuse or replay issues. 
+ */ + if (ieee80211_key_identical(sdata, old_key, key)) { + ieee80211_key_free_unused(key); + ret = 0; + goto out; + } + + key->local = sdata->local; + key->sdata = sdata; + key->sta = sta; + increment_tailroom_need_count(sdata); ieee80211_key_replace(sdata, sta, pairwise, old_key, key); @@ -645,6 +691,7 @@ int ieee80211_key_link(struct ieee80211_key *key, ret = 0; } + out: mutex_unlock(&sdata->local->key_mtx); return ret; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 9063e8e736ad..9e1ded80a992 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -295,8 +295,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */ *pos |= ifmsh->ps_peers_deep_sleep ? IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00; - *pos++ |= ifmsh->adjusting_tbtt ? - IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00; *pos++ = 0x00; return 0; @@ -866,7 +864,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) ifmsh->mesh_cc_id = 0; /* Disabled */ /* register sync ops from extensible synchronization framework */ ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id); - ifmsh->adjusting_tbtt = false; ifmsh->sync_offset_clockdrift_max = 0; set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); ieee80211_mesh_root_setup(ifmsh); diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index bd3d55eb21d4..9f02e54ad2a5 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -495,12 +495,14 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr, /* Userspace handles station allocation */ if (sdata->u.mesh.user_mpm || - sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) - cfg80211_notify_new_peer_candidate(sdata->dev, addr, - elems->ie_start, - elems->total_len, - GFP_KERNEL); - else + sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) { + if (mesh_peer_accepts_plinks(elems) && + mesh_plink_availables(sdata)) + cfg80211_notify_new_peer_candidate(sdata->dev, 
addr, + elems->ie_start, + elems->total_len, + GFP_KERNEL); + } else sta = __mesh_sta_info_alloc(sdata, addr); return sta; diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index 64bc22ad9496..16ed43fe4841 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c @@ -119,7 +119,6 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, */ if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) { - clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", sta->sta.addr); goto no_sync; @@ -168,11 +167,9 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata, struct beacon_data *beacon) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - u8 cap; WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); WARN_ON(!rcu_read_lock_held()); - cap = beacon->meshconf->meshconf_cap; spin_lock_bh(&ifmsh->sync_offset_lock); @@ -186,21 +183,13 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata, "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n", ifmsh->sync_offset_clockdrift_max); set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags); - - ifmsh->adjusting_tbtt = true; } else { msync_dbg(sdata, "TBTT : max clockdrift=%lld; too small to adjust\n", (long long)ifmsh->sync_offset_clockdrift_max); ifmsh->sync_offset_clockdrift_max = 0; - - ifmsh->adjusting_tbtt = false; } spin_unlock_bh(&ifmsh->sync_offset_lock); - - beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ? 
- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap : - ~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap; } static const struct sync_method sync_methods[] = { diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2cb429d34c03..120e9ae04db3 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1996,7 +1996,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, * is called on error from nf_tables_newrule(). */ expr = nft_expr_first(rule); - while (expr->ops && expr != nft_expr_last(rule)) { + while (expr != nft_expr_last(rule) && expr->ops) { nf_tables_expr_destroy(ctx, expr); expr = nft_expr_next(expr); } diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 9dfaf4d55ee0..a97a5bf716be 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -151,8 +151,34 @@ void nft_meta_get_eval(const struct nft_expr *expr, else *dest = PACKET_BROADCAST; break; + case NFPROTO_NETDEV: + switch (skb->protocol) { + case htons(ETH_P_IP): { + int noff = skb_network_offset(skb); + struct iphdr *iph, _iph; + + iph = skb_header_pointer(skb, noff, + sizeof(_iph), &_iph); + if (!iph) + goto err; + + if (ipv4_is_multicast(iph->daddr)) + *dest = PACKET_MULTICAST; + else + *dest = PACKET_BROADCAST; + + break; + } + case htons(ETH_P_IPV6): + *dest = PACKET_MULTICAST; + break; + default: + WARN_ON_ONCE(1); + goto err; + } + break; default: - WARN_ON(1); + WARN_ON_ONCE(1); goto err; } break; diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c index 61d216eb7917..5d189c11d208 100644 --- a/net/netfilter/nft_queue.c +++ b/net/netfilter/nft_queue.c @@ -37,7 +37,7 @@ static void nft_queue_eval(const struct nft_expr *expr, if (priv->queues_total > 1) { if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) { - int cpu = smp_processor_id(); + int cpu = raw_smp_processor_id(); queue = priv->queuenum + cpu % priv->queues_total; } else { diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 
2141d047301d..862e088905cc 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2057,7 +2057,7 @@ static int netlink_dump(struct sock *sk) struct sk_buff *skb = NULL; struct nlmsghdr *nlh; struct module *module; - int len, err = -ENOBUFS; + int err = -ENOBUFS; int alloc_min_size; int alloc_size; @@ -2105,9 +2105,11 @@ static int netlink_dump(struct sock *sk) skb_reserve(skb, skb_tailroom(skb) - alloc_size); netlink_skb_set_owner_r(skb, sk); - len = cb->dump(skb, cb); + if (nlk->dump_done_errno > 0) + nlk->dump_done_errno = cb->dump(skb, cb); - if (len > 0) { + if (nlk->dump_done_errno > 0 || + skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) { mutex_unlock(nlk->cb_mutex); if (sk_filter(sk, skb)) @@ -2117,13 +2119,15 @@ static int netlink_dump(struct sock *sk) return 0; } - nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI); - if (!nlh) + nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, + sizeof(nlk->dump_done_errno), NLM_F_MULTI); + if (WARN_ON(!nlh)) goto errout_skb; nl_dump_check_consistent(cb, nlh); - memcpy(nlmsg_data(nlh), &len, sizeof(len)); + memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, + sizeof(nlk->dump_done_errno)); if (sk_filter(sk, skb)) kfree_skb(skb); @@ -2179,6 +2183,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, cb = &nlk->cb; memset(cb, 0, sizeof(*cb)); + cb->start = control->start; cb->dump = control->dump; cb->done = control->done; cb->nlh = nlh; @@ -2188,9 +2193,13 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, cb->skb = skb; nlk->cb_running = true; + nlk->dump_done_errno = INT_MAX; mutex_unlock(nlk->cb_mutex); + if (cb->start) + cb->start(cb); + ret = netlink_dump(sk); sock_put(sk); diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index 15e62973cfc6..4de7e97d8fb2 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -38,6 +38,7 @@ struct netlink_sock { wait_queue_head_t wait; bool bound; bool cb_running; + int 
dump_done_errno; struct netlink_callback cb; struct mutex *cb_mutex; struct mutex cb_def_mutex; diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 80649934cf3b..b2cde0e09809 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -513,6 +513,20 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, } EXPORT_SYMBOL(genlmsg_put); +static int genl_lock_start(struct netlink_callback *cb) +{ + /* our ops are always const - netlink API doesn't propagate that */ + const struct genl_ops *ops = cb->data; + int rc = 0; + + if (ops->start) { + genl_lock(); + rc = ops->start(cb); + genl_unlock(); + } + return rc; +} + static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { /* our ops are always const - netlink API doesn't propagate that */ @@ -577,6 +591,7 @@ static int genl_family_rcv_msg(struct genl_family *family, .module = family->module, /* we have const, but the netlink API doesn't */ .data = (void *)ops, + .start = genl_lock_start, .dump = genl_lock_dumpit, .done = genl_lock_done, }; @@ -588,6 +603,7 @@ static int genl_family_rcv_msg(struct genl_family *family, } else { struct netlink_dump_control c = { .module = family->module, + .start = ops->start, .dump = ops->dumpit, .done = ops->done, }; diff --git a/net/nfc/core.c b/net/nfc/core.c index c5a2c7e733b3..1471e4b0aa2c 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -1093,7 +1093,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, err_free_dev: kfree(dev); - return ERR_PTR(rc); + return NULL; } EXPORT_SYMBOL(nfc_allocate_device); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 241f69039a72..1584f89c456a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1724,7 +1724,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) out: if (err && rollover) { - kfree(rollover); + kfree_rcu(rollover, rcu); po->rollover = NULL; } mutex_unlock(&fanout_mutex); @@ -1751,8 +1751,10 @@ static struct 
packet_fanout *fanout_release(struct sock *sk) else f = NULL; - if (po->rollover) + if (po->rollover) { kfree_rcu(po->rollover, rcu); + po->rollover = NULL; + } } mutex_unlock(&fanout_mutex); @@ -3769,6 +3771,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, void *data = &val; union tpacket_stats_u st; struct tpacket_rollover_stats rstats; + struct packet_rollover *rollover; if (level != SOL_PACKET) return -ENOPROTOOPT; @@ -3847,13 +3850,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, 0); break; case PACKET_ROLLOVER_STATS: - if (!po->rollover) + rcu_read_lock(); + rollover = rcu_dereference(po->rollover); + if (rollover) { + rstats.tp_all = atomic_long_read(&rollover->num); + rstats.tp_huge = atomic_long_read(&rollover->num_huge); + rstats.tp_failed = atomic_long_read(&rollover->num_failed); + data = &rstats; + lv = sizeof(rstats); + } + rcu_read_unlock(); + if (!rollover) return -EINVAL; - rstats.tp_all = atomic_long_read(&po->rollover->num); - rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); - rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); - data = &rstats; - lv = sizeof(rstats); break; case PACKET_TX_HAS_OFF: val = po->tp_tx_has_off; diff --git a/net/rds/send.c b/net/rds/send.c index 6815f03324d7..1a3c6acdd3f8 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -959,6 +959,11 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm, ret = rds_cmsg_rdma_map(rs, rm, cmsg); if (!ret) *allocated_mr = 1; + else if (ret == -ENODEV) + /* Accommodate the get_mr() case which can fail + * if connection isn't established yet. + */ + ret = -EAGAIN; break; case RDS_CMSG_ATOMIC_CSWP: case RDS_CMSG_ATOMIC_FADD: @@ -1072,8 +1077,12 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) /* Parse any control messages the user may have included. 
*/ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr); - if (ret) + if (ret) { + /* Trigger connection so that its ready for the next retry */ + if (ret == -EAGAIN) + rds_conn_connect_if_down(conn); goto out; + } if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", diff --git a/net/sctp/debug.c b/net/sctp/debug.c index 95d7b15dad21..e371a0d90068 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c @@ -166,7 +166,7 @@ static const char *const sctp_timer_tbl[] = { /* Lookup timer debug name. */ const char *sctp_tname(const sctp_subtype_t id) { - if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX) + if (id.timeout < ARRAY_SIZE(sctp_timer_tbl)) return sctp_timer_tbl[id.timeout]; return "unknown_timer"; } diff --git a/net/sctp/input.c b/net/sctp/input.c index 2d7859c03fd2..71c2ef84c5b0 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -420,7 +420,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t, { struct dst_entry *dst; - if (!t) + if (sock_owned_by_user(sk) || !t) return; dst = sctp_transport_dst_check(t); if (dst) diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index e33e9bd4ed5a..8a61ccc37e12 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -806,6 +806,8 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { struct sctp_ulpevent *ev = sctp_skb2event(skb); addr->v6.sin6_scope_id = ev->iif; + } else { + addr->v6.sin6_scope_id = 0; } } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 3ebf3b652d60..7f0f689b8d2b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -168,6 +168,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) sk_mem_charge(sk, chunk->skb->truesize); } +static void sctp_clear_owner_w(struct sctp_chunk *chunk) +{ + skb_orphan(chunk->skb); +} + +static void sctp_for_each_tx_datachunk(struct sctp_association *asoc, + void (*cb)(struct sctp_chunk *)) + +{ + struct 
sctp_outq *q = &asoc->outqueue; + struct sctp_transport *t; + struct sctp_chunk *chunk; + + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) + list_for_each_entry(chunk, &t->transmitted, transmitted_list) + cb(chunk); + + list_for_each_entry(chunk, &q->retransmit, list) + cb(chunk); + + list_for_each_entry(chunk, &q->sacked, list) + cb(chunk); + + list_for_each_entry(chunk, &q->abandoned, list) + cb(chunk); + + list_for_each_entry(chunk, &q->out_chunk_list, list) + cb(chunk); +} + /* Verify that this is a valid address. */ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len) @@ -4423,6 +4453,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) struct socket *sock; int err = 0; + /* Do not peel off from one netns to another one. */ + if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) + return -EINVAL; + if (!asoc) return -EINVAL; @@ -7362,7 +7396,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, * paths won't try to lock it and then oldsk. */ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); + sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w); sctp_assoc_migrate(assoc, newsk); + sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. diff --git a/net/tipc/link.c b/net/tipc/link.c index 72268eac4ec7..736fffb28ab6 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1084,25 +1084,6 @@ drop: return rc; } -/* - * Send protocol message to the other endpoint. 
- */ -void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg, - u32 gap, u32 tolerance, u32 priority) -{ - struct sk_buff *skb = NULL; - struct sk_buff_head xmitq; - - __skb_queue_head_init(&xmitq); - tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap, - tolerance, priority, &xmitq); - skb = __skb_dequeue(&xmitq); - if (!skb) - return; - tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr); - l->rcv_unacked = 0; -} - static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, u16 rcvgap, int tolerance, int priority, struct sk_buff_head *xmitq) @@ -1636,9 +1617,12 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) char *name; struct tipc_link *link; struct tipc_node *node; + struct sk_buff_head xmitq; struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; struct net *net = sock_net(skb->sk); + __skb_queue_head_init(&xmitq); + if (!info->attrs[TIPC_NLA_LINK]) return -EINVAL; @@ -1683,14 +1667,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); link->tolerance = tol; - tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0); + tipc_link_build_proto_msg(link, STATE_MSG, 0, 0, tol, 0, &xmitq); } if (props[TIPC_NLA_PROP_PRIO]) { u32 prio; prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); link->priority = prio; - tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio); + tipc_link_build_proto_msg(link, STATE_MSG, 0, 0, 0, prio, &xmitq); } if (props[TIPC_NLA_PROP_WIN]) { u32 win; @@ -1702,7 +1686,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) out: tipc_node_unlock(node); - + tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr); return res; } diff --git a/net/tipc/link.h b/net/tipc/link.h index 66d859b66c84..2a0d58671e88 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -153,7 +153,6 @@ struct tipc_stats { struct tipc_link { u32 addr; char name[TIPC_MAX_LINK_NAME]; - struct tipc_media_addr *media_addr; struct net 
*net; /* Management and link supervision data */ diff --git a/net/tipc/server.c b/net/tipc/server.c index 50f5b0ca7b3c..c416e5184a3f 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -618,14 +618,12 @@ int tipc_server_start(struct tipc_server *s) void tipc_server_stop(struct tipc_server *s) { struct tipc_conn *con; - int total = 0; int id; spin_lock_bh(&s->idr_lock); - for (id = 0; total < s->idr_in_use; id++) { + for (id = 0; s->idr_in_use; id++) { con = idr_find(&s->conn_idr, id); if (con) { - total++; spin_unlock_bh(&s->idr_lock); tipc_close_conn(con); spin_lock_bh(&s->idr_lock); diff --git a/net/unix/diag.c b/net/unix/diag.c index 4d9679701a6d..384c84e83462 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb, err = -ENOENT; if (sk == NULL) goto out_nosk; + if (!net_eq(sock_net(sk), net)) + goto out; err = sock_diag_check_cookie(sk, req->udiag_cookie); if (err) diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 9b5bd6d142dc..60324f7c72bd 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1209,10 +1209,14 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, if (signal_pending(current)) { err = sock_intr_errno(timeout); - goto out_wait_error; + sk->sk_state = SS_UNCONNECTED; + sock->state = SS_UNCONNECTED; + goto out_wait; } else if (timeout == 0) { err = -ETIMEDOUT; - goto out_wait_error; + sk->sk_state = SS_UNCONNECTED; + sock->state = SS_UNCONNECTED; + goto out_wait; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); @@ -1220,20 +1224,17 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, if (sk->sk_err) { err = -sk->sk_err; - goto out_wait_error; - } else + sk->sk_state = SS_UNCONNECTED; + sock->state = SS_UNCONNECTED; + } else { err = 0; + } out_wait: finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; - -out_wait_error: - sk->sk_state = SS_UNCONNECTED; - 
sock->state = SS_UNCONNECTED; - goto out_wait; } static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) @@ -1270,18 +1271,20 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) listener->sk_err == 0) { release_sock(listener); timeout = schedule_timeout(timeout); + finish_wait(sk_sleep(listener), &wait); lock_sock(listener); if (signal_pending(current)) { err = sock_intr_errno(timeout); - goto out_wait; + goto out; } else if (timeout == 0) { err = -EAGAIN; - goto out_wait; + goto out; } prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); } + finish_wait(sk_sleep(listener), &wait); if (listener->sk_err) err = -listener->sk_err; @@ -1301,19 +1304,15 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) */ if (err) { vconnected->rejected = true; - release_sock(connected); - sock_put(connected); - goto out_wait; + } else { + newsock->state = SS_CONNECTED; + sock_graft(connected, newsock); } - newsock->state = SS_CONNECTED; - sock_graft(connected, newsock); release_sock(connected); sock_put(connected); } -out_wait: - finish_wait(sk_sleep(listener), &wait); out: release_sock(listener); return err; @@ -1513,8 +1512,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, long timeout; int err; struct vsock_transport_send_notify_data send_data; - - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); sk = sock->sk; vsk = vsock_sk(sk); @@ -1557,11 +1555,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, if (err < 0) goto out; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); - while (total_written < len) { ssize_t written; + add_wait_queue(sk_sleep(sk), &wait); while (vsock_stream_has_space(vsk) == 0 && sk->sk_err == 0 && !(sk->sk_shutdown & SEND_SHUTDOWN) && @@ -1570,27 +1567,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, /* Don't wait for non-blocking sockets. 
*/ if (timeout == 0) { err = -EAGAIN; - goto out_wait; + remove_wait_queue(sk_sleep(sk), &wait); + goto out_err; } err = transport->notify_send_pre_block(vsk, &send_data); - if (err < 0) - goto out_wait; + if (err < 0) { + remove_wait_queue(sk_sleep(sk), &wait); + goto out_err; + } release_sock(sk); - timeout = schedule_timeout(timeout); + timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout); lock_sock(sk); if (signal_pending(current)) { err = sock_intr_errno(timeout); - goto out_wait; + remove_wait_queue(sk_sleep(sk), &wait); + goto out_err; } else if (timeout == 0) { err = -EAGAIN; - goto out_wait; + remove_wait_queue(sk_sleep(sk), &wait); + goto out_err; } - - prepare_to_wait(sk_sleep(sk), &wait, - TASK_INTERRUPTIBLE); } + remove_wait_queue(sk_sleep(sk), &wait); /* These checks occur both as part of and after the loop * conditional since we need to check before and after @@ -1598,16 +1598,16 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, */ if (sk->sk_err) { err = -sk->sk_err; - goto out_wait; + goto out_err; } else if ((sk->sk_shutdown & SEND_SHUTDOWN) || (vsk->peer_shutdown & RCV_SHUTDOWN)) { err = -EPIPE; - goto out_wait; + goto out_err; } err = transport->notify_send_pre_enqueue(vsk, &send_data); if (err < 0) - goto out_wait; + goto out_err; /* Note that enqueue will only write as many bytes as are free * in the produce queue, so we don't need to ensure len is @@ -1620,7 +1620,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, len - total_written); if (written < 0) { err = -ENOMEM; - goto out_wait; + goto out_err; } total_written += written; @@ -1628,14 +1628,13 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, err = transport->notify_send_post_enqueue( vsk, written, &send_data); if (err < 0) - goto out_wait; + goto out_err; } -out_wait: +out_err: if (total_written > 0) err = total_written; - finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; @@ -1716,21 
+1715,61 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (err < 0) goto out; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (1) { - s64 ready = vsock_stream_has_data(vsk); + s64 ready; - if (ready < 0) { - /* Invalid queue pair content. XXX This should be - * changed to a connection reset in a later change. - */ + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + ready = vsock_stream_has_data(vsk); - err = -ENOMEM; - goto out_wait; - } else if (ready > 0) { + if (ready == 0) { + if (sk->sk_err != 0 || + (sk->sk_shutdown & RCV_SHUTDOWN) || + (vsk->peer_shutdown & SEND_SHUTDOWN)) { + finish_wait(sk_sleep(sk), &wait); + break; + } + /* Don't wait for non-blocking sockets. */ + if (timeout == 0) { + err = -EAGAIN; + finish_wait(sk_sleep(sk), &wait); + break; + } + + err = transport->notify_recv_pre_block( + vsk, target, &recv_data); + if (err < 0) { + finish_wait(sk_sleep(sk), &wait); + break; + } + release_sock(sk); + timeout = schedule_timeout(timeout); + lock_sock(sk); + + if (signal_pending(current)) { + err = sock_intr_errno(timeout); + finish_wait(sk_sleep(sk), &wait); + break; + } else if (timeout == 0) { + err = -EAGAIN; + finish_wait(sk_sleep(sk), &wait); + break; + } + } else { ssize_t read; + finish_wait(sk_sleep(sk), &wait); + + if (ready < 0) { + /* Invalid queue pair content. XXX This should + * be changed to a connection reset in a later + * change. + */ + + err = -ENOMEM; + goto out; + } + err = transport->notify_recv_pre_dequeue( vsk, target, &recv_data); if (err < 0) @@ -1750,42 +1789,12 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, vsk, target, read, !(flags & MSG_PEEK), &recv_data); if (err < 0) - goto out_wait; + goto out; if (read >= target || flags & MSG_PEEK) break; target -= read; - } else { - if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN) - || (vsk->peer_shutdown & SEND_SHUTDOWN)) { - break; - } - /* Don't wait for non-blocking sockets. 
*/ - if (timeout == 0) { - err = -EAGAIN; - break; - } - - err = transport->notify_recv_pre_block( - vsk, target, &recv_data); - if (err < 0) - break; - - release_sock(sk); - timeout = schedule_timeout(timeout); - lock_sock(sk); - - if (signal_pending(current)) { - err = sock_intr_errno(timeout); - break; - } else if (timeout == 0) { - err = -EAGAIN; - break; - } - - prepare_to_wait(sk_sleep(sk), &wait, - TASK_INTERRUPTIBLE); } } @@ -1797,8 +1806,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (copied > 0) err = copied; -out_wait: - finish_wait(sk_sleep(sk), &wait); out: release_sock(sk); return err; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 4096f699ba00..5b3e5f54c79e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1307,7 +1307,7 @@ EXPORT_SYMBOL(xfrm_policy_delete); int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) { - struct net *net = xp_net(pol); + struct net *net = sock_net(sk); struct xfrm_policy *old_pol; #ifdef CONFIG_XFRM_SUB_POLICY diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index c0aa1b18fe8f..7944daeb7378 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1845,6 +1845,13 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen struct xfrm_mgr *km; struct xfrm_policy *pol = NULL; + if (!optval && !optlen) { + xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); + xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); + __sk_dst_reset(sk); + return 0; + } + if (optlen <= 0 || optlen > PAGE_SIZE) return -EMSGSIZE; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 68010a01ea36..8b71b09e5ab6 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1660,32 +1660,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr static int xfrm_dump_policy_done(struct netlink_callback *cb) { - struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; 
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; struct net *net = sock_net(cb->skb->sk); xfrm_policy_walk_done(walk, net); return 0; } +static int xfrm_dump_policy_start(struct netlink_callback *cb) +{ + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; + + BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args)); + + xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); + return 0; +} + static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); - struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; + struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; struct xfrm_dump_info info; - BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) > - sizeof(cb->args) - sizeof(cb->args[0])); - info.in_skb = cb->skb; info.out_skb = skb; info.nlmsg_seq = cb->nlh->nlmsg_seq; info.nlmsg_flags = NLM_F_MULTI; - if (!cb->args[0]) { - cb->args[0] = 1; - xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); - } - (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); return skb->len; @@ -2437,6 +2439,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { static const struct xfrm_link { int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); const struct nla_policy *nla_pol; @@ -2450,6 +2453,7 @@ static const struct xfrm_link { [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, + .start = xfrm_dump_policy_start, .dump = xfrm_dump_policy, .done = xfrm_dump_policy_done }, [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, @@ -2501,6 +2505,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { struct 
netlink_dump_control c = { + .start = link->start, .dump = link->dump, .done = link->done, }; diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 9ce9d5003dcc..19014293f927 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -297,6 +297,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file) if (iint->flags & IMA_DIGSIG) return; + if (iint->ima_file_status != INTEGRITY_PASS) + return; + rc = ima_collect_measurement(iint, file, NULL, NULL); if (rc < 0) return; diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index c21f09bf8b99..98289ba2a2e6 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -52,6 +52,8 @@ static int __init hash_setup(char *str) ima_hash_algo = HASH_ALGO_SHA1; else if (strncmp(str, "md5", 3) == 0) ima_hash_algo = HASH_ALGO_MD5; + else + return 1; goto out; } @@ -61,6 +63,8 @@ static int __init hash_setup(char *str) break; } } + if (i == HASH_ALGO__LAST) + return 1; out: hash_setup_done = 1; return 1; diff --git a/security/keys/Kconfig b/security/keys/Kconfig index 72483b8f1be5..1edb37eea81d 100644 --- a/security/keys/Kconfig +++ b/security/keys/Kconfig @@ -20,6 +20,10 @@ config KEYS If you are unsure as to whether this is required, answer N. 
+config KEYS_COMPAT + def_bool y + depends on COMPAT && KEYS + config PERSISTENT_KEYRINGS bool "Enable register of persistent per-UID keyrings" depends on KEYS diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 509aedcf8310..214ae2dc7f64 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -69,7 +69,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen, } ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); - kfree(sdesc); + kzfree(sdesc); return ret; } @@ -113,7 +113,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key, if (!ret) ret = crypto_shash_final(&sdesc->shash, digest); out: - kfree(sdesc); + kzfree(sdesc); return ret; } @@ -164,7 +164,7 @@ static int TSS_authhmac(unsigned char *digest, const unsigned char *key, paramdigest, TPM_NONCE_SIZE, h1, TPM_NONCE_SIZE, h2, 1, &c, 0, 0); out: - kfree(sdesc); + kzfree(sdesc); return ret; } @@ -245,7 +245,7 @@ static int TSS_checkhmac1(unsigned char *buffer, if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: - kfree(sdesc); + kzfree(sdesc); return ret; } @@ -346,7 +346,7 @@ static int TSS_checkhmac2(unsigned char *buffer, if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: - kfree(sdesc); + kzfree(sdesc); return ret; } @@ -563,7 +563,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype, *bloblen = storedsize; } out: - kfree(td); + kzfree(td); return ret; } @@ -677,7 +677,7 @@ static int key_seal(struct trusted_key_payload *p, if (ret < 0) pr_info("trusted_key: srkseal failed (%d)\n", ret); - kfree(tb); + kzfree(tb); return ret; } @@ -702,7 +702,7 @@ static int key_unseal(struct trusted_key_payload *p, /* pull migratable flag out of sealed key */ p->migratable = p->key[--p->key_len]; - kfree(tb); + kzfree(tb); return ret; } @@ -984,12 +984,12 @@ static int trusted_instantiate(struct key *key, if (!ret && options->pcrlock) ret = pcrlock(options->pcrlock); out: - kfree(datablob); - 
kfree(options); + kzfree(datablob); + kzfree(options); if (!ret) rcu_assign_keypointer(key, payload); else - kfree(payload); + kzfree(payload); return ret; } @@ -998,8 +998,7 @@ static void trusted_rcu_free(struct rcu_head *rcu) struct trusted_key_payload *p; p = container_of(rcu, struct trusted_key_payload, rcu); - memset(p->key, 0, p->key_len); - kfree(p); + kzfree(p); } /* @@ -1041,13 +1040,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; - kfree(new_p); + kzfree(new_p); goto out; } if (!new_o->keyhandle) { ret = -EINVAL; - kfree(new_p); + kzfree(new_p); goto out; } @@ -1061,22 +1060,22 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); - kfree(new_p); + kzfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); - kfree(new_p); + kzfree(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: - kfree(datablob); - kfree(new_o); + kzfree(datablob); + kzfree(new_o); return ret; } @@ -1095,34 +1094,30 @@ static long trusted_read(const struct key *key, char __user *buffer, p = rcu_dereference_key(key); if (!p) return -EINVAL; - if (!buffer || buflen <= 0) - return 2 * p->blob_len; - ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL); - if (!ascii_buf) - return -ENOMEM; - bufp = ascii_buf; - for (i = 0; i < p->blob_len; i++) - bufp = hex_byte_pack(bufp, p->blob[i]); - if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) { - kfree(ascii_buf); - return -EFAULT; + if (buffer && buflen >= 2 * p->blob_len) { + ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL); + if (!ascii_buf) + return -ENOMEM; + + bufp = ascii_buf; + for (i = 0; i < p->blob_len; i++) + bufp = hex_byte_pack(bufp, p->blob[i]); 
+ if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) { + kzfree(ascii_buf); + return -EFAULT; + } + kzfree(ascii_buf); } - kfree(ascii_buf); return 2 * p->blob_len; } /* - * trusted_destroy - before freeing the key, clear the decrypted data + * trusted_destroy - clear and free the key's payload */ static void trusted_destroy(struct key *key) { - struct trusted_key_payload *p = key->payload.data[0]; - - if (!p) - return; - memset(p->key, 0, p->key_len); - kfree(key->payload.data[0]); + kzfree(key->payload.data[0]); } struct key_type key_type_trusted = { diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 23009cabcd88..db2c1cdd93b7 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -267,8 +267,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream, runtime->rate); *audio_tstamp = ns_to_timespec(audio_nsecs); } - runtime->status->audio_tstamp = *audio_tstamp; - runtime->status->tstamp = *curr_tstamp; + if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) { + runtime->status->audio_tstamp = *audio_tstamp; + runtime->status->tstamp = *curr_tstamp; + } /* * re-take a driver timestamp to let apps detect if the reference tstamp diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c index aaff9ee32695..b30b2139e3f0 100644 --- a/sound/core/seq/oss/seq_oss_midi.c +++ b/sound/core/seq/oss/seq_oss_midi.c @@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq if (!dp->timer->running) len = snd_seq_oss_timer_start(dp->timer); if (ev->type == SNDRV_SEQ_EVENT_SYSEX) { - if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE) - snd_seq_oss_readq_puts(dp->readq, mdev->seq_device, - ev->data.ext.ptr, ev->data.ext.len); + snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev); } else { len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev); if (len > 0) diff --git a/sound/core/seq/oss/seq_oss_readq.c 
b/sound/core/seq/oss/seq_oss_readq.c index 046cb586fb2f..06b21226b4e7 100644 --- a/sound/core/seq/oss/seq_oss_readq.c +++ b/sound/core/seq/oss/seq_oss_readq.c @@ -118,6 +118,35 @@ snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, in } /* + * put MIDI sysex bytes; the event buffer may be chained, thus it has + * to be expanded via snd_seq_dump_var_event(). + */ +struct readq_sysex_ctx { + struct seq_oss_readq *readq; + int dev; +}; + +static int readq_dump_sysex(void *ptr, void *buf, int count) +{ + struct readq_sysex_ctx *ctx = ptr; + + return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count); +} + +int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev, + struct snd_seq_event *ev) +{ + struct readq_sysex_ctx ctx = { + .readq = q, + .dev = dev + }; + + if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) + return 0; + return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx); +} + +/* * copy an event to input queue: * return zero if enqueued */ diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h index f1463f1f449e..8d033ca2d23f 100644 --- a/sound/core/seq/oss/seq_oss_readq.h +++ b/sound/core/seq/oss/seq_oss_readq.h @@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq_oss_readq *q); void snd_seq_oss_readq_clear(struct seq_oss_readq *readq); unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait); int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len); +int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev, + struct snd_seq_event *ev); int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev); int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode); int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec); diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c index 
c4acf17e9f5e..e40a2cba5002 100644 --- a/sound/core/seq/seq_device.c +++ b/sound/core/seq/seq_device.c @@ -148,8 +148,10 @@ void snd_seq_device_load_drivers(void) flush_work(&autoload_work); } EXPORT_SYMBOL(snd_seq_device_load_drivers); +#define cancel_autoload_drivers() cancel_work_sync(&autoload_work) #else #define queue_autoload_drivers() /* NOP */ +#define cancel_autoload_drivers() /* NOP */ #endif /* @@ -159,6 +161,7 @@ static int snd_seq_device_dev_free(struct snd_device *device) { struct snd_seq_device *dev = device->device_data; + cancel_autoload_drivers(); put_device(&dev->dev); return 0; } diff --git a/sound/core/timer.c b/sound/core/timer.c index f0675acecc93..0e51e5cd33fe 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -318,8 +318,6 @@ int snd_timer_open(struct snd_timer_instance **ti, return 0; } -static int _snd_timer_stop(struct snd_timer_instance *timeri, int event); - /* * close a timer instance */ @@ -408,7 +406,6 @@ unsigned long snd_timer_resolution(struct snd_timer_instance *timeri) static void snd_timer_notify1(struct snd_timer_instance *ti, int event) { struct snd_timer *timer; - unsigned long flags; unsigned long resolution = 0; struct snd_timer_instance *ts; struct timespec tstamp; @@ -432,34 +429,66 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event) return; if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) return; - spin_lock_irqsave(&timer->lock, flags); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) ts->ccallback(ts, event + 100, &tstamp, resolution); - spin_unlock_irqrestore(&timer->lock, flags); } -static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri, - unsigned long sticks) +/* start/continue a master timer */ +static int snd_timer_start1(struct snd_timer_instance *timeri, + bool start, unsigned long ticks) { + struct snd_timer *timer; + int result; + unsigned long flags; + + timer = timeri->timer; + if (!timer) + return -EINVAL; + + 
spin_lock_irqsave(&timer->lock, flags); + if (timer->card && timer->card->shutdown) { + result = -ENODEV; + goto unlock; + } + if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | + SNDRV_TIMER_IFLG_START)) { + result = -EBUSY; + goto unlock; + } + + if (start) + timeri->ticks = timeri->cticks = ticks; + else if (!timeri->cticks) + timeri->cticks = 1; + timeri->pticks = 0; + list_move_tail(&timeri->active_list, &timer->active_list_head); if (timer->running) { if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) goto __start_now; timer->flags |= SNDRV_TIMER_FLG_RESCHED; timeri->flags |= SNDRV_TIMER_IFLG_START; - return 1; /* delayed start */ + result = 1; /* delayed start */ } else { - timer->sticks = sticks; + if (start) + timer->sticks = ticks; timer->hw.start(timer); __start_now: timer->running++; timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; - return 0; + result = 0; } + snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START : + SNDRV_TIMER_EVENT_CONTINUE); + unlock: + spin_unlock_irqrestore(&timer->lock, flags); + return result; } -static int snd_timer_start_slave(struct snd_timer_instance *timeri) +/* start/continue a slave timer */ +static int snd_timer_start_slave(struct snd_timer_instance *timeri, + bool start) { unsigned long flags; @@ -473,88 +502,37 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri) spin_lock(&timeri->timer->lock); list_add_tail(&timeri->active_list, &timeri->master->slave_active_head); + snd_timer_notify1(timeri, start ? 
SNDRV_TIMER_EVENT_START : + SNDRV_TIMER_EVENT_CONTINUE); spin_unlock(&timeri->timer->lock); } spin_unlock_irqrestore(&slave_active_lock, flags); return 1; /* delayed start */ } -/* - * start the timer instance - */ -int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) +/* stop/pause a master timer */ +static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop) { struct snd_timer *timer; - int result = -EINVAL; + int result = 0; unsigned long flags; - if (timeri == NULL || ticks < 1) - return -EINVAL; - if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { - result = snd_timer_start_slave(timeri); - if (result >= 0) - snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); - return result; - } - timer = timeri->timer; - if (timer == NULL) - return -EINVAL; - if (timer->card && timer->card->shutdown) - return -ENODEV; - spin_lock_irqsave(&timer->lock, flags); - if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | - SNDRV_TIMER_IFLG_START)) { - result = -EBUSY; - goto unlock; - } - timeri->ticks = timeri->cticks = ticks; - timeri->pticks = 0; - result = snd_timer_start1(timer, timeri, ticks); - unlock: - spin_unlock_irqrestore(&timer->lock, flags); - if (result >= 0) - snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); - return result; -} - -static int _snd_timer_stop(struct snd_timer_instance *timeri, int event) -{ - struct snd_timer *timer; - unsigned long flags; - - if (snd_BUG_ON(!timeri)) - return -ENXIO; - - if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { - spin_lock_irqsave(&slave_active_lock, flags); - if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) { - spin_unlock_irqrestore(&slave_active_lock, flags); - return -EBUSY; - } - if (timeri->timer) - spin_lock(&timeri->timer->lock); - timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; - list_del_init(&timeri->ack_list); - list_del_init(&timeri->active_list); - if (timeri->timer) - spin_unlock(&timeri->timer->lock); - spin_unlock_irqrestore(&slave_active_lock, flags); - goto __end; - } timer = 
timeri->timer; if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START))) { - spin_unlock_irqrestore(&timer->lock, flags); - return -EBUSY; + result = -EBUSY; + goto unlock; } list_del_init(&timeri->ack_list); list_del_init(&timeri->active_list); - if (timer->card && timer->card->shutdown) { - spin_unlock_irqrestore(&timer->lock, flags); - return 0; + if (timer->card && timer->card->shutdown) + goto unlock; + if (stop) { + timeri->cticks = timeri->ticks; + timeri->pticks = 0; } if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) && !(--timer->running)) { @@ -569,35 +547,60 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event) } } timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); + snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : + SNDRV_TIMER_EVENT_CONTINUE); + unlock: spin_unlock_irqrestore(&timer->lock, flags); - __end: - if (event != SNDRV_TIMER_EVENT_RESOLUTION) - snd_timer_notify1(timeri, event); + return result; +} + +/* stop/pause a slave timer */ +static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop) +{ + unsigned long flags; + + spin_lock_irqsave(&slave_active_lock, flags); + if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) { + spin_unlock_irqrestore(&slave_active_lock, flags); + return -EBUSY; + } + timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; + if (timeri->timer) { + spin_lock(&timeri->timer->lock); + list_del_init(&timeri->ack_list); + list_del_init(&timeri->active_list); + snd_timer_notify1(timeri, stop ? 
SNDRV_TIMER_EVENT_STOP : + SNDRV_TIMER_EVENT_CONTINUE); + spin_unlock(&timeri->timer->lock); + } + spin_unlock_irqrestore(&slave_active_lock, flags); return 0; } /* + * start the timer instance + */ +int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) +{ + if (timeri == NULL || ticks < 1) + return -EINVAL; + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) + return snd_timer_start_slave(timeri, true); + else + return snd_timer_start1(timeri, true, ticks); +} + +/* * stop the timer instance. * * do not call this from the timer callback! */ int snd_timer_stop(struct snd_timer_instance *timeri) { - struct snd_timer *timer; - unsigned long flags; - int err; - - err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP); - if (err < 0) - return err; - timer = timeri->timer; - if (!timer) - return -EINVAL; - spin_lock_irqsave(&timer->lock, flags); - timeri->cticks = timeri->ticks; - timeri->pticks = 0; - spin_unlock_irqrestore(&timer->lock, flags); - return 0; + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) + return snd_timer_stop_slave(timeri, true); + else + return snd_timer_stop1(timeri, true); } /* @@ -605,32 +608,10 @@ int snd_timer_stop(struct snd_timer_instance *timeri) */ int snd_timer_continue(struct snd_timer_instance *timeri) { - struct snd_timer *timer; - int result = -EINVAL; - unsigned long flags; - - if (timeri == NULL) - return result; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) - return snd_timer_start_slave(timeri); - timer = timeri->timer; - if (! 
timer) - return -EINVAL; - if (timer->card && timer->card->shutdown) - return -ENODEV; - spin_lock_irqsave(&timer->lock, flags); - if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) { - result = -EBUSY; - goto unlock; - } - if (!timeri->cticks) - timeri->cticks = 1; - timeri->pticks = 0; - result = snd_timer_start1(timer, timeri, timer->sticks); - unlock: - spin_unlock_irqrestore(&timer->lock, flags); - snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE); - return result; + return snd_timer_start_slave(timeri, false); + else + return snd_timer_start1(timeri, false, 0); } /* @@ -638,7 +619,10 @@ int snd_timer_continue(struct snd_timer_instance *timeri) */ int snd_timer_pause(struct snd_timer_instance * timeri) { - return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE); + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) + return snd_timer_stop_slave(timeri, false); + else + return snd_timer_stop1(timeri, false); } /* diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c index 0b4b028e8e98..de9155eed727 100644 --- a/sound/core/timer_compat.c +++ b/sound/core/timer_compat.c @@ -40,11 +40,11 @@ static int snd_timer_user_info_compat(struct file *file, struct snd_timer *t; tu = file->private_data; - if (snd_BUG_ON(!tu->timeri)) - return -ENXIO; + if (!tu->timeri) + return -EBADFD; t = tu->timeri->timer; - if (snd_BUG_ON(!t)) - return -ENXIO; + if (!t) + return -EBADFD; memset(&info, 0, sizeof(info)); info.card = t->card ? 
t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) @@ -73,8 +73,8 @@ static int snd_timer_user_status_compat(struct file *file, struct snd_timer_status32 status; tu = file->private_data; - if (snd_BUG_ON(!tu->timeri)) - return -ENXIO; + if (!tu->timeri) + return -EBADFD; memset(&status, 0, sizeof(status)); status.tstamp.tv_sec = tu->tstamp.tv_sec; status.tstamp.tv_nsec = tu->tstamp.tv_nsec; diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c index 11467272089e..ea7b377f0378 100644 --- a/sound/drivers/vx/vx_pcm.c +++ b/sound/drivers/vx/vx_pcm.c @@ -1015,7 +1015,7 @@ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream int size, space, count; struct snd_pcm_runtime *runtime = subs->runtime; - if (! pipe->prepared || (chip->chip_status & VX_STAT_IS_STALE)) + if (!pipe->running || (chip->chip_status & VX_STAT_IS_STALE)) return; size = runtime->buffer_size - snd_pcm_capture_avail(runtime); @@ -1048,8 +1048,10 @@ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream /* ok, let's accelerate! 
*/ int align = pipe->align * 3; space = (count / align) * align; - vx_pseudo_dma_read(chip, runtime, pipe, space); - count -= space; + if (space > 0) { + vx_pseudo_dma_read(chip, runtime, pipe, space); + count -= space; + } } /* read the rest of bytes */ while (count > 0) { diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index e6de496bffbe..e2e08fc73b50 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2316,6 +2316,9 @@ static const struct pci_device_id azx_ids[] = { /* AMD Hudson */ { PCI_DEVICE(0x1022, 0x780d), .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, + /* AMD Raven */ + { PCI_DEVICE(0x1022, 0x15e3), + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, /* ATI HDMI */ { PCI_DEVICE(0x1002, 0x0002), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a83688f8672e..e5730a7d0480 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -338,6 +338,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0288: case 0x10ec0295: case 0x10ec0298: + case 0x10ec0299: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; case 0x10ec0285: @@ -914,6 +915,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = { { 0x10ec0256, 0x1028, 0, "ALC3246" }, { 0x10ec0225, 0x1028, 0, "ALC3253" }, { 0x10ec0295, 0x1028, 0, "ALC3254" }, + { 0x10ec0299, 0x1028, 0, "ALC3271" }, { 0x10ec0670, 0x1025, 0, "ALC669X" }, { 0x10ec0676, 0x1025, 0, "ALC679X" }, { 0x10ec0282, 0x1043, 0, "ALC3229" }, @@ -3721,6 +3723,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_process_coef_fw(codec, coef0225); break; } @@ -3823,6 +3826,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_update_coef_idx(codec, 0x45, 
0x3f<<10, 0x31<<10); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0225); @@ -3881,6 +3885,7 @@ static void alc_headset_mode_default(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_process_coef_fw(codec, coef0225); break; case 0x10ec0236: @@ -3995,6 +4000,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_process_coef_fw(codec, coef0225); break; } @@ -4086,6 +4092,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_process_coef_fw(codec, coef0225); break; } @@ -4171,6 +4178,7 @@ static void alc_determine_headset_type(struct hda_codec *codec) break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: alc_process_coef_fw(codec, coef0225); msleep(800); val = alc_read_coef_idx(codec, 0x46); @@ -4396,7 +4404,7 @@ static void alc_no_shutup(struct hda_codec *codec) static void alc_fixup_no_shutup(struct hda_codec *codec, const struct hda_fixup *fix, int action) { - if (action == HDA_FIXUP_ACT_PRE_PROBE) { + if (action == HDA_FIXUP_ACT_PROBE) { struct alc_spec *spec = codec->spec; spec->shutup = alc_no_shutup; } @@ -6233,6 +6241,7 @@ static int patch_alc269(struct hda_codec *codec) break; case 0x10ec0225: case 0x10ec0295: + case 0x10ec0299: spec->codec_variant = ALC269_TYPE_ALC225; break; case 0x10ec0234: @@ -6245,7 +6254,7 @@ static int patch_alc269(struct hda_codec *codec) case 0x10ec0703: spec->codec_variant = ALC269_TYPE_ALC700; spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ - alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */ + alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ break; } @@ -7191,6 +7200,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269), 
HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269), HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269), HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861), HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd), HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861), diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c index af83b3b38052..8e457ea27f89 100644 --- a/sound/pci/vx222/vx222_ops.c +++ b/sound/pci/vx222/vx222_ops.c @@ -269,12 +269,12 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, /* Transfer using pseudo-dma. */ - if (offset + count > pipe->buffer_bytes) { + if (offset + count >= pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ - while (length-- > 0) { + for (; length > 0; length--) { outl(cpu_to_le32(*addr), port); addr++; } @@ -284,7 +284,7 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, pipe->hw_ptr += count; count >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ - while (count-- > 0) { + for (; count > 0; count--) { outl(cpu_to_le32(*addr), port); addr++; } @@ -307,12 +307,12 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, vx2_setup_pseudo_dma(chip, 0); /* Transfer using pseudo-dma. */ - if (offset + count > pipe->buffer_bytes) { + if (offset + count >= pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ - while (length-- > 0) + for (; length > 0; length--) *addr++ = le32_to_cpu(inl(port)); addr = (u32 *)runtime->dma_area; pipe->hw_ptr = 0; @@ -320,7 +320,7 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, pipe->hw_ptr += count; count >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. 
*/ - while (count-- > 0) + for (; count > 0; count--) *addr++ = le32_to_cpu(inl(port)); vx2_release_pseudo_dma(chip); diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c index 281972913c32..56aa1ba73ccc 100644 --- a/sound/pcmcia/vx/vxp_ops.c +++ b/sound/pcmcia/vx/vxp_ops.c @@ -369,12 +369,12 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, unsigned short *addr = (unsigned short *)(runtime->dma_area + offset); vx_setup_pseudo_dma(chip, 1); - if (offset + count > pipe->buffer_bytes) { + if (offset + count >= pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. */ - while (length-- > 0) { + for (; length > 0; length--) { outw(cpu_to_le16(*addr), port); addr++; } @@ -384,7 +384,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, pipe->hw_ptr += count; count >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. */ - while (count-- > 0) { + for (; count > 0; count--) { outw(cpu_to_le16(*addr), port); addr++; } @@ -411,12 +411,12 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, if (snd_BUG_ON(count % 2)) return; vx_setup_pseudo_dma(chip, 0); - if (offset + count > pipe->buffer_bytes) { + if (offset + count >= pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. */ - while (length-- > 0) + for (; length > 0; length--) *addr++ = le16_to_cpu(inw(port)); addr = (unsigned short *)runtime->dma_area; pipe->hw_ptr = 0; @@ -424,7 +424,7 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, pipe->hw_ptr += count; count >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. 
*/ - while (count-- > 1) + for (; count > 1; count--) *addr++ = le16_to_cpu(inw(port)); /* Disable DMA */ pchip->regDIALOG &= ~VXP_DLG_DMAREAD_SEL_MASK; diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 0bb415a28723..f1f990b325ad 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -1060,7 +1060,7 @@ static int wm_adsp_load(struct wm_adsp *dsp) const struct wmfw_region *region; const struct wm_adsp_region *mem; const char *region_name; - char *file, *text; + char *file, *text = NULL; struct wm_adsp_buf *buf; unsigned int reg; int regions = 0; @@ -1221,10 +1221,21 @@ static int wm_adsp_load(struct wm_adsp *dsp) regions, le32_to_cpu(region->len), offset, region_name); + if ((pos + le32_to_cpu(region->len) + sizeof(*region)) > + firmware->size) { + adsp_err(dsp, + "%s.%d: %s region len %d bytes exceeds file length %zu\n", + file, regions, region_name, + le32_to_cpu(region->len), firmware->size); + ret = -EINVAL; + goto out_fw; + } + if (text) { memcpy(text, region->data, le32_to_cpu(region->len)); adsp_info(dsp, "%s: %s\n", file, text); kfree(text); + text = NULL; } if (reg) { @@ -1269,6 +1280,7 @@ out_fw: regmap_async_complete(regmap); wm_adsp_buf_free(&buf_list); release_firmware(firmware); + kfree(text); out: kfree(file); @@ -1730,6 +1742,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) } if (reg) { + if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) > + firmware->size) { + adsp_err(dsp, + "%s.%d: %s region len %d bytes exceeds file length %zu\n", + file, blocks, region_name, + le32_to_cpu(blk->len), + firmware->size); + ret = -EINVAL; + goto out_fw; + } + buf = wm_adsp_buf_alloc(blk->data, le32_to_cpu(blk->len), &buf_list); diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c index 1fbdc7049d13..4bd4469d4904 100644 --- a/sound/soc/msm/apq8096-auto.c +++ b/sound/soc/msm/apq8096-auto.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -150,6 +150,9 @@ static int msm_pri_tdm_rate = SAMPLING_RATE_48KHZ; static int msm_pri_tdm_slot_width = 32; static int msm_pri_tdm_slot_num = 8; +static int msm_sec_tdm_slot_width = 32; +static int msm_sec_tdm_slot_num = 8; + /* EC Reference default values are set in mixer_paths.xml */ static int msm_ec_ref_ch = 4; static int msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S16_LE; @@ -494,13 +497,12 @@ static const char *const ec_ref_rate_text[] = {"0", "8000", "16000", static const char *const mi2s_rate_text[] = {"32000", "44100", "48000"}; -static const char *const pri_tdm_rate_text[] = {"8000", "16000", "48000"}; +static const char *const tdm_rate_text[] = {"8000", "16000", "48000"}; -static const char *const pri_tdm_slot_num_text[] = {"One", "Two", "Four", +static const char *const tdm_slot_num_text[] = {"One", "Two", "Four", "Eight", "Sixteen", "Thirtytwo"}; - -static const char *const pri_tdm_slot_width_text[] = {"16", "24", "32"}; +static const char *const tdm_slot_width_text[] = {"16", "24", "32"}; static struct afe_clk_set sec_mi2s_tx_clk = { AFE_API_VERSION_I2S_CONFIG, @@ -1142,6 +1144,98 @@ static int msm_pri_tdm_slot_num_put(struct snd_kcontrol *kcontrol, return 0; } +static int msm_sec_tdm_slot_width_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + ucontrol->value.integer.value[0] = msm_sec_tdm_slot_width; + pr_debug("%s: msm_sec_tdm_slot_width = %d\n", + __func__, msm_sec_tdm_slot_width); + return 0; +} + +static int msm_sec_tdm_slot_width_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 0: + msm_sec_tdm_slot_width = 16; + break; + case 1: + msm_sec_tdm_slot_width = 24; + break; + case 2: + msm_sec_tdm_slot_width = 32; + break; + 
default: + msm_sec_tdm_slot_width = 32; + break; + } + pr_debug("%s: msm_sec_tdm_slot_width= %d\n", + __func__, msm_sec_tdm_slot_width); + return 0; +} + +static int msm_sec_tdm_slot_num_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (msm_sec_tdm_slot_num) { + case 1: + ucontrol->value.integer.value[0] = 0; + break; + case 2: + ucontrol->value.integer.value[0] = 1; + break; + case 4: + ucontrol->value.integer.value[0] = 2; + break; + case 8: + ucontrol->value.integer.value[0] = 3; + break; + case 16: + ucontrol->value.integer.value[0] = 4; + break; + case 32: + default: + ucontrol->value.integer.value[0] = 5; + break; + } + + pr_debug("%s: msm_sec_tdm_slot_num = %d\n", + __func__, msm_sec_tdm_slot_num); + return 0; +} + +static int msm_sec_tdm_slot_num_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 0: + msm_sec_tdm_slot_num = 1; + break; + case 1: + msm_sec_tdm_slot_num = 2; + break; + case 2: + msm_sec_tdm_slot_num = 4; + break; + case 3: + msm_sec_tdm_slot_num = 8; + break; + case 4: + msm_sec_tdm_slot_num = 16; + break; + case 5: + msm_sec_tdm_slot_num = 32; + break; + default: + msm_sec_tdm_slot_num = 8; + break; + } + pr_debug("%s: msm_sec_tdm_slot_num = %d\n", + __func__, msm_sec_tdm_slot_num); + return 0; +} + static int msm_tdm_slot_mapping_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -3336,7 +3430,7 @@ static unsigned int tdm_param_set_slot_mask(int slots) unsigned int slot_mask = 0; unsigned int i = 0; - if ((slots != 16) && (slots != 8)) { + if ((slots <= 0) || (slots > 32)) { pr_err("%s: invalid slot number %d\n", __func__, slots); return -EINVAL; } @@ -3470,51 +3564,83 @@ static int apq8096_tdm_snd_hw_params(struct snd_pcm_substream *substream, slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_7]; break; case AFE_PORT_ID_SECONDARY_TDM_RX: + slots = msm_sec_tdm_slot_num; + slot_width = 
msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_0]; break; case AFE_PORT_ID_SECONDARY_TDM_RX_1: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_1]; break; case AFE_PORT_ID_SECONDARY_TDM_RX_2: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_2]; break; case AFE_PORT_ID_SECONDARY_TDM_RX_3: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_3]; break; case AFE_PORT_ID_SECONDARY_TDM_RX_4: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_4]; break; case AFE_PORT_ID_SECONDARY_TDM_RX_5: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_5]; break; case AFE_PORT_ID_SECONDARY_TDM_RX_6: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_6]; break; case AFE_PORT_ID_SECONDARY_TDM_RX_7: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_7]; break; case AFE_PORT_ID_SECONDARY_TDM_TX: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_0]; break; case AFE_PORT_ID_SECONDARY_TDM_TX_1: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_1]; break; case AFE_PORT_ID_SECONDARY_TDM_TX_2: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_2]; break; case AFE_PORT_ID_SECONDARY_TDM_TX_3: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_3]; break; case AFE_PORT_ID_SECONDARY_TDM_TX_4: + slots = msm_sec_tdm_slot_num; + slot_width = 
msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_4]; break; case AFE_PORT_ID_SECONDARY_TDM_TX_5: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_5]; break; case AFE_PORT_ID_SECONDARY_TDM_TX_6: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_6]; break; case AFE_PORT_ID_SECONDARY_TDM_TX_7: + slots = msm_sec_tdm_slot_num; + slot_width = msm_sec_tdm_slot_width; slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_7]; break; case AFE_PORT_ID_TERTIARY_TDM_RX: @@ -3707,9 +3833,9 @@ static const struct soc_enum msm_snd_enum[] = { SOC_ENUM_SINGLE_EXT(3, ec_ref_bit_format_text), SOC_ENUM_SINGLE_EXT(9, ec_ref_rate_text), SOC_ENUM_SINGLE_EXT(3, mi2s_rate_text), - SOC_ENUM_SINGLE_EXT(3, pri_tdm_rate_text), - SOC_ENUM_SINGLE_EXT(6, pri_tdm_slot_num_text), - SOC_ENUM_SINGLE_EXT(3, pri_tdm_slot_width_text), + SOC_ENUM_SINGLE_EXT(3, tdm_rate_text), + SOC_ENUM_SINGLE_EXT(6, tdm_slot_num_text), + SOC_ENUM_SINGLE_EXT(3, tdm_slot_width_text), }; static const struct snd_kcontrol_new msm_snd_controls[] = { @@ -3908,6 +4034,10 @@ static const struct snd_kcontrol_new msm_snd_controls[] = { msm_pri_tdm_slot_num_get, msm_pri_tdm_slot_num_put), SOC_ENUM_EXT("PRI_TDM Slot Width", msm_snd_enum[14], msm_pri_tdm_slot_width_get, msm_pri_tdm_slot_width_put), + SOC_ENUM_EXT("SEC_TDM Slot Number", msm_snd_enum[13], + msm_sec_tdm_slot_num_get, msm_sec_tdm_slot_num_put), + SOC_ENUM_EXT("SEC_TDM Slot Width", msm_snd_enum[14], + msm_sec_tdm_slot_width_get, msm_sec_tdm_slot_width_put), SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_0 Slot Mapping", SND_SOC_NOPM, PRIMARY_TDM_RX_0, 0xFFFF, 0, 8, msm_tdm_slot_mapping_get, diff --git a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c index 1286d3185780..37c43253a5bd 100644 --- a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c +++ 
b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c @@ -16,6 +16,7 @@ #include <sound/compress_params.h> #include <sound/msm-audio-effects-q6-v2.h> #include <sound/devdep_params.h> +#include <sound/q6common.h> #define MAX_ENABLE_CMD_SIZE 32 @@ -61,44 +62,35 @@ int msm_audio_effects_enable_extn(struct audio_client *ac, struct msm_nt_eff_all_config *effects, bool flag) { - uint32_t updt_params[MAX_ENABLE_CMD_SIZE] = {0}; - uint32_t params_length; + u32 flag_param = flag ? 1 : 0; + struct param_hdr_v3 param_hdr = {0}; int rc = 0; pr_debug("%s\n", __func__); - if (!ac) { - pr_err("%s: cannot set audio effects\n", __func__); - return -EINVAL; - } - params_length = 0; - updt_params[0] = AUDPROC_MODULE_ID_VIRTUALIZER; - updt_params[1] = AUDPROC_PARAM_ID_ENABLE; - updt_params[2] = VIRTUALIZER_ENABLE_PARAM_SZ; - updt_params[3] = flag; - params_length += COMMAND_PAYLOAD_SZ + VIRTUALIZER_ENABLE_PARAM_SZ; + param_hdr.module_id = AUDPROC_MODULE_ID_VIRTUALIZER; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_ENABLE; + param_hdr.param_size = VIRTUALIZER_ENABLE_PARAM_SZ; if (effects->virtualizer.enable_flag) - q6asm_send_audio_effects_params(ac, (char *)&updt_params[0], - params_length); - memset(updt_params, 0, MAX_ENABLE_CMD_SIZE); - params_length = 0; - updt_params[0] = AUDPROC_MODULE_ID_BASS_BOOST; - updt_params[1] = AUDPROC_PARAM_ID_ENABLE; - updt_params[2] = BASS_BOOST_ENABLE_PARAM_SZ; - updt_params[3] = flag; - params_length += COMMAND_PAYLOAD_SZ + BASS_BOOST_ENABLE_PARAM_SZ; + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr, + (u8 *) &flag_param); + + param_hdr.module_id = AUDPROC_MODULE_ID_BASS_BOOST; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_ENABLE; + param_hdr.param_size = BASS_BOOST_ENABLE_PARAM_SZ; if (effects->bass_boost.enable_flag) - q6asm_send_audio_effects_params(ac, (char *)&updt_params[0], - params_length); - memset(updt_params, 0, MAX_ENABLE_CMD_SIZE); - params_length = 0; - 
updt_params[0] = AUDPROC_MODULE_ID_POPLESS_EQUALIZER; - updt_params[1] = AUDPROC_PARAM_ID_ENABLE; - updt_params[2] = EQ_ENABLE_PARAM_SZ; - updt_params[3] = flag; - params_length += COMMAND_PAYLOAD_SZ + EQ_ENABLE_PARAM_SZ; + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr, + (u8 *) &flag_param); + + param_hdr.module_id = AUDPROC_MODULE_ID_POPLESS_EQUALIZER; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_ENABLE; + param_hdr.param_size = EQ_ENABLE_PARAM_SZ; if (effects->equalizer.enable_flag) - q6asm_send_audio_effects_params(ac, (char *)&updt_params[0], - params_length); + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr, + (u8 *) &flag_param); + return rc; } @@ -108,25 +100,32 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac, { long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1; char *params = NULL; + u8 *updt_params; int rc = 0; int devices = GET_NEXT(values, param_max_offset, rc); int num_commands = GET_NEXT(values, param_max_offset, rc); - int *updt_params, i, prev_enable_flag; - uint32_t params_length = (MAX_INBAND_PARAM_SZ); + int i, prev_enable_flag; + uint32_t max_params_length = 0; + uint32_t params_length = 0; + struct param_hdr_v3 param_hdr = {0}; + u8 *param_data = NULL; + u32 packed_data_size = 0; pr_debug("%s\n", __func__); if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) { pr_err("%s: cannot set audio effects\n", __func__); return -EINVAL; } - params = kzalloc(params_length, GFP_KERNEL); + params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL); if (!params) { pr_err("%s, params memory alloc failed\n", __func__); return -ENOMEM; } pr_debug("%s: device: %d\n", __func__, devices); - updt_params = (int *)params; - params_length = 0; + updt_params = (u8 *) params; + /* Set MID and IID once at top and only update param specific fields*/ + param_hdr.module_id = AUDPROC_MODULE_ID_VIRTUALIZER; + param_hdr.instance_id = INSTANCE_ID_0; for (i = 0; i < num_commands; i++) { uint32_t 
command_id = GET_NEXT(values, param_max_offset, rc); @@ -148,23 +147,19 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s:VIRT ENABLE prev:%d, new:%d\n", __func__, prev_enable_flag, virtualizer->enable_flag); - if (prev_enable_flag != virtualizer->enable_flag) { - params_length += COMMAND_PAYLOAD_SZ + - VIRTUALIZER_ENABLE_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "VIRT ENABLE", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_VIRTUALIZER; - *updt_params++ = + if (prev_enable_flag == virtualizer->enable_flag) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + VIRTUALIZER_ENABLE_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "VIRT ENABLE", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_VIRTUALIZER_ENABLE; - *updt_params++ = - VIRTUALIZER_ENABLE_PARAM_SZ; - *updt_params++ = - virtualizer->enable_flag; - } + param_hdr.param_size = VIRTUALIZER_ENABLE_PARAM_SZ; + param_data = (u8 *) &virtualizer->enable_flag; break; case VIRTUALIZER_STRENGTH: if (length != 1 || index_offset != 0) { @@ -176,23 +171,19 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: VIRT STRENGTH val: %d\n", __func__, virtualizer->strength); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - VIRTUALIZER_STRENGTH_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "VIRT STRENGTH", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_VIRTUALIZER; - *updt_params++ = - AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH; - *updt_params++ = - VIRTUALIZER_STRENGTH_PARAM_SZ; - *updt_params++ = - virtualizer->strength; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + VIRTUALIZER_STRENGTH_PARAM_SZ; + 
CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "VIRT STRENGTH", rc); + if (rc != 0) + break; + param_hdr.param_id = + AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH; + param_hdr.param_size = VIRTUALIZER_STRENGTH_PARAM_SZ; + param_data = (u8 *) &virtualizer->strength; break; case VIRTUALIZER_OUT_TYPE: if (length != 1 || index_offset != 0) { @@ -204,23 +195,19 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: VIRT OUT_TYPE val:%d\n", __func__, virtualizer->out_type); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - VIRTUALIZER_OUT_TYPE_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "VIRT OUT_TYPE", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_VIRTUALIZER; - *updt_params++ = - AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE; - *updt_params++ = - VIRTUALIZER_OUT_TYPE_PARAM_SZ; - *updt_params++ = - virtualizer->out_type; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + VIRTUALIZER_OUT_TYPE_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "VIRT OUT_TYPE", rc); + if (rc != 0) + break; + param_hdr.param_id = + AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE; + param_hdr.param_size = VIRTUALIZER_OUT_TYPE_PARAM_SZ; + param_data = (u8 *) &virtualizer->out_type; break; case VIRTUALIZER_GAIN_ADJUST: if (length != 1 || index_offset != 0) { @@ -232,32 +219,40 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: VIRT GAIN_ADJUST val:%d\n", __func__, virtualizer->gain_adjust); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - VIRTUALIZER_GAIN_ADJUST_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "VIRT GAIN_ADJUST", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_VIRTUALIZER; - 
*updt_params++ = + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + VIRTUALIZER_GAIN_ADJUST_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "VIRT GAIN_ADJUST", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_VIRTUALIZER_GAIN_ADJUST; - *updt_params++ = - VIRTUALIZER_GAIN_ADJUST_PARAM_SZ; - *updt_params++ = - virtualizer->gain_adjust; - } + param_hdr.param_size = VIRTUALIZER_GAIN_ADJUST_PARAM_SZ; + param_data = (u8 *) &virtualizer->gain_adjust; break; default: pr_err("%s: Invalid command to set config\n", __func__); - break; + continue; + } + if (rc) + goto invalid_config; + + rc = q6common_pack_pp_params(updt_params, ¶m_hdr, + param_data, &packed_data_size); + if (rc) { + pr_err("%s: Failed to pack params, error %d\n", + __func__, rc); + goto invalid_config; } + + updt_params += packed_data_size; + params_length += packed_data_size; } if (params_length && (rc == 0)) - q6asm_send_audio_effects_params(ac, params, - params_length); + q6asm_set_pp_params(ac, NULL, params, params_length); else pr_debug("%s: did not send pp params\n", __func__); invalid_config: @@ -271,25 +266,32 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, { long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1; char *params = NULL; + u8 *updt_params; int rc = 0; int devices = GET_NEXT(values, param_max_offset, rc); int num_commands = GET_NEXT(values, param_max_offset, rc); - int *updt_params, i, prev_enable_flag; - uint32_t params_length = (MAX_INBAND_PARAM_SZ); + int i, prev_enable_flag; + uint32_t max_params_length = 0; + uint32_t params_length = 0; + struct param_hdr_v3 param_hdr = {0}; + u8 *param_data = NULL; + u32 packed_data_size = 0; pr_debug("%s\n", __func__); if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) { pr_err("%s: cannot set audio effects\n", __func__); return -EINVAL; } - params = kzalloc(params_length, GFP_KERNEL); + params = 
kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL); if (!params) { pr_err("%s, params memory alloc failed\n", __func__); return -ENOMEM; } pr_debug("%s: device: %d\n", __func__, devices); - updt_params = (int *)params; - params_length = 0; + updt_params = (u8 *) params; + /* Set MID and IID once at top and only update param specific fields*/ + param_hdr.module_id = AUDPROC_MODULE_ID_REVERB; + param_hdr.instance_id = INSTANCE_ID_0; for (i = 0; i < num_commands; i++) { uint32_t command_id = GET_NEXT(values, param_max_offset, rc); @@ -311,23 +313,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s:REVERB_ENABLE prev:%d,new:%d\n", __func__, prev_enable_flag, reverb->enable_flag); - if (prev_enable_flag != reverb->enable_flag) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_ENABLE_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_ENABLE", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_ENABLE; - *updt_params++ = - REVERB_ENABLE_PARAM_SZ; - *updt_params++ = - reverb->enable_flag; - } + if (prev_enable_flag == reverb->enable_flag) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_ENABLE_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_ENABLE", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_ENABLE; + param_hdr.param_size = REVERB_ENABLE_PARAM_SZ; + param_data = (u8 *) &reverb->enable_flag; break; case REVERB_MODE: if (length != 1 || index_offset != 0) { @@ -339,23 +336,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_MODE val:%d\n", __func__, reverb->mode); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_MODE_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - 
"REVERB_MODE", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_MODE; - *updt_params++ = - REVERB_MODE_PARAM_SZ; - *updt_params++ = - reverb->mode; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_MODE_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_MODE", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_MODE; + param_hdr.param_size = REVERB_MODE_PARAM_SZ; + param_data = (u8 *) &reverb->mode; break; case REVERB_PRESET: if (length != 1 || index_offset != 0) { @@ -367,23 +359,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_PRESET val:%d\n", __func__, reverb->preset); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_PRESET_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_PRESET", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_PRESET; - *updt_params++ = - REVERB_PRESET_PARAM_SZ; - *updt_params++ = - reverb->preset; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_PRESET_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_PRESET", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_PRESET; + param_hdr.param_size = REVERB_PRESET_PARAM_SZ; + param_data = (u8 *) &reverb->preset; break; case REVERB_WET_MIX: if (length != 1 || index_offset != 0) { @@ -395,23 +382,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_WET_MIX val:%d\n", __func__, reverb->wet_mix); - if (command_config_state == CONFIG_SET) { - 
params_length += COMMAND_PAYLOAD_SZ + - REVERB_WET_MIX_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_WET_MIX", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_WET_MIX; - *updt_params++ = - REVERB_WET_MIX_PARAM_SZ; - *updt_params++ = - reverb->wet_mix; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_WET_MIX_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_WET_MIX", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_WET_MIX; + param_hdr.param_size = REVERB_WET_MIX_PARAM_SZ; + param_data = (u8 *) &reverb->wet_mix; break; case REVERB_GAIN_ADJUST: if (length != 1 || index_offset != 0) { @@ -423,23 +405,19 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_GAIN_ADJUST val:%d\n", __func__, reverb->gain_adjust); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_GAIN_ADJUST_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_GAIN_ADJUST", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST; - *updt_params++ = - REVERB_GAIN_ADJUST_PARAM_SZ; - *updt_params++ = - reverb->gain_adjust; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_GAIN_ADJUST_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_GAIN_ADJUST", rc); + if (rc != 0) + break; + param_hdr.param_id = + AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST; + param_hdr.param_size = REVERB_GAIN_ADJUST_PARAM_SZ; + param_data = (u8 *) &reverb->gain_adjust; break; case REVERB_ROOM_LEVEL: if (length != 1 || index_offset != 0) { @@ -451,23 +429,18 @@ int 
msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_ROOM_LEVEL val:%d\n", __func__, reverb->room_level); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_ROOM_LEVEL_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_ROOM_LEVEL", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL; - *updt_params++ = - REVERB_ROOM_LEVEL_PARAM_SZ; - *updt_params++ = - reverb->room_level; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_ROOM_LEVEL_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_ROOM_LEVEL", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL; + param_hdr.param_size = REVERB_ROOM_LEVEL_PARAM_SZ; + param_data = (u8 *) &reverb->room_level; break; case REVERB_ROOM_HF_LEVEL: if (length != 1 || index_offset != 0) { @@ -479,23 +452,19 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_ROOM_HF_LEVEL val%d\n", __func__, reverb->room_hf_level); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_ROOM_HF_LEVEL_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_ROOM_HF_LEVEL", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL; - *updt_params++ = - REVERB_ROOM_HF_LEVEL_PARAM_SZ; - *updt_params++ = - reverb->room_hf_level; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_ROOM_HF_LEVEL_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_ROOM_HF_LEVEL", rc); + if (rc != 0) + 
break; + param_hdr.param_id = + AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL; + param_hdr.param_size = REVERB_ROOM_HF_LEVEL_PARAM_SZ; + param_data = (u8 *) &reverb->room_hf_level; break; case REVERB_DECAY_TIME: if (length != 1 || index_offset != 0) { @@ -507,23 +476,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_DECAY_TIME val:%d\n", __func__, reverb->decay_time); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_DECAY_TIME_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_DECAY_TIME", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_DECAY_TIME; - *updt_params++ = - REVERB_DECAY_TIME_PARAM_SZ; - *updt_params++ = - reverb->decay_time; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_DECAY_TIME_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_DECAY_TIME", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_DECAY_TIME; + param_hdr.param_size = REVERB_DECAY_TIME_PARAM_SZ; + param_data = (u8 *) &reverb->decay_time; break; case REVERB_DECAY_HF_RATIO: if (length != 1 || index_offset != 0) { @@ -535,23 +499,19 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_DECAY_HF_RATIO val%d\n", __func__, reverb->decay_hf_ratio); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_DECAY_HF_RATIO_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_DECAY_HF_RATIO", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO; - *updt_params++ = - REVERB_DECAY_HF_RATIO_PARAM_SZ; - *updt_params++ = - 
reverb->decay_hf_ratio; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_DECAY_HF_RATIO_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_DECAY_HF_RATIO", rc); + if (rc != 0) + break; + param_hdr.param_id = + AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO; + param_hdr.param_size = REVERB_DECAY_HF_RATIO_PARAM_SZ; + param_data = (u8 *) &reverb->decay_hf_ratio; break; case REVERB_REFLECTIONS_LEVEL: if (length != 1 || index_offset != 0) { @@ -563,23 +523,20 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_REFLECTIONS_LEVEL val:%d\n", __func__, reverb->reflections_level); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_REFLECTIONS_LEVEL_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_REFLECTIONS_LEVEL", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_REFLECTIONS_LEVEL_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_REFLECTIONS_LEVEL", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_REFLECTIONS_LEVEL; - *updt_params++ = + param_hdr.param_size = REVERB_REFLECTIONS_LEVEL_PARAM_SZ; - *updt_params++ = - reverb->reflections_level; - } + param_data = (u8 *) &reverb->reflections_level; break; case REVERB_REFLECTIONS_DELAY: if (length != 1 || index_offset != 0) { @@ -591,23 +548,20 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_REFLECTIONS_DELAY val:%d\n", __func__, reverb->reflections_delay); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_REFLECTIONS_DELAY_PARAM_SZ; - 
CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_REFLECTIONS_DELAY", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_REFLECTIONS_DELAY_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_REFLECTIONS_DELAY", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_REFLECTIONS_DELAY; - *updt_params++ = + param_hdr.param_size = REVERB_REFLECTIONS_DELAY_PARAM_SZ; - *updt_params++ = - reverb->reflections_delay; - } + param_data = (u8 *) &reverb->reflections_delay; break; case REVERB_LEVEL: if (length != 1 || index_offset != 0) { @@ -619,23 +573,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_LEVEL val:%d\n", __func__, reverb->level); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_LEVEL_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_LEVEL", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_LEVEL; - *updt_params++ = - REVERB_LEVEL_PARAM_SZ; - *updt_params++ = - reverb->level; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_LEVEL_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_LEVEL", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_LEVEL; + param_hdr.param_size = REVERB_LEVEL_PARAM_SZ; + param_data = (u8 *) &reverb->level; break; case REVERB_DELAY: if (length != 1 || index_offset != 0) { @@ -647,23 +596,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s:REVERB_DELAY val:%d\n", __func__, 
reverb->delay); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_DELAY_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_DELAY", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_DELAY; - *updt_params++ = - REVERB_DELAY_PARAM_SZ; - *updt_params++ = - reverb->delay; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_DELAY_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_DELAY", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_DELAY; + param_hdr.param_size = REVERB_DELAY_PARAM_SZ; + param_data = (u8 *) &reverb->delay; break; case REVERB_DIFFUSION: if (length != 1 || index_offset != 0) { @@ -675,23 +619,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_DIFFUSION val:%d\n", __func__, reverb->diffusion); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_DIFFUSION_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_DIFFUSION", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_DIFFUSION; - *updt_params++ = - REVERB_DIFFUSION_PARAM_SZ; - *updt_params++ = - reverb->diffusion; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_DIFFUSION_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_DIFFUSION", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_DIFFUSION; + param_hdr.param_size = REVERB_DIFFUSION_PARAM_SZ; + param_data = (u8 *) &reverb->diffusion; break; case REVERB_DENSITY: if (length != 1 || index_offset != 0) { @@ -703,32 +642,39 
@@ int msm_audio_effects_reverb_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: REVERB_DENSITY val:%d\n", __func__, reverb->density); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - REVERB_DENSITY_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "REVERB_DENSITY", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_REVERB; - *updt_params++ = - AUDPROC_PARAM_ID_REVERB_DENSITY; - *updt_params++ = - REVERB_DENSITY_PARAM_SZ; - *updt_params++ = - reverb->density; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + REVERB_DENSITY_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "REVERB_DENSITY", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_DENSITY; + param_hdr.param_size = REVERB_DENSITY_PARAM_SZ; + param_data = (u8 *) &reverb->density; break; default: pr_err("%s: Invalid command to set config\n", __func__); - break; + continue; } + if (rc) + goto invalid_config; + + rc = q6common_pack_pp_params(updt_params, ¶m_hdr, + param_data, &packed_data_size); + if (rc) { + pr_err("%s: Failed to pack params, error %d\n", + __func__, rc); + goto invalid_config; + } + + updt_params += packed_data_size; + params_length += packed_data_size; } if (params_length && (rc == 0)) - q6asm_send_audio_effects_params(ac, params, - params_length); + q6asm_set_pp_params(ac, NULL, params, params_length); else pr_debug("%s: did not send pp params\n", __func__); invalid_config: @@ -742,25 +688,32 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac, { long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1; char *params = NULL; + u8 *updt_params; int rc = 0; int devices = GET_NEXT(values, param_max_offset, rc); int num_commands = GET_NEXT(values, param_max_offset, rc); - int *updt_params, i, prev_enable_flag; - uint32_t params_length = 
(MAX_INBAND_PARAM_SZ); + int i, prev_enable_flag; + uint32_t max_params_length = 0; + uint32_t params_length = 0; + struct param_hdr_v3 param_hdr = {0}; + u8 *param_data = NULL; + u32 packed_data_size = 0; pr_debug("%s\n", __func__); if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) { pr_err("%s: cannot set audio effects\n", __func__); return -EINVAL; } - params = kzalloc(params_length, GFP_KERNEL); + params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL); if (!params) { pr_err("%s, params memory alloc failed\n", __func__); return -ENOMEM; } pr_debug("%s: device: %d\n", __func__, devices); - updt_params = (int *)params; - params_length = 0; + updt_params = (u8 *) params; + /* Set MID and IID once at top and only update param specific fields*/ + param_hdr.module_id = AUDPROC_MODULE_ID_BASS_BOOST; + param_hdr.instance_id = INSTANCE_ID_0; for (i = 0; i < num_commands; i++) { uint32_t command_id = GET_NEXT(values, param_max_offset, rc); @@ -783,23 +736,18 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac, pr_debug("%s: BASS_BOOST_ENABLE prev:%d new:%d\n", __func__, prev_enable_flag, bass_boost->enable_flag); - if (prev_enable_flag != bass_boost->enable_flag) { - params_length += COMMAND_PAYLOAD_SZ + - BASS_BOOST_ENABLE_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "BASS_BOOST_ENABLE", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_BASS_BOOST; - *updt_params++ = - AUDPROC_PARAM_ID_BASS_BOOST_ENABLE; - *updt_params++ = - BASS_BOOST_ENABLE_PARAM_SZ; - *updt_params++ = - bass_boost->enable_flag; - } + if (prev_enable_flag == bass_boost->enable_flag) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + BASS_BOOST_ENABLE_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "BASS_BOOST_ENABLE", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_BASS_BOOST_ENABLE; + param_hdr.param_size = BASS_BOOST_ENABLE_PARAM_SZ; + param_data = (u8 *) 
&bass_boost->enable_flag; break; case BASS_BOOST_MODE: if (length != 1 || index_offset != 0) { @@ -811,23 +759,18 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: BASS_BOOST_MODE val:%d\n", __func__, bass_boost->mode); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - BASS_BOOST_MODE_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "BASS_BOOST_MODE", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_BASS_BOOST; - *updt_params++ = - AUDPROC_PARAM_ID_BASS_BOOST_MODE; - *updt_params++ = - BASS_BOOST_MODE_PARAM_SZ; - *updt_params++ = - bass_boost->mode; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + BASS_BOOST_MODE_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "BASS_BOOST_MODE", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_BASS_BOOST_MODE; + param_hdr.param_size = BASS_BOOST_MODE_PARAM_SZ; + param_data = (u8 *) &bass_boost->mode; break; case BASS_BOOST_STRENGTH: if (length != 1 || index_offset != 0) { @@ -839,32 +782,40 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: BASS_BOOST_STRENGTH val:%d\n", __func__, bass_boost->strength); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - BASS_BOOST_STRENGTH_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "BASS_BOOST_STRENGTH", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_BASS_BOOST; - *updt_params++ = - AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH; - *updt_params++ = - BASS_BOOST_STRENGTH_PARAM_SZ; - *updt_params++ = - bass_boost->strength; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + BASS_BOOST_STRENGTH_PARAM_SZ; + 
CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "BASS_BOOST_STRENGTH", rc); + if (rc != 0) + break; + param_hdr.param_id = + AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH; + param_hdr.param_size = BASS_BOOST_STRENGTH_PARAM_SZ; + param_data = (u8 *) &bass_boost->strength; break; default: pr_err("%s: Invalid command to set config\n", __func__); - break; + continue; } + if (rc) + goto invalid_config; + + rc = q6common_pack_pp_params(updt_params, ¶m_hdr, + param_data, &packed_data_size); + if (rc) { + pr_err("%s: Failed to pack params, error %d\n", + __func__, rc); + goto invalid_config; + } + + updt_params += packed_data_size; + params_length += packed_data_size; } if (params_length && (rc == 0)) - q6asm_send_audio_effects_params(ac, params, - params_length); + q6asm_set_pp_params(ac, NULL, params, params_length); else pr_debug("%s: did not send pp params\n", __func__); invalid_config: @@ -878,25 +829,32 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac, { long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1; char *params = NULL; + u8 *updt_params; int rc = 0; int devices = GET_NEXT(values, param_max_offset, rc); int num_commands = GET_NEXT(values, param_max_offset, rc); - int *updt_params, i, j, prev_enable_flag; - uint32_t params_length = (MAX_INBAND_PARAM_SZ); + int i, prev_enable_flag; + uint32_t max_params_length = 0; + uint32_t params_length = 0; + struct param_hdr_v3 param_hdr = {0}; + u8 *param_data = NULL; + u32 packed_data_size = 0; pr_debug("%s\n", __func__); if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) { pr_err("%s: cannot set audio effects\n", __func__); return -EINVAL; } - params = kzalloc(params_length, GFP_KERNEL); + params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL); if (!params) { pr_err("%s, params memory alloc failed\n", __func__); return -ENOMEM; } pr_debug("%s: device: %d\n", __func__, devices); - updt_params = (int *)params; - params_length = 0; + updt_params = (u8 *) params; + /* Set MID and IID once at top and 
only update param specific fields*/ + param_hdr.module_id = AUDPROC_MODULE_ID_PBE; + param_hdr.instance_id = INSTANCE_ID_0; for (i = 0; i < num_commands; i++) { uint32_t command_id = GET_NEXT(values, param_max_offset, rc); @@ -917,23 +875,18 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac, prev_enable_flag = pbe->enable_flag; pbe->enable_flag = GET_NEXT(values, param_max_offset, rc); - if (prev_enable_flag != pbe->enable_flag) { - params_length += COMMAND_PAYLOAD_SZ + - PBE_ENABLE_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "PBE_ENABLE", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_PBE; - *updt_params++ = - AUDPROC_PARAM_ID_PBE_ENABLE; - *updt_params++ = - PBE_ENABLE_PARAM_SZ; - *updt_params++ = - pbe->enable_flag; - } + if (prev_enable_flag == pbe->enable_flag) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + PBE_ENABLE_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "PBE_ENABLE", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_PBE_ENABLE; + param_hdr.param_size = PBE_ENABLE_PARAM_SZ; + param_data = (u8 *) &pbe->enable_flag; break; case PBE_CONFIG: pr_debug("%s: PBE_PARAM length %u\n", __func__, length); @@ -944,37 +897,38 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac, rc = -EINVAL; goto invalid_config; } - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + length; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "PBE_PARAM", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_PBE; - *updt_params++ = - AUDPROC_PARAM_ID_PBE_PARAM_CONFIG; - *updt_params++ = - length; - for (j = 0; j < length; ) { - j += sizeof(*updt_params); - *updt_params++ = - GET_NEXT( - values, - param_max_offset, - rc); - } - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = + params_length + COMMAND_IID_PAYLOAD_SZ + length; + 
CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "PBE_PARAM", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_PBE_PARAM_CONFIG; + param_hdr.param_size = length; + param_data = (u8 *) values; break; default: pr_err("%s: Invalid command to set config\n", __func__); - break; + continue; + } + if (rc) + goto invalid_config; + + rc = q6common_pack_pp_params(updt_params, ¶m_hdr, + param_data, &packed_data_size); + if (rc) { + pr_err("%s: Failed to pack params, error %d\n", + __func__, rc); + goto invalid_config; } + + updt_params += packed_data_size; + params_length += packed_data_size; } if (params_length && (rc == 0)) - q6asm_send_audio_effects_params(ac, params, - params_length); + q6asm_set_pp_params(ac, NULL, params, params_length); invalid_config: kfree(params); return rc; @@ -986,25 +940,35 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac, { long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1; char *params = NULL; + u8 *updt_params = NULL; int rc = 0; int devices = GET_NEXT(values, param_max_offset, rc); int num_commands = GET_NEXT(values, param_max_offset, rc); - int *updt_params, i, prev_enable_flag; - uint32_t params_length = (MAX_INBAND_PARAM_SZ); + int i, prev_enable_flag; + uint32_t max_params_length = 0; + uint32_t params_length = 0; + struct param_hdr_v3 param_hdr = {0}; + u8 *param_data = NULL; + u32 packed_data_size = 0; + u8 *eq_config_data = NULL; + u32 *updt_config_data = NULL; + int config_param_length; pr_debug("%s\n", __func__); if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) { pr_err("%s: cannot set audio effects\n", __func__); return -EINVAL; } - params = kzalloc(params_length, GFP_KERNEL); + params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL); if (!params) { pr_err("%s, params memory alloc failed\n", __func__); return -ENOMEM; } pr_debug("%s: device: %d\n", __func__, devices); - updt_params = (int *)params; - params_length = 0; + updt_params = (u8 *) params; + /* Set MID and IID 
once at top and only update param specific fields*/ + param_hdr.module_id = AUDPROC_MODULE_ID_POPLESS_EQUALIZER; + param_hdr.instance_id = INSTANCE_ID_0; for (i = 0; i < num_commands; i++) { uint32_t command_id = GET_NEXT(values, param_max_offset, rc); @@ -1028,23 +992,18 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: EQ_ENABLE prev:%d new:%d\n", __func__, prev_enable_flag, eq->enable_flag); - if (prev_enable_flag != eq->enable_flag) { - params_length += COMMAND_PAYLOAD_SZ + - EQ_ENABLE_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "EQ_ENABLE", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_POPLESS_EQUALIZER; - *updt_params++ = - AUDPROC_PARAM_ID_EQ_ENABLE; - *updt_params++ = - EQ_ENABLE_PARAM_SZ; - *updt_params++ = - eq->enable_flag; - } + if (prev_enable_flag == eq->enable_flag) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + EQ_ENABLE_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "EQ_ENABLE", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_EQ_ENABLE; + param_hdr.param_size = EQ_ENABLE_PARAM_SZ; + param_data = (u8 *) &eq->enable_flag; break; case EQ_CONFIG: if (length < EQ_CONFIG_PARAM_LEN || index_offset != 0) { @@ -1093,43 +1052,46 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac, eq->per_band_cfg[idx].quality_factor = GET_NEXT(values, param_max_offset, rc); } - if (command_config_state == CONFIG_SET) { - int config_param_length = EQ_CONFIG_PARAM_SZ + - (EQ_CONFIG_PER_BAND_PARAM_SZ* - eq->config.num_bands); - params_length += COMMAND_PAYLOAD_SZ + - config_param_length; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "EQ_CONFIG", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_POPLESS_EQUALIZER; - *updt_params++ = - AUDPROC_PARAM_ID_EQ_CONFIG; - *updt_params++ = - config_param_length; - 
*updt_params++ = - eq->config.eq_pregain; - *updt_params++ = - eq->config.preset_id; - *updt_params++ = - eq->config.num_bands; - for (idx = 0; idx < MAX_EQ_BANDS; idx++) { - if (eq->per_band_cfg[idx].band_idx < 0) - continue; - *updt_params++ = + if (command_config_state != CONFIG_SET) + break; + config_param_length = EQ_CONFIG_PARAM_SZ + + (EQ_CONFIG_PER_BAND_PARAM_SZ * + eq->config.num_bands); + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + config_param_length; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "EQ_CONFIG", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_EQ_CONFIG; + param_hdr.param_size = config_param_length; + + if (!eq_config_data) + eq_config_data = kzalloc(config_param_length, + GFP_KERNEL); + else + memset(eq_config_data, 0, config_param_length); + if (!eq_config_data) + return -ENOMEM; + param_data = eq_config_data; + updt_config_data = (u32 *) eq_config_data; + *updt_config_data++ = eq->config.eq_pregain; + *updt_config_data++ = eq->config.preset_id; + *updt_config_data++ = eq->config.num_bands; + for (idx = 0; idx < MAX_EQ_BANDS; idx++) { + if (eq->per_band_cfg[idx].band_idx < 0) + continue; + *updt_config_data++ = eq->per_band_cfg[idx].filter_type; - *updt_params++ = + *updt_config_data++ = eq->per_band_cfg[idx].freq_millihertz; - *updt_params++ = + *updt_config_data++ = eq->per_band_cfg[idx].gain_millibels; - *updt_params++ = + *updt_config_data++ = eq->per_band_cfg[idx].quality_factor; - *updt_params++ = + *updt_config_data++ = eq->per_band_cfg[idx].band_idx; - } } break; case EQ_BAND_INDEX: @@ -1147,23 +1109,18 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac, eq->band_index = idx; pr_debug("%s: EQ_BAND_INDEX val:%d\n", __func__, eq->band_index); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - EQ_BAND_INDEX_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "EQ_BAND_INDEX", rc); - if (rc != 0) - goto 
invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_POPLESS_EQUALIZER; - *updt_params++ = - AUDPROC_PARAM_ID_EQ_BAND_INDEX; - *updt_params++ = - EQ_BAND_INDEX_PARAM_SZ; - *updt_params++ = - eq->band_index; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + EQ_BAND_INDEX_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "EQ_BAND_INDEX", rc); + if (rc != 0) + break; + param_hdr.param_id = AUDPROC_PARAM_ID_EQ_BAND_INDEX; + param_hdr.param_size = EQ_BAND_INDEX_PARAM_SZ; + param_data = (u8 *) &eq->band_index; break; case EQ_SINGLE_BAND_FREQ: if (length != 1 || index_offset != 0) { @@ -1179,36 +1136,45 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac, GET_NEXT(values, param_max_offset, rc); pr_debug("%s: EQ_SINGLE_BAND_FREQ idx:%d, val:%d\n", __func__, eq->band_index, eq->freq_millihertz); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - EQ_SINGLE_BAND_FREQ_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "EQ_SINGLE_BAND_FREQ", rc); - if (rc != 0) - goto invalid_config; - *updt_params++ = - AUDPROC_MODULE_ID_POPLESS_EQUALIZER; - *updt_params++ = - AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ; - *updt_params++ = - EQ_SINGLE_BAND_FREQ_PARAM_SZ; - *updt_params++ = - eq->freq_millihertz; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + EQ_SINGLE_BAND_FREQ_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "EQ_SINGLE_BAND_FREQ", rc); + if (rc != 0) + break; + param_hdr.param_id = + AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ; + param_hdr.param_size = EQ_SINGLE_BAND_FREQ_PARAM_SZ; + param_data = (u8 *) &eq->freq_millihertz; break; default: pr_err("%s: Invalid command to set config\n", __func__); - break; + continue; + } + if (rc) + goto invalid_config; + + rc = q6common_pack_pp_params(updt_params, ¶m_hdr, + param_data, 
&packed_data_size); + if (rc) { + pr_err("%s: Failed to pack params, error %d\n", + __func__, rc); + goto invalid_config; } + + updt_params += packed_data_size; + params_length += packed_data_size; } if (params_length && (rc == 0)) - q6asm_send_audio_effects_params(ac, params, - params_length); + q6asm_set_pp_params(ac, NULL, params, params_length); else pr_debug("%s: did not send pp params\n", __func__); invalid_config: kfree(params); + kfree(eq_config_data); return rc; } @@ -1220,8 +1186,13 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac, int devices; int num_commands; char *params = NULL; - int *updt_params, i; - uint32_t params_length = (MAX_INBAND_PARAM_SZ); + u8 *updt_params; + int i; + uint32_t vol_gain_2ch = 0; + uint32_t max_params_length = 0; + uint32_t params_length = 0; + struct param_hdr_v3 param_hdr = {0}; + u32 packed_data_size = 0; long *param_max_offset; int rc = 0; @@ -1238,13 +1209,14 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac, pr_err("%s: cannot set audio effects\n", __func__); return -EINVAL; } - params = kzalloc(params_length, GFP_KERNEL); + params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL); if (!params) { pr_err("%s, params memory alloc failed\n", __func__); return -ENOMEM; } - updt_params = (int *)params; - params_length = 0; + updt_params = (u8 *) params; + /* Set MID and IID once at top and only update param specific fields*/ + q6asm_set_soft_volume_module_instance_ids(instance, ¶m_hdr); for (i = 0; i < num_commands; i++) { uint32_t command_id = GET_NEXT(values, param_max_offset, rc); @@ -1266,43 +1238,15 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac, vol->right_gain = GET_NEXT(values, param_max_offset, rc); vol->master_gain = 0x2000; - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - SOFT_VOLUME_GAIN_2CH_PARAM_SZ; - params_length += COMMAND_PAYLOAD_SZ + - SOFT_VOLUME_GAIN_MASTER_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - 
MAX_INBAND_PARAM_SZ, - "VOLUME/VOLUME2_GAIN_2CH", - rc); - if (rc != 0) - goto invalid_config; - if (instance == SOFT_VOLUME_INSTANCE_2) - *updt_params++ = - ASM_MODULE_ID_VOL_CTRL2; - else - *updt_params++ = - ASM_MODULE_ID_VOL_CTRL; - *updt_params++ = - ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN; - *updt_params++ = - SOFT_VOLUME_GAIN_2CH_PARAM_SZ; - *updt_params++ = - (vol->left_gain << 16) | - vol->right_gain; - if (instance == SOFT_VOLUME_INSTANCE_2) - *updt_params++ = - ASM_MODULE_ID_VOL_CTRL2; - else - *updt_params++ = - ASM_MODULE_ID_VOL_CTRL; - *updt_params++ = - ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN; - *updt_params++ = - SOFT_VOLUME_GAIN_MASTER_PARAM_SZ; - *updt_params++ = - vol->master_gain; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + SOFT_VOLUME_GAIN_2CH_PARAM_SZ + + COMMAND_IID_PAYLOAD_SZ + + SOFT_VOLUME_GAIN_MASTER_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "VOLUME/VOLUME2_GAIN_2CH", rc); break; case SOFT_VOLUME_GAIN_MASTER: case SOFT_VOLUME2_GAIN_MASTER: @@ -1315,53 +1259,57 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac, vol->right_gain = 0x2000; vol->master_gain = GET_NEXT(values, param_max_offset, rc); - if (command_config_state == CONFIG_SET) { - params_length += COMMAND_PAYLOAD_SZ + - SOFT_VOLUME_GAIN_2CH_PARAM_SZ; - params_length += COMMAND_PAYLOAD_SZ + - SOFT_VOLUME_GAIN_MASTER_PARAM_SZ; - CHECK_PARAM_LEN(params_length, - MAX_INBAND_PARAM_SZ, - "VOLUME/VOLUME2_GAIN_MASTER", - rc); - if (rc != 0) - goto invalid_config; - if (instance == SOFT_VOLUME_INSTANCE_2) - *updt_params++ = - ASM_MODULE_ID_VOL_CTRL2; - else - *updt_params++ = - ASM_MODULE_ID_VOL_CTRL; - *updt_params++ = - ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN; - *updt_params++ = - SOFT_VOLUME_GAIN_2CH_PARAM_SZ; - *updt_params++ = - (vol->left_gain << 16) | - vol->right_gain; - if (instance == SOFT_VOLUME_INSTANCE_2) - *updt_params++ = - ASM_MODULE_ID_VOL_CTRL2; - else - 
*updt_params++ = - ASM_MODULE_ID_VOL_CTRL; - *updt_params++ = - ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN; - *updt_params++ = - SOFT_VOLUME_GAIN_MASTER_PARAM_SZ; - *updt_params++ = - vol->master_gain; - } + if (command_config_state != CONFIG_SET) + break; + max_params_length = params_length + + COMMAND_IID_PAYLOAD_SZ + + SOFT_VOLUME_GAIN_2CH_PARAM_SZ + + COMMAND_IID_PAYLOAD_SZ + + SOFT_VOLUME_GAIN_MASTER_PARAM_SZ; + CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ, + "VOLUME/VOLUME2_GAIN_MASTER", rc); break; default: pr_err("%s: Invalid command id: %d to set config\n", __func__, command_id); - break; + continue; + } + if (rc) + continue; + + /* Set Volume Control for Left/Right */ + param_hdr.param_id = ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN; + param_hdr.param_size = SOFT_VOLUME_GAIN_2CH_PARAM_SZ; + vol_gain_2ch = (vol->left_gain << 16) | vol->right_gain; + rc = q6common_pack_pp_params(updt_params, ¶m_hdr, + (u8 *) &vol_gain_2ch, + &packed_data_size); + if (rc) { + pr_err("%s: Failed to pack params, error %d\n", + __func__, rc); + goto invalid_config; } + + updt_params += packed_data_size; + params_length += packed_data_size; + + /* Set Master Volume Control */ + param_hdr.param_id = ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN; + param_hdr.param_size = SOFT_VOLUME_GAIN_MASTER_PARAM_SZ; + rc = q6common_pack_pp_params(updt_params, ¶m_hdr, + (u8 *) &vol->master_gain, + &packed_data_size); + if (rc) { + pr_err("%s: Failed to pack params, error %d\n", + __func__, rc); + goto invalid_config; + } + + updt_params += packed_data_size; + params_length += packed_data_size; } if (params_length && (rc == 0)) - q6asm_send_audio_effects_params(ac, params, - params_length); + q6asm_set_pp_params(ac, NULL, params, params_length); invalid_config: kfree(params); return rc; diff --git a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c index 4de712a10f96..6dda41cc85bb 100644 --- a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c +++ 
b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c @@ -14,6 +14,7 @@ #include <linux/bitops.h> #include <sound/control.h> #include <sound/q6adm-v2.h> +#include <sound/q6common.h> #include "msm-ds2-dap-config.h" #include "msm-pcm-routing-v2.h" @@ -196,6 +197,7 @@ static void msm_ds2_dap_check_and_update_ramp_wait(int port_id, int copp_idx, int32_t *update_params_value = NULL; uint32_t params_length = SOFT_VOLUME_PARAM_SIZE * sizeof(uint32_t); uint32_t param_payload_len = PARAM_PAYLOAD_SIZE * sizeof(uint32_t); + struct param_hdr_v3 param_hdr = {0}; int rc = 0; update_params_value = kzalloc(params_length + param_payload_len, @@ -204,11 +206,13 @@ static void msm_ds2_dap_check_and_update_ramp_wait(int port_id, int copp_idx, pr_err("%s: params memory alloc failed\n", __func__); goto end; } - rc = adm_get_params(port_id, copp_idx, - AUDPROC_MODULE_ID_VOL_CTRL, - AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS, - params_length + param_payload_len, - (char *) update_params_value); + + param_hdr.module_id = AUDPROC_MODULE_ID_VOL_CTRL; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS; + param_hdr.param_size = params_length + param_payload_len; + rc = adm_get_pp_params(port_id, copp_idx, ADM_CLIENT_ID_DEFAULT, NULL, + ¶m_hdr, (char *) update_params_value); if (rc == 0) { pr_debug("%s: params_value [0x%x, 0x%x, 0x%x]\n", __func__, update_params_value[0], @@ -229,12 +233,13 @@ end: static int msm_ds2_dap_set_vspe_vdhe(int dev_map_idx, bool is_custom_stereo_enabled) { - int32_t *update_params_value = NULL; - int32_t *param_val = NULL; - int idx, i, j, rc = 0, cdev; - uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM + - 2 * DOLBY_PARAM_PAYLOAD_SIZE) * - sizeof(uint32_t); + u8 *packed_param_data = NULL; + u8 *param_data = NULL; + struct param_hdr_v3 param_hdr = {0}; + u32 packed_param_size = 0; + u32 param_size = 0; + int cdev; + int rc = 0; if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) { pr_err("%s: invalid dev map 
index %d\n", __func__, dev_map_idx); @@ -262,73 +267,88 @@ static int msm_ds2_dap_set_vspe_vdhe(int dev_map_idx, goto end; } - update_params_value = kzalloc(params_length, GFP_KERNEL); - if (!update_params_value) { - pr_err("%s: params memory alloc failed\n", __func__); - rc = -ENOMEM; + /* Allocate the max space needed */ + packed_param_size = (TOTAL_LENGTH_DOLBY_PARAM * sizeof(uint32_t)) + + (2 * sizeof(union param_hdrs)); + packed_param_data = kzalloc(packed_param_size, GFP_KERNEL); + if (!packed_param_data) + return -ENOMEM; + + packed_param_size = 0; + + /* Set common values */ + cdev = dev_map[dev_map_idx].cache_dev; + param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID; + param_hdr.instance_id = INSTANCE_ID_0; + + /* Pack VDHE header + data */ + param_hdr.param_id = DOLBY_PARAM_ID_VDHE; + param_size = DOLBY_PARAM_VDHE_LENGTH * sizeof(uint32_t); + param_hdr.param_size = param_size; + + if (is_custom_stereo_enabled) + param_data = NULL; + else + param_data = (u8 *) &ds2_dap_params[cdev] + .params_val[DOLBY_PARAM_VDHE_OFFSET]; + + rc = q6common_pack_pp_params(packed_param_data, ¶m_hdr, param_data, + ¶m_size); + if (rc) { + pr_err("%s: Failed to pack params, error %d\n", __func__, rc); goto end; } - params_length = 0; - param_val = update_params_value; - cdev = dev_map[dev_map_idx].cache_dev; - /* for VDHE and VSPE DAP params at index 0 and 1 in table */ - for (i = 0; i < 2; i++) { - *update_params_value++ = DOLBY_BUNDLE_MODULE_ID; - *update_params_value++ = ds2_dap_params_id[i]; - *update_params_value++ = ds2_dap_params_length[i] * - sizeof(uint32_t); - idx = ds2_dap_params_offset[i]; - for (j = 0; j < ds2_dap_params_length[i]; j++) { - if (is_custom_stereo_enabled) - *update_params_value++ = 0; - else - *update_params_value++ = - ds2_dap_params[cdev].params_val[idx+j]; - } - params_length += (DOLBY_PARAM_PAYLOAD_SIZE + - ds2_dap_params_length[i]) * - sizeof(uint32_t); - } - - pr_debug("%s: valid param length: %d\n", __func__, params_length); - if (params_length) { - 
rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id, - dev_map[dev_map_idx].copp_idx, - (char *)param_val, - params_length); - if (rc) { - pr_err("%s: send vdhe/vspe params failed with rc=%d\n", - __func__, rc); - rc = -EINVAL; - goto end; - } + packed_param_size += param_size; + + /* Pack VSPE header + data */ + param_hdr.param_id = DOLBY_PARAM_ID_VSPE; + param_size = DOLBY_PARAM_VSPE_LENGTH * sizeof(uint32_t); + param_hdr.param_size = param_size; + + if (is_custom_stereo_enabled) + param_data = NULL; + else + param_data = (u8 *) &ds2_dap_params[cdev] + .params_val[DOLBY_PARAM_VSPE_OFFSET]; + + rc = q6common_pack_pp_params(packed_param_data + packed_param_size, + ¶m_hdr, param_data, ¶m_size); + if (rc) { + pr_err("%s: Failed to pack params, error %d\n", __func__, rc); + goto end; + } + packed_param_size += param_size; + + rc = adm_set_pp_params(dev_map[dev_map_idx].port_id, + dev_map[dev_map_idx].copp_idx, NULL, + packed_param_data, packed_param_size); + if (rc) { + pr_err("%s: send vdhe/vspe params failed with rc=%d\n", + __func__, rc); + rc = -EINVAL; + goto end; } end: - kfree(param_val); + kfree(packed_param_data); return rc; } int qti_set_custom_stereo_on(int port_id, int copp_idx, bool is_custom_stereo_on) { - + struct custom_stereo_param custom_stereo = {0}; + struct param_hdr_v3 param_hdr = {0}; uint16_t op_FL_ip_FL_weight; uint16_t op_FL_ip_FR_weight; uint16_t op_FR_ip_FL_weight; uint16_t op_FR_ip_FR_weight; - - int32_t *update_params_value32 = NULL, rc = 0; - int32_t *param_val = NULL; - int16_t *update_params_value16 = 0; - uint32_t params_length_bytes = CUSTOM_STEREO_PAYLOAD_SIZE * - sizeof(uint32_t); - uint32_t avail_length = params_length_bytes; + int rc = 0; if ((port_id != SLIMBUS_0_RX) && (port_id != RT_PROXY_PORT_001_RX)) { pr_debug("%s:No Custom stereo for port:0x%x\n", __func__, port_id); - goto skip_send_cmd; + return 0; } pr_debug("%s: port 0x%x, copp_idx %d, is_custom_stereo_on %d\n", @@ -349,76 +369,49 @@ int 
qti_set_custom_stereo_on(int port_id, int copp_idx, op_FR_ip_FR_weight = Q14_GAIN_UNITY; } - update_params_value32 = kzalloc(params_length_bytes, GFP_KERNEL); - if (!update_params_value32) { - pr_err("%s, params memory alloc failed\n", __func__); - rc = -ENOMEM; - goto skip_send_cmd; - } - param_val = update_params_value32; - if (avail_length < 2 * sizeof(uint32_t)) - goto skip_send_cmd; - *update_params_value32++ = MTMX_MODULE_ID_DEFAULT_CHMIXER; - *update_params_value32++ = DEFAULT_CHMIXER_PARAM_ID_COEFF; - avail_length = avail_length - (2 * sizeof(uint32_t)); - - update_params_value16 = (int16_t *)update_params_value32; - if (avail_length < 10 * sizeof(uint16_t)) - goto skip_send_cmd; - *update_params_value16++ = CUSTOM_STEREO_CMD_PARAM_SIZE; - /* for alignment only*/ - *update_params_value16++ = 0; + param_hdr.module_id = MTMX_MODULE_ID_DEFAULT_CHMIXER; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = DEFAULT_CHMIXER_PARAM_ID_COEFF; + param_hdr.param_size = sizeof(struct custom_stereo_param); + /* index is 32-bit param in little endian*/ - *update_params_value16++ = CUSTOM_STEREO_INDEX_PARAM; - *update_params_value16++ = 0; + custom_stereo.index = CUSTOM_STEREO_INDEX_PARAM; + custom_stereo.reserved = 0; /* for stereo mixing num out ch*/ - *update_params_value16++ = CUSTOM_STEREO_NUM_OUT_CH; + custom_stereo.num_out_ch = CUSTOM_STEREO_NUM_OUT_CH; /* for stereo mixing num in ch*/ - *update_params_value16++ = CUSTOM_STEREO_NUM_IN_CH; + custom_stereo.num_in_ch = CUSTOM_STEREO_NUM_IN_CH; /* Out ch map FL/FR*/ - *update_params_value16++ = PCM_CHANNEL_FL; - *update_params_value16++ = PCM_CHANNEL_FR; + custom_stereo.out_fl = PCM_CHANNEL_FL; + custom_stereo.out_fr = PCM_CHANNEL_FR; /* In ch map FL/FR*/ - *update_params_value16++ = PCM_CHANNEL_FL; - *update_params_value16++ = PCM_CHANNEL_FR; - avail_length = avail_length - (10 * sizeof(uint16_t)); + custom_stereo.in_fl = PCM_CHANNEL_FL; + custom_stereo.in_fr = PCM_CHANNEL_FR; + /* weighting coefficients as 
name suggests, mixing will be done according to these coefficients*/ - if (avail_length < 4 * sizeof(uint16_t)) - goto skip_send_cmd; - *update_params_value16++ = op_FL_ip_FL_weight; - *update_params_value16++ = op_FL_ip_FR_weight; - *update_params_value16++ = op_FR_ip_FL_weight; - *update_params_value16++ = op_FR_ip_FR_weight; - avail_length = avail_length - (4 * sizeof(uint16_t)); - if (params_length_bytes != 0) { - rc = adm_dolby_dap_send_params(port_id, copp_idx, - (char *)param_val, - params_length_bytes); - if (rc) { - pr_err("%s: send params failed rc=%d\n", __func__, rc); - rc = -EINVAL; - goto skip_send_cmd; - } + custom_stereo.op_FL_ip_FL_weight = op_FL_ip_FL_weight; + custom_stereo.op_FL_ip_FR_weight = op_FL_ip_FR_weight; + custom_stereo.op_FR_ip_FL_weight = op_FR_ip_FL_weight; + custom_stereo.op_FR_ip_FR_weight = op_FR_ip_FR_weight; + rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr, + (u8 *) &custom_stereo); + if (rc) { + pr_err("%s: send params failed rc=%d\n", __func__, rc); + return -EINVAL; } - kfree(param_val); + return 0; -skip_send_cmd: - pr_err("%s: insufficient memory, send cmd failed\n", - __func__); - kfree(param_val); - return rc; } static int dap_set_custom_stereo_onoff(int dev_map_idx, bool is_custom_stereo_enabled) { + uint32_t enable = is_custom_stereo_enabled ? 
1 : 0; + struct param_hdr_v3 param_hdr = {0}; + int rc = 0; - int32_t *update_params_value = NULL, rc = 0; - int32_t *param_val = NULL; - uint32_t params_length_bytes = (TOTAL_LENGTH_DOLBY_PARAM + - DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t); if ((dev_map[dev_map_idx].port_id != SLIMBUS_0_RX) && (dev_map[dev_map_idx].port_id != RT_PROXY_PORT_001_RX)) { pr_debug("%s:No Custom stereo for port:0x%x\n", @@ -435,38 +428,21 @@ static int dap_set_custom_stereo_onoff(int dev_map_idx, /* DAP custom stereo */ msm_ds2_dap_set_vspe_vdhe(dev_map_idx, is_custom_stereo_enabled); - update_params_value = kzalloc(params_length_bytes, GFP_KERNEL); - if (!update_params_value) { - pr_err("%s: params memory alloc failed\n", __func__); - rc = -ENOMEM; + param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = DOLBY_ENABLE_CUSTOM_STEREO; + param_hdr.param_size = sizeof(enable); + + rc = adm_pack_and_set_one_pp_param(dev_map[dev_map_idx].port_id, + dev_map[dev_map_idx].copp_idx, + param_hdr, (u8 *) &enable); + if (rc) { + pr_err("%s: set custom stereo enable failed with rc=%d\n", + __func__, rc); + rc = -EINVAL; goto end; } - params_length_bytes = 0; - param_val = update_params_value; - *update_params_value++ = DOLBY_BUNDLE_MODULE_ID; - *update_params_value++ = DOLBY_ENABLE_CUSTOM_STEREO; - *update_params_value++ = sizeof(uint32_t); - if (is_custom_stereo_enabled) - *update_params_value++ = 1; - else - *update_params_value++ = 0; - params_length_bytes += (DOLBY_PARAM_PAYLOAD_SIZE + 1) * - sizeof(uint32_t); - pr_debug("%s: valid param length: %d\n", __func__, params_length_bytes); - if (params_length_bytes) { - rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id, - dev_map[dev_map_idx].copp_idx, - (char *)param_val, - params_length_bytes); - if (rc) { - pr_err("%s: custom stereo param failed with rc=%d\n", - __func__, rc); - rc = -EINVAL; - goto end; - } - } end: - kfree(param_val); return rc; } @@ -654,8 +630,11 @@ static int 
msm_ds2_dap_init_modules_in_topology(int dev_map_idx) { int rc = 0, i = 0, port_id, copp_idx; /* Account for 32 bit interger allocation */ - int32_t param_sz = (ADM_GET_TOPO_MODULE_LIST_LENGTH / sizeof(uint32_t)); + int32_t param_sz = + (ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH / sizeof(uint32_t)); int32_t *update_param_val = NULL; + struct module_instance_info mod_inst_info = {0}; + int mod_inst_info_sz = 0; if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) { pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx); @@ -666,7 +645,8 @@ static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx) port_id = dev_map[dev_map_idx].port_id; copp_idx = dev_map[dev_map_idx].copp_idx; pr_debug("%s: port_id 0x%x copp_idx %d\n", __func__, port_id, copp_idx); - update_param_val = kzalloc(ADM_GET_TOPO_MODULE_LIST_LENGTH, GFP_KERNEL); + update_param_val = + kzalloc(ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH, GFP_KERNEL); if (!update_param_val) { pr_err("%s, param memory alloc failed\n", __func__); rc = -ENOMEM; @@ -675,9 +655,10 @@ static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx) if (!ds2_dap_params_states.dap_bypass) { /* get modules from dsp */ - rc = adm_get_pp_topo_module_list(port_id, copp_idx, - ADM_GET_TOPO_MODULE_LIST_LENGTH, - (char *)update_param_val); + rc = adm_get_pp_topo_module_list_v2( + port_id, copp_idx, + ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH, + update_param_val); if (rc < 0) { pr_err("%s:topo list port %d, err %d,copp_idx %d\n", __func__, port_id, copp_idx, rc); @@ -691,11 +672,15 @@ static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx) rc = -EINVAL; goto end; } + + mod_inst_info_sz = sizeof(struct module_instance_info) / + sizeof(uint32_t); /* Turn off modules */ - for (i = 1; i < update_param_val[0]; i++) { + for (i = 1; i < update_param_val[0] * mod_inst_info_sz; + i += mod_inst_info_sz) { if (!msm_ds2_dap_can_enable_module( - update_param_val[i]) || - (update_param_val[i] == DS2_MODULE_ID)) { + 
update_param_val[i]) || + (update_param_val[i] == DS2_MODULE_ID)) { pr_debug("%s: Do not enable/disable %d\n", __func__, update_param_val[i]); continue; @@ -703,15 +688,21 @@ static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx) pr_debug("%s: param disable %d\n", __func__, update_param_val[i]); - adm_param_enable(port_id, copp_idx, update_param_val[i], - MODULE_DISABLE); + memcpy(&mod_inst_info, &update_param_val[i], + sizeof(mod_inst_info)); + adm_param_enable_v2(port_id, copp_idx, + mod_inst_info, + MODULE_DISABLE); } } else { msm_ds2_dap_send_cal_data(dev_map_idx); } - adm_param_enable(port_id, copp_idx, DS2_MODULE_ID, - !ds2_dap_params_states.dap_bypass); + + mod_inst_info.module_id = DS2_MODULE_ID; + mod_inst_info.instance_id = INSTANCE_ID_0; + adm_param_enable_v2(port_id, copp_idx, mod_inst_info, + !ds2_dap_params_states.dap_bypass); end: kfree(update_param_val); return rc; @@ -884,17 +875,21 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data) { int rc = 0, i = 0, j = 0; /*Account for 32 bit interger allocation */ - int32_t param_sz = (ADM_GET_TOPO_MODULE_LIST_LENGTH / sizeof(uint32_t)); + int32_t param_sz = + (ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH / sizeof(uint32_t)); int32_t *mod_list = NULL; int port_id = 0, copp_idx = -1; bool cs_onoff = ds2_dap_params_states.custom_stereo_onoff; int ramp_wait = DOLBY_SOFT_VOLUME_PERIOD; + struct module_instance_info mod_inst_info = {0}; + int mod_inst_info_sz = 0; pr_debug("%s: bypass type %d bypass %d custom stereo %d\n", __func__, ds2_dap_params_states.dap_bypass_type, ds2_dap_params_states.dap_bypass, ds2_dap_params_states.custom_stereo_onoff); - mod_list = kzalloc(ADM_GET_TOPO_MODULE_LIST_LENGTH, GFP_KERNEL); + mod_list = + kzalloc(ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH, GFP_KERNEL); if (!mod_list) { pr_err("%s: param memory alloc failed\n", __func__); rc = -ENOMEM; @@ -921,9 +916,10 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data) } /* getmodules 
from dsp */ - rc = adm_get_pp_topo_module_list(port_id, copp_idx, - ADM_GET_TOPO_MODULE_LIST_LENGTH, - (char *)mod_list); + rc = adm_get_pp_topo_module_list_v2( + port_id, copp_idx, + ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH, + mod_list); if (rc < 0) { pr_err("%s:adm get topo list port %d", __func__, port_id); @@ -975,8 +971,11 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data) /* if dap bypass is set */ if (ds2_dap_params_states.dap_bypass) { /* Turn off dap module */ - adm_param_enable(port_id, copp_idx, - DS2_MODULE_ID, MODULE_DISABLE); + mod_inst_info.module_id = DS2_MODULE_ID; + mod_inst_info.instance_id = INSTANCE_ID_0; + adm_param_enable_v2(port_id, copp_idx, + mod_inst_info, + MODULE_DISABLE); /* * If custom stereo is on at the time of bypass, * switch off custom stereo on dap and turn on @@ -999,8 +998,13 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data) copp_idx, rc); } } + + mod_inst_info_sz = + sizeof(struct module_instance_info) / + sizeof(uint32_t); /* Turn on qti modules */ - for (j = 1; j < mod_list[0]; j++) { + for (j = 1; j < mod_list[0] * mod_inst_info_sz; + j += mod_inst_info_sz) { if (!msm_ds2_dap_can_enable_module( mod_list[j]) || mod_list[j] == @@ -1008,9 +1012,11 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data) continue; pr_debug("%s: param enable %d\n", __func__, mod_list[j]); - adm_param_enable(port_id, copp_idx, - mod_list[j], - MODULE_ENABLE); + memcpy(&mod_inst_info, &mod_list[j], + sizeof(mod_inst_info)); + adm_param_enable_v2(port_id, copp_idx, + mod_inst_info, + MODULE_ENABLE); } /* Add adm api to resend calibration on port */ @@ -1025,7 +1031,8 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data) } } else { /* Turn off qti modules */ - for (j = 1; j < mod_list[0]; j++) { + for (j = 1; j < mod_list[0] * mod_inst_info_sz; + j += mod_inst_info_sz) { if (!msm_ds2_dap_can_enable_module( mod_list[j]) || mod_list[j] == @@ -1033,15 
+1040,20 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data) continue; pr_debug("%s: param disable %d\n", __func__, mod_list[j]); - adm_param_enable(port_id, copp_idx, - mod_list[j], - MODULE_DISABLE); + memcpy(&mod_inst_info, &mod_list[j], + sizeof(mod_inst_info)); + adm_param_enable_v2(port_id, copp_idx, + mod_inst_info, + MODULE_DISABLE); } /* Enable DAP modules */ pr_debug("%s:DS2 param enable\n", __func__); - adm_param_enable(port_id, copp_idx, - DS2_MODULE_ID, MODULE_ENABLE); + mod_inst_info.module_id = DS2_MODULE_ID; + mod_inst_info.instance_id = INSTANCE_ID_0; + adm_param_enable_v2(port_id, copp_idx, + mod_inst_info, + MODULE_ENABLE); /* * If custom stereo is on at the time of dap on, * switch off custom stereo on qti channel mixer @@ -1100,13 +1112,12 @@ end: static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx) { - int rc = 0; - int32_t *update_params_value = NULL, *params_value = NULL; - uint32_t params_length = (DOLBY_PARAM_INT_ENDP_LENGTH + - DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t); + uint32_t offset = 0; + struct param_hdr_v3 param_hdr = {0}; int cache_device = 0; struct ds2_dap_params_s *ds2_ap_params_obj = NULL; int32_t *modified_param = NULL; + int rc = 0; if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) { pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx); @@ -1121,13 +1132,6 @@ static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx) pr_debug("%s: endp - %pK %pK\n", __func__, &ds2_dap_params[cache_device], ds2_ap_params_obj); - params_value = kzalloc(params_length, GFP_KERNEL); - if (!params_value) { - pr_err("%s: params memory alloc failed\n", __func__); - rc = -ENOMEM; - goto end; - } - if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) { pr_err("%s: invalid port\n", __func__); rc = -EINVAL; @@ -1141,21 +1145,20 @@ static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx) goto end; } - update_params_value = params_value; - *update_params_value++ = 
DOLBY_BUNDLE_MODULE_ID; - *update_params_value++ = DOLBY_PARAM_ID_INIT_ENDP; - *update_params_value++ = DOLBY_PARAM_INT_ENDP_LENGTH * sizeof(uint32_t); - *update_params_value++ = ds2_ap_params_obj->params_val[ - ds2_dap_params_offset[endp_idx]]; + param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = DOLBY_PARAM_ID_INIT_ENDP; + param_hdr.param_size = sizeof(offset); + offset = ds2_ap_params_obj->params_val[ds2_dap_params_offset[endp_idx]]; pr_debug("%s: off %d, length %d\n", __func__, ds2_dap_params_offset[endp_idx], ds2_dap_params_length[endp_idx]); pr_debug("%s: param 0x%x, param val %d\n", __func__, ds2_dap_params_id[endp_idx], ds2_ap_params_obj-> params_val[ds2_dap_params_offset[endp_idx]]); - rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id, - dev_map[dev_map_idx].copp_idx, - (char *)params_value, params_length); + rc = adm_pack_and_set_one_pp_param(dev_map[dev_map_idx].port_id, + dev_map[dev_map_idx].copp_idx, + param_hdr, (u8 *) &offset); if (rc) { pr_err("%s: send dolby params failed rc %d\n", __func__, rc); rc = -EINVAL; @@ -1172,19 +1175,17 @@ static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx) ds2_ap_params_obj->dap_params_modified[endp_idx] = 0x00010001; end: - kfree(params_value); return rc; } static int msm_ds2_dap_send_cached_params(int dev_map_idx, int commit) { - int32_t *update_params_value = NULL, *params_value = NULL; - uint32_t idx, i, j, ret = 0; - uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM + - (MAX_DS2_PARAMS - 1) * - DOLBY_PARAM_PAYLOAD_SIZE) * - sizeof(uint32_t); + uint8_t *packed_params = NULL; + uint32_t packed_params_size = 0; + uint32_t param_size = 0; + struct param_hdr_v3 param_hdr = {0}; + uint32_t idx, i, ret = 0; int cache_device = 0; struct ds2_dap_params_s *ds2_ap_params_obj = NULL; int32_t *modified_param = NULL; @@ -1207,12 +1208,16 @@ static int msm_ds2_dap_send_cached_params(int dev_map_idx, pr_debug("%s: cached param - %pK %pK, 
cache_device %d\n", __func__, &ds2_dap_params[cache_device], ds2_ap_params_obj, cache_device); - params_value = kzalloc(params_length, GFP_KERNEL); - if (!params_value) { - pr_err("%s: params memory alloc failed\n", __func__); - ret = -ENOMEM; - goto end; - } + + /* + * Allocate the max space needed. This is enough space to hold the + * header for each param plus the total size of all the params. + */ + packed_params_size = (sizeof(param_hdr) * (MAX_DS2_PARAMS - 1)) + + (TOTAL_LENGTH_DOLBY_PARAM * sizeof(uint32_t)); + packed_params = kzalloc(packed_params_size, GFP_KERNEL); + if (!packed_params) + return -ENOMEM; if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) { pr_err("%s: invalid port id\n", __func__); @@ -1227,8 +1232,7 @@ static int msm_ds2_dap_send_cached_params(int dev_map_idx, goto end; } - update_params_value = params_value; - params_length = 0; + packed_params_size = 0; for (i = 0; i < (MAX_DS2_PARAMS-1); i++) { /*get the pointer to the param modified array in the cache*/ modified_param = ds2_ap_params_obj->dap_params_modified; @@ -1241,28 +1245,33 @@ static int msm_ds2_dap_send_cached_params(int dev_map_idx, if (!msm_ds2_dap_check_is_param_modified(modified_param, i, commit)) continue; - *update_params_value++ = DOLBY_BUNDLE_MODULE_ID; - *update_params_value++ = ds2_dap_params_id[i]; - *update_params_value++ = ds2_dap_params_length[i] * - sizeof(uint32_t); + + param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = ds2_dap_params_id[i]; + param_hdr.param_size = + ds2_dap_params_length[i] * sizeof(uint32_t); + idx = ds2_dap_params_offset[i]; - for (j = 0; j < ds2_dap_params_length[i]; j++) { - *update_params_value++ = - ds2_ap_params_obj->params_val[idx+j]; - pr_debug("%s: id 0x%x,val %d\n", __func__, - ds2_dap_params_id[i], - ds2_ap_params_obj->params_val[idx+j]); + ret = q6common_pack_pp_params( + packed_params + packed_params_size, ¶m_hdr, + (u8 *) &ds2_ap_params_obj->params_val[idx], + 
¶m_size); + if (ret) { + pr_err("%s: Failed to pack params, error %d\n", + __func__, ret); + goto end; } - params_length += (DOLBY_PARAM_PAYLOAD_SIZE + - ds2_dap_params_length[i]) * sizeof(uint32_t); + + packed_params_size += param_size; } - pr_debug("%s: valid param length: %d\n", __func__, params_length); - if (params_length) { - ret = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id, - dev_map[dev_map_idx].copp_idx, - (char *)params_value, - params_length); + pr_debug("%s: total packed param length: %d\n", __func__, + packed_params_size); + if (packed_params_size) { + ret = adm_set_pp_params(dev_map[dev_map_idx].port_id, + dev_map[dev_map_idx].copp_idx, NULL, + packed_params, packed_params_size); if (ret) { pr_err("%s: send dolby params failed ret %d\n", __func__, ret); @@ -1285,7 +1294,7 @@ static int msm_ds2_dap_send_cached_params(int dev_map_idx, } } end: - kfree(params_value); + kfree(packed_params); return ret; } @@ -1522,11 +1531,12 @@ static int msm_ds2_dap_get_param(u32 cmd, void *arg) { int rc = 0, i, port_id = 0, copp_idx = -1; struct dolby_param_data *dolby_data = (struct dolby_param_data *)arg; - int32_t *update_params_value = NULL, *params_value = NULL; + int32_t *params_value = NULL; uint32_t params_length = DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM * sizeof(uint32_t); uint32_t param_payload_len = DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t); + struct param_hdr_v3 param_hdr; /* Return error on get param in soft or hard bypass */ if (ds2_dap_params_states.dap_bypass == true) { @@ -1572,18 +1582,14 @@ static int msm_ds2_dap_get_param(u32 cmd, void *arg) params_value = kzalloc(params_length + param_payload_len, GFP_KERNEL); - if (!params_value) { - pr_err("%s: params memory alloc failed\n", __func__); - rc = -ENOMEM; - goto end; - } + if (!params_value) + return -ENOMEM; if (dolby_data->param_id == DOLBY_PARAM_ID_VER) { - rc = adm_get_params(port_id, copp_idx, - DOLBY_BUNDLE_MODULE_ID, - DOLBY_PARAM_ID_VER, - params_length + param_payload_len, - (char 
*)params_value); + param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = DOLBY_PARAM_ID_VER; + param_hdr.param_size = params_length + param_payload_len; } else { for (i = 0; i < MAX_DS2_PARAMS; i++) if (ds2_dap_params_id[i] == @@ -1596,25 +1602,25 @@ static int msm_ds2_dap_get_param(u32 cmd, void *arg) goto end; } else { params_length = - ds2_dap_params_length[i] * sizeof(uint32_t); + ds2_dap_params_length[i] * sizeof(uint32_t); - rc = adm_get_params(port_id, copp_idx, - DOLBY_BUNDLE_MODULE_ID, - ds2_dap_params_id[i], - params_length + - param_payload_len, - (char *)params_value); + param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = ds2_dap_params_id[i]; + param_hdr.param_size = + params_length + param_payload_len; } } + rc = adm_get_pp_params(port_id, copp_idx, ADM_CLIENT_ID_DEFAULT, NULL, + ¶m_hdr, (u8 *) params_value); if (rc) { pr_err("%s: get parameters failed rc %d\n", __func__, rc); rc = -EINVAL; goto end; } - update_params_value = params_value; - if (copy_to_user((void *)dolby_data->data, - &update_params_value[DOLBY_PARAM_PAYLOAD_SIZE], - (dolby_data->length * sizeof(uint32_t)))) { + if (copy_to_user((void __user *) dolby_data->data, + ¶ms_value[DOLBY_PARAM_PAYLOAD_SIZE], + (dolby_data->length * sizeof(uint32_t)))) { pr_err("%s: error getting param\n", __func__); rc = -EFAULT; goto end; @@ -1633,6 +1639,7 @@ static int msm_ds2_dap_param_visualizer_control_get(u32 cmd, void *arg) uint32_t offset, length, params_length; uint32_t param_payload_len = DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t); + struct param_hdr_v3 param_hdr = {0}; for (i = 0; i < DS2_DEVICES_ALL; i++) { if ((dev_map[i].active)) { @@ -1683,11 +1690,13 @@ static int msm_ds2_dap_param_visualizer_control_get(u32 cmd, void *arg) offset = 0; params_length = length * sizeof(uint32_t); - ret = adm_get_params(port_id, copp_idx, - DOLBY_BUNDLE_MODULE_ID, - DOLBY_PARAM_ID_VCBG, - 
params_length + param_payload_len, - (((char *)(visualizer_data)) + offset)); + param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = DOLBY_PARAM_ID_VCBG; + param_hdr.param_size = length * sizeof(uint32_t) + param_payload_len; + ret = adm_get_pp_params(port_id, copp_idx, ADM_CLIENT_ID_DEFAULT, NULL, + ¶m_hdr, + (((char *) (visualizer_data)) + offset)); if (ret) { pr_err("%s: get parameters failed ret %d\n", __func__, ret); ret = -EINVAL; @@ -1695,11 +1704,13 @@ static int msm_ds2_dap_param_visualizer_control_get(u32 cmd, void *arg) goto end; } offset = length * sizeof(uint32_t); - ret = adm_get_params(port_id, copp_idx, - DOLBY_BUNDLE_MODULE_ID, - DOLBY_PARAM_ID_VCBE, - params_length + param_payload_len, - (((char *)(visualizer_data)) + offset)); + param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = DOLBY_PARAM_ID_VCBE; + param_hdr.param_size = length * sizeof(uint32_t) + param_payload_len; + ret = adm_get_pp_params(port_id, copp_idx, ADM_CLIENT_ID_DEFAULT, NULL, + ¶m_hdr, + (((char *) (visualizer_data)) + offset)); if (ret) { pr_err("%s: get parameters failed ret %d\n", __func__, ret); ret = -EINVAL; diff --git a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h index 0eb6017fd383..c2687017c962 100644 --- a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h +++ b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h @@ -1,4 +1,5 @@ -/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2014, 2016-2017, The Linux Foundation. All rights + * reserved. * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. 
@@ -32,7 +33,6 @@ struct dolby_param_license32 { compat_uptr_t license_key; }; - #define SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM32\ _IOWR('U', 0x10, struct dolby_param_data32) #define SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM32\ @@ -62,6 +62,34 @@ enum { DAP_CMD_SET_BYPASS_TYPE = 5, }; +struct custom_stereo_param { + /* Index is 32-bit param in little endian */ + u16 index; + u16 reserved; + + /* For stereo mixing, the number of out channels */ + u16 num_out_ch; + /* For stereo mixing, the number of in channels */ + u16 num_in_ch; + + /* Out channel map FL/FR*/ + u16 out_fl; + u16 out_fr; + + /* In channel map FL/FR*/ + u16 in_fl; + u16 in_fr; + + /* + * Weighting coefficients. Mixing will be done according to + * these coefficients. + */ + u16 op_FL_ip_FL_weight; + u16 op_FL_ip_FR_weight; + u16 op_FR_ip_FL_weight; + u16 op_FR_ip_FR_weight; +}; + #define DOLBY_PARAM_INT_ENDP_LENGTH 1 #define DOLBY_PARAM_INT_ENDP_OFFSET (DOLBY_PARAM_PSTG_OFFSET + \ DOLBY_PARAM_PSTG_LENGTH) diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c index 13eb97ae3660..2dba05df40e0 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c @@ -11384,22 +11384,23 @@ int msm_routing_get_rms_value_control(struct snd_kcontrol *kcontrol, int be_idx = 0; char *param_value; int *update_param_value; - uint32_t param_length = sizeof(uint32_t); - uint32_t param_payload_len = RMS_PAYLOAD_LEN * sizeof(uint32_t); - param_value = kzalloc(param_length + param_payload_len, GFP_KERNEL); - if (!param_value) { - pr_err("%s, param memory alloc failed\n", __func__); + uint32_t param_size = (RMS_PAYLOAD_LEN + 1) * sizeof(uint32_t); + struct param_hdr_v3 param_hdr = {0}; + + param_value = kzalloc(param_size, GFP_KERNEL); + if (!param_value) return -ENOMEM; - } + for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) if (msm_bedais[be_idx].port_id == SLIMBUS_0_TX) break; if ((be_idx < MSM_BACKEND_DAI_MAX) && msm_bedais[be_idx].active) 
{ - rc = adm_get_params(SLIMBUS_0_TX, 0, - RMS_MODULEID_APPI_PASSTHRU, - RMS_PARAM_FIRST_SAMPLE, - param_length + param_payload_len, - param_value); + param_hdr.module_id = RMS_MODULEID_APPI_PASSTHRU; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = RMS_PARAM_FIRST_SAMPLE; + param_hdr.param_size = param_size; + rc = adm_get_pp_params(SLIMBUS_0_TX, 0, ADM_CLIENT_ID_DEFAULT, + NULL, ¶m_hdr, (u8 *) param_value); if (rc) { pr_err("%s: get parameters failed:%d\n", __func__, rc); kfree(param_value); diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c index f3ec45b8f9b1..76d8f8d9e33c 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c @@ -556,12 +556,14 @@ static int msm_voice_slowtalk_put(struct snd_kcontrol *kcontrol, { int st_enable = ucontrol->value.integer.value[0]; uint32_t session_id = ucontrol->value.integer.value[1]; + struct module_instance_info mod_inst_info = {0}; pr_debug("%s: st enable=%d session_id=%#x\n", __func__, st_enable, session_id); - voc_set_pp_enable(session_id, - MODULE_ID_VOICE_MODULE_ST, st_enable); + mod_inst_info.module_id = MODULE_ID_VOICE_MODULE_ST; + mod_inst_info.instance_id = INSTANCE_ID_0; + voc_set_pp_enable(session_id, mod_inst_info, st_enable); return 0; } diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c index 65f5167d9dee..bcfb090d556b 100644 --- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c +++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c @@ -18,6 +18,7 @@ #include <sound/q6adm-v2.h> #include <sound/q6asm-v2.h> #include <sound/q6afe-v2.h> +#include <sound/q6common.h> #include <sound/asound.h> #include <sound/q6audio-v2.h> #include <sound/tlv.h> @@ -327,14 +328,13 @@ static int msm_qti_pp_get_rms_value_control(struct snd_kcontrol *kcontrol, int be_idx = 0, copp_idx; char *param_value; int *update_param_value; - uint32_t param_length = sizeof(uint32_t); - uint32_t param_payload_len 
= RMS_PAYLOAD_LEN * sizeof(uint32_t); + uint32_t param_size = (RMS_PAYLOAD_LEN + 1) * sizeof(uint32_t); struct msm_pcm_routing_bdai_data msm_bedai; - param_value = kzalloc(param_length + param_payload_len, GFP_KERNEL); - if (!param_value) { - pr_err("%s, param memory alloc failed\n", __func__); + struct param_hdr_v3 param_hdr = {0}; + + param_value = kzalloc(param_size, GFP_KERNEL); + if (!param_value) return -ENOMEM; - } msm_pcm_routing_acquire_lock(); for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) { msm_pcm_routing_get_bedai_info(be_idx, &msm_bedai); @@ -354,11 +354,12 @@ static int msm_qti_pp_get_rms_value_control(struct snd_kcontrol *kcontrol, rc = -EINVAL; goto get_rms_value_err; } - rc = adm_get_params(SLIMBUS_0_TX, copp_idx, - RMS_MODULEID_APPI_PASSTHRU, - RMS_PARAM_FIRST_SAMPLE, - param_length + param_payload_len, - param_value); + param_hdr.module_id = RMS_MODULEID_APPI_PASSTHRU; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = RMS_PARAM_FIRST_SAMPLE; + param_hdr.param_size = param_size; + rc = adm_get_pp_params(SLIMBUS_0_TX, copp_idx, ADM_CLIENT_ID_DEFAULT, + NULL, ¶m_hdr, param_value); if (rc) { pr_err("%s: get parameters failed rc=%d\n", __func__, rc); rc = -EINVAL; @@ -655,64 +656,82 @@ static void msm_qti_pp_asphere_init_state(void) static int msm_qti_pp_asphere_send_params(int port_id, int copp_idx, bool force) { - char *params_value = NULL; - uint32_t *update_params_value = NULL; - uint32_t param_size = sizeof(uint32_t) + - sizeof(struct adm_param_data_v5); - int params_length = 0, param_count = 0, ret = 0; + u8 *packed_params = NULL; + u32 packed_params_size = 0; + u32 param_size = 0; + struct param_hdr_v3 param_hdr = {0}; bool set_enable = force || (asphere_state.enabled != asphere_state.enabled_prev); bool set_strength = asphere_state.enabled == 1 && (set_enable || (asphere_state.strength != asphere_state.strength_prev)); + int param_count = 0; + int ret = 0; if (set_enable) param_count++; if (set_strength) param_count++; 
- params_length = param_count * param_size; + + if (param_count == 0) { + pr_debug("%s: Nothing to send, exiting\n", __func__); + return 0; + } pr_debug("%s: port_id %d, copp_id %d, forced %d, param_count %d\n", - __func__, port_id, copp_idx, force, param_count); + __func__, port_id, copp_idx, force, param_count); pr_debug("%s: enable prev:%u cur:%u, strength prev:%u cur:%u\n", __func__, asphere_state.enabled_prev, asphere_state.enabled, asphere_state.strength_prev, asphere_state.strength); - if (params_length > 0) - params_value = kzalloc(params_length, GFP_KERNEL); - if (!params_value) { - pr_err("%s, params memory alloc failed\n", __func__); + packed_params_size = + param_count * (sizeof(struct param_hdr_v3) + sizeof(uint32_t)); + packed_params = kzalloc(packed_params_size, GFP_KERNEL); + if (!packed_params) return -ENOMEM; - } - update_params_value = (uint32_t *)params_value; - params_length = 0; + + packed_params_size = 0; + param_hdr.module_id = AUDPROC_MODULE_ID_AUDIOSPHERE; + param_hdr.instance_id = INSTANCE_ID_0; if (set_strength) { /* add strength command */ - *update_params_value++ = AUDPROC_MODULE_ID_AUDIOSPHERE; - *update_params_value++ = AUDPROC_PARAM_ID_AUDIOSPHERE_STRENGTH; - *update_params_value++ = sizeof(uint32_t); - *update_params_value++ = asphere_state.strength; - params_length += param_size; + param_hdr.param_id = AUDPROC_PARAM_ID_AUDIOSPHERE_STRENGTH; + param_hdr.param_size = sizeof(asphere_state.strength); + ret = q6common_pack_pp_params(packed_params + + packed_params_size, + ¶m_hdr, + (u8 *) &asphere_state.strength, + ¶m_size); + if (ret) { + pr_err("%s: Failed to pack params, error %d\n", + __func__, ret); + goto done; + } + packed_params_size += param_size; } if (set_enable) { /* add enable command */ - *update_params_value++ = AUDPROC_MODULE_ID_AUDIOSPHERE; - *update_params_value++ = AUDPROC_PARAM_ID_AUDIOSPHERE_ENABLE; - *update_params_value++ = sizeof(uint32_t); - *update_params_value++ = asphere_state.enabled; - params_length += 
param_size; - } - pr_debug("%s, param length: %d\n", __func__, params_length); - if (params_length) { - ret = adm_send_params_v5(port_id, copp_idx, - params_value, params_length); + param_hdr.param_id = AUDPROC_PARAM_ID_AUDIOSPHERE_ENABLE; + param_hdr.param_size = sizeof(asphere_state.enabled); + q6common_pack_pp_params(packed_params + packed_params_size, + ¶m_hdr, + (u8 *) &asphere_state.enabled, + ¶m_size); if (ret) { - pr_err("%s: setting param failed with err=%d\n", - __func__, ret); - kfree(params_value); - return -EINVAL; + pr_err("%s: Failed to pack params, error %d\n", + __func__, ret); + goto done; } + packed_params_size += param_size; } - kfree(params_value); + + pr_debug("%s: packed data size: %d\n", __func__, packed_params_size); + ret = adm_set_pp_params(port_id, copp_idx, NULL, packed_params, + packed_params_size); + if (ret) + pr_err("%s: set param failed with err=%d\n", __func__, ret); + +done: + kfree(packed_params); return 0; } diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c index 018681309f2e..dc66c5ad93d5 100644 --- a/sound/soc/msm/qdsp6v2/q6adm.c +++ b/sound/soc/msm/qdsp6v2/q6adm.c @@ -22,6 +22,7 @@ #include <sound/q6adm-v2.h> #include <sound/q6audio-v2.h> #include <sound/q6afe-v2.h> +#include <sound/q6common.h> #include <sound/audio_cal_utils.h> #include <sound/asound.h> #include "msm-dts-srs-tm-config.h" @@ -32,8 +33,8 @@ #define RESET_COPP_ID 99 #define INVALID_COPP_ID 0xFF /* Used for inband payload copy, max size is 4k */ -/* 2 is to account for module & param ID in payload */ -#define ADM_GET_PARAMETER_LENGTH (4096 - APR_HDR_SIZE - 2 * sizeof(uint32_t)) +/* 3 is to account for module, instance & param ID in payload */ +#define ADM_GET_PARAMETER_LENGTH (4096 - APR_HDR_SIZE - 3 * sizeof(uint32_t)) #define ULL_SUPPORTED_BITS_PER_SAMPLE 16 #define ULL_SUPPORTED_SAMPLE_RATE 48000 @@ -119,8 +120,8 @@ static struct adm_multi_ch_map multi_ch_maps[2] = { }; static int adm_get_parameters[MAX_COPPS_PER_PORT * 
ADM_GET_PARAMETER_LENGTH]; -static int adm_module_topo_list[ - MAX_COPPS_PER_PORT * ADM_GET_TOPO_MODULE_LIST_LENGTH]; +static int adm_module_topo_list[MAX_COPPS_PER_PORT * + ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH]; int adm_validate_and_get_port_index(int port_id) { @@ -258,10 +259,12 @@ static int adm_get_next_available_copp(int port_idx) int srs_trumedia_open(int port_id, int copp_idx, __s32 srs_tech_id, void *srs_params) { - struct adm_cmd_set_pp_params_inband_v5 *adm_params = NULL; - struct adm_cmd_set_pp_params_v5 *adm_params_ = NULL; - __s32 sz = 0, param_id, module_id = SRS_TRUMEDIA_MODULE_ID, outband = 0; - int ret = 0, port_idx; + struct param_hdr_v3 param_hdr = {0}; + struct mem_mapping_hdr mem_hdr = {0}; + u32 total_param_size = 0; + bool outband = false; + int port_idx; + int ret = 0; pr_debug("SRS - %s", __func__); @@ -271,246 +274,92 @@ int srs_trumedia_open(int port_id, int copp_idx, __s32 srs_tech_id, pr_err("%s: Invalid port_id %#x\n", __func__, port_id); return -EINVAL; } + + param_hdr.module_id = SRS_TRUMEDIA_MODULE_ID; + param_hdr.instance_id = INSTANCE_ID_0; + switch (srs_tech_id) { case SRS_ID_GLOBAL: { - struct srs_trumedia_params_GLOBAL *glb_params = NULL; - sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) + + param_hdr.param_id = SRS_TRUMEDIA_PARAMS; + param_hdr.param_size = sizeof(struct srs_trumedia_params_GLOBAL); - adm_params = kzalloc(sz, GFP_KERNEL); - if (!adm_params) { - pr_err("%s, adm params memory alloc failed\n", - __func__); - return -ENOMEM; - } - adm_params->payload_size = - sizeof(struct srs_trumedia_params_GLOBAL) + - sizeof(struct adm_param_data_v5); - param_id = SRS_TRUMEDIA_PARAMS; - adm_params->params.param_size = - sizeof(struct srs_trumedia_params_GLOBAL); - glb_params = (struct srs_trumedia_params_GLOBAL *) - ((u8 *)adm_params + - sizeof(struct adm_cmd_set_pp_params_inband_v5)); - memcpy(glb_params, srs_params, - sizeof(struct srs_trumedia_params_GLOBAL)); break; } case SRS_ID_WOWHD: { - struct 
srs_trumedia_params_WOWHD *whd_params = NULL; - sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) + - sizeof(struct srs_trumedia_params_WOWHD); - adm_params = kzalloc(sz, GFP_KERNEL); - if (!adm_params) { - pr_err("%s, adm params memory alloc failed\n", - __func__); - return -ENOMEM; - } - adm_params->payload_size = - sizeof(struct srs_trumedia_params_WOWHD) + - sizeof(struct adm_param_data_v5); - param_id = SRS_TRUMEDIA_PARAMS_WOWHD; - adm_params->params.param_size = - sizeof(struct srs_trumedia_params_WOWHD); - whd_params = (struct srs_trumedia_params_WOWHD *) - ((u8 *)adm_params + - sizeof(struct adm_cmd_set_pp_params_inband_v5)); - memcpy(whd_params, srs_params, - sizeof(struct srs_trumedia_params_WOWHD)); + param_hdr.param_id = SRS_TRUMEDIA_PARAMS_WOWHD; + param_hdr.param_size = sizeof(struct srs_trumedia_params_WOWHD); break; } case SRS_ID_CSHP: { - struct srs_trumedia_params_CSHP *chp_params = NULL; - sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) + - sizeof(struct srs_trumedia_params_CSHP); - adm_params = kzalloc(sz, GFP_KERNEL); - if (!adm_params) { - pr_err("%s, adm params memory alloc failed\n", - __func__); - return -ENOMEM; - } - adm_params->payload_size = - sizeof(struct srs_trumedia_params_CSHP) + - sizeof(struct adm_param_data_v5); - param_id = SRS_TRUMEDIA_PARAMS_CSHP; - adm_params->params.param_size = - sizeof(struct srs_trumedia_params_CSHP); - chp_params = (struct srs_trumedia_params_CSHP *) - ((u8 *)adm_params + - sizeof(struct adm_cmd_set_pp_params_inband_v5)); - memcpy(chp_params, srs_params, - sizeof(struct srs_trumedia_params_CSHP)); + param_hdr.param_id = SRS_TRUMEDIA_PARAMS_CSHP; + param_hdr.param_size = sizeof(struct srs_trumedia_params_CSHP); break; } case SRS_ID_HPF: { - struct srs_trumedia_params_HPF *hpf_params = NULL; - sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) + - sizeof(struct srs_trumedia_params_HPF); - adm_params = kzalloc(sz, GFP_KERNEL); - if (!adm_params) { - pr_err("%s, adm params memory alloc failed\n", - 
__func__); - return -ENOMEM; - } - adm_params->payload_size = - sizeof(struct srs_trumedia_params_HPF) + - sizeof(struct adm_param_data_v5); - param_id = SRS_TRUMEDIA_PARAMS_HPF; - adm_params->params.param_size = - sizeof(struct srs_trumedia_params_HPF); - hpf_params = (struct srs_trumedia_params_HPF *) - ((u8 *)adm_params + - sizeof(struct adm_cmd_set_pp_params_inband_v5)); - memcpy(hpf_params, srs_params, - sizeof(struct srs_trumedia_params_HPF)); + param_hdr.param_id = SRS_TRUMEDIA_PARAMS_HPF; + param_hdr.param_size = sizeof(struct srs_trumedia_params_HPF); break; } case SRS_ID_AEQ: { - int *update_params_ptr = (int *)this_adm.outband_memmap.kvaddr; - outband = 1; - adm_params = kzalloc(sizeof(struct adm_cmd_set_pp_params_v5), - GFP_KERNEL); - adm_params_ = (struct adm_cmd_set_pp_params_v5 *)adm_params; - if (!adm_params_) { - pr_err("%s, adm params memory alloc failed\n", - __func__); - return -ENOMEM; - } + u8 *update_params_ptr = (u8 *) this_adm.outband_memmap.kvaddr; + + outband = true; - sz = sizeof(struct srs_trumedia_params_AEQ); if (update_params_ptr == NULL) { pr_err("ADM_SRS_TRUMEDIA - %s: null memmap for AEQ params\n", __func__); ret = -EINVAL; goto fail_cmd; } - param_id = SRS_TRUMEDIA_PARAMS_AEQ; - *update_params_ptr++ = module_id; - *update_params_ptr++ = param_id; - *update_params_ptr++ = sz; - memcpy(update_params_ptr, srs_params, sz); - adm_params_->payload_size = sz + 12; + param_hdr.param_id = SRS_TRUMEDIA_PARAMS_AEQ; + param_hdr.param_size = sizeof(struct srs_trumedia_params_AEQ); + ret = q6common_pack_pp_params(update_params_ptr, ¶m_hdr, + srs_params, &total_param_size); + if (ret) { + pr_err("%s: Failed to pack param header and data, error %d\n", + __func__, ret); + goto fail_cmd; + } break; } case SRS_ID_HL: { - struct srs_trumedia_params_HL *hl_params = NULL; - sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) + - sizeof(struct srs_trumedia_params_HL); - adm_params = kzalloc(sz, GFP_KERNEL); - if (!adm_params) { - pr_err("%s, adm params 
memory alloc failed\n", - __func__); - return -ENOMEM; - } - adm_params->payload_size = - sizeof(struct srs_trumedia_params_HL) + - sizeof(struct adm_param_data_v5); - param_id = SRS_TRUMEDIA_PARAMS_HL; - adm_params->params.param_size = - sizeof(struct srs_trumedia_params_HL); - hl_params = (struct srs_trumedia_params_HL *) - ((u8 *)adm_params + - sizeof(struct adm_cmd_set_pp_params_inband_v5)); - memcpy(hl_params, srs_params, - sizeof(struct srs_trumedia_params_HL)); + param_hdr.param_id = SRS_TRUMEDIA_PARAMS_HL; + param_hdr.param_size = sizeof(struct srs_trumedia_params_HL); break; } case SRS_ID_GEQ: { - struct srs_trumedia_params_GEQ *geq_params = NULL; - sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) + - sizeof(struct srs_trumedia_params_GEQ); - adm_params = kzalloc(sz, GFP_KERNEL); - if (!adm_params) { - pr_err("%s, adm params memory alloc failed\n", - __func__); - return -ENOMEM; - } - adm_params->payload_size = - sizeof(struct srs_trumedia_params_GEQ) + - sizeof(struct adm_param_data_v5); - param_id = SRS_TRUMEDIA_PARAMS_GEQ; - adm_params->params.param_size = - sizeof(struct srs_trumedia_params_GEQ); - geq_params = (struct srs_trumedia_params_GEQ *) - ((u8 *)adm_params + - sizeof(struct adm_cmd_set_pp_params_inband_v5)); - memcpy(geq_params, srs_params, - sizeof(struct srs_trumedia_params_GEQ)); - pr_debug("SRS - %s: GEQ params prepared\n", __func__); + param_hdr.param_id = SRS_TRUMEDIA_PARAMS_GEQ; + param_hdr.param_size = sizeof(struct srs_trumedia_params_GEQ); break; } default: goto fail_cmd; } - adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - adm_params->hdr.src_svc = APR_SVC_ADM; - adm_params->hdr.src_domain = APR_DOMAIN_APPS; - adm_params->hdr.src_port = port_id; - adm_params->hdr.dest_svc = APR_SVC_ADM; - adm_params->hdr.dest_domain = APR_DOMAIN_ADSP; - adm_params->hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - adm_params->hdr.token = port_idx << 16 | copp_idx; - 
adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; if (outband && this_adm.outband_memmap.paddr) { - adm_params->hdr.pkt_size = - sizeof(struct adm_cmd_set_pp_params_v5); - adm_params->payload_addr_lsw = lower_32_bits( - this_adm.outband_memmap.paddr); - adm_params->payload_addr_msw = msm_audio_populate_upper_32_bits( - this_adm.outband_memmap.paddr); - adm_params->mem_map_handle = atomic_read(&this_adm. - mem_map_handles[ADM_SRS_TRUMEDIA]); + mem_hdr.data_payload_addr_lsw = + lower_32_bits(this_adm.outband_memmap.paddr); + mem_hdr.data_payload_addr_msw = + msm_audio_populate_upper_32_bits( + this_adm.outband_memmap.paddr); + mem_hdr.mem_map_handle = atomic_read( + &this_adm.mem_map_handles[ADM_SRS_TRUMEDIA]); + + ret = adm_set_pp_params(port_id, copp_idx, &mem_hdr, NULL, + total_param_size); } else { - adm_params->hdr.pkt_size = sz; - adm_params->payload_addr_lsw = 0; - adm_params->payload_addr_msw = 0; - adm_params->mem_map_handle = 0; - - adm_params->params.module_id = module_id; - adm_params->params.param_id = param_id; - adm_params->params.reserved = 0; + ret = adm_pack_and_set_one_pp_param(port_id, copp_idx, + param_hdr, + (u8 *) srs_params); } - pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d, size %d, module id %x, param id %x.\n", - __func__, adm_params->hdr.dest_port, - adm_params->payload_size, module_id, param_id); - - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params); - if (ret < 0) { + if (ret < 0) pr_err("SRS - %s: ADM enable for port %d failed\n", __func__, port_id); - ret = -EINVAL; - goto fail_cmd; - } - /* Wait for the callback with copp id */ - ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: SRS set params timed out port = %d\n", - __func__, port_id); - ret = -EINVAL; - goto fail_cmd; - } else if 
(atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto fail_cmd; - } fail_cmd: - kfree(adm_params); return ret; } @@ -570,7 +419,7 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id, int channel_index) { struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL; - struct adm_param_data_v5 data_v5; + struct param_hdr_v3 data_v5; int ret = 0, port_idx, sz = 0, param_size = 0; u16 *adm_pspd_params; u16 *ptr; @@ -602,8 +451,8 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id, roundup(param_size, 4); sz = sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5) + - sizeof(struct default_chmixer_param_id_coeff) + - sizeof(struct adm_param_data_v5) + param_size; + sizeof(struct default_chmixer_param_id_coeff) + + sizeof(struct param_hdr_v3) + param_size; pr_debug("%s: sz = %d\n", __func__, sz); adm_params = kzalloc(sz, GFP_KERNEL); if (!adm_params) @@ -626,8 +475,8 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id, data_v5.reserved = 0; data_v5.param_size = param_size; adm_params->payload_size = - sizeof(struct default_chmixer_param_id_coeff) + - sizeof(struct adm_param_data_v5) + data_v5.param_size; + sizeof(struct default_chmixer_param_id_coeff) + + sizeof(struct param_hdr_v3) + data_v5.param_size; adm_pspd_params = (u16 *)((u8 *)adm_params + sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5)); memcpy(adm_pspd_params, &data_v5, sizeof(data_v5)); @@ -861,286 +710,267 @@ set_stereo_to_custom_stereo_return: return rc; } -int adm_dolby_dap_send_params(int port_id, int copp_idx, char *params, - uint32_t params_length) +/* + * With pre-packed data, only the opcode differes from V5 and V6. 
+ * Use q6common_pack_pp_params to pack the data correctly. + */ +int adm_set_pp_params(int port_id, int copp_idx, + struct mem_mapping_hdr *mem_hdr, u8 *param_data, + u32 param_size) { - struct adm_cmd_set_pp_params_v5 *adm_params = NULL; - int sz, rc = 0; - int port_idx; + struct adm_cmd_set_pp_params *adm_set_params = NULL; + int size = sizeof(struct adm_cmd_set_pp_params); + int port_idx = 0; + atomic_t *copp_stat = NULL; + int ret = 0; - pr_debug("%s:\n", __func__); port_id = afe_convert_virtual_to_portid(port_id); port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id); + if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) { + pr_err("%s: Invalid port_idx 0x%x\n", __func__, port_idx); + return -EINVAL; + } else if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { + pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx); return -EINVAL; } - sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length; - adm_params = kzalloc(sz, GFP_KERNEL); - if (!adm_params) { - pr_err("%s, adm params memory alloc failed", __func__); + /* Only add params_size in inband case */ + if (param_data != NULL) + size += param_size; + adm_set_params = kzalloc(size, GFP_KERNEL); + if (!adm_set_params) return -ENOMEM; - } - memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)), - params, params_length); - adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - adm_params->hdr.pkt_size = sz; - adm_params->hdr.src_svc = APR_SVC_ADM; - adm_params->hdr.src_domain = APR_DOMAIN_APPS; - adm_params->hdr.src_port = port_id; - adm_params->hdr.dest_svc = APR_SVC_ADM; - adm_params->hdr.dest_domain = APR_DOMAIN_ADSP; - adm_params->hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - adm_params->hdr.token = port_idx << 16 | copp_idx; - adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - adm_params->payload_addr_lsw = 0; - 
adm_params->payload_addr_msw = 0; - adm_params->mem_map_handle = 0; - adm_params->payload_size = params_length; + adm_set_params->apr_hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + adm_set_params->apr_hdr.pkt_size = size; + adm_set_params->apr_hdr.src_svc = APR_SVC_ADM; + adm_set_params->apr_hdr.src_domain = APR_DOMAIN_APPS; + adm_set_params->apr_hdr.src_port = port_id; + adm_set_params->apr_hdr.dest_svc = APR_SVC_ADM; + adm_set_params->apr_hdr.dest_domain = APR_DOMAIN_ADSP; + adm_set_params->apr_hdr.dest_port = + atomic_read(&this_adm.copp.id[port_idx][copp_idx]); + adm_set_params->apr_hdr.token = port_idx << 16 | copp_idx; + + if (q6common_is_instance_id_supported()) + adm_set_params->apr_hdr.opcode = ADM_CMD_SET_PP_PARAMS_V6; + else + adm_set_params->apr_hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; + + adm_set_params->payload_size = param_size; + + if (mem_hdr != NULL) { + /* Out of Band Case */ + adm_set_params->mem_hdr = *mem_hdr; + } else if (param_data != NULL) { + /* In band case. Parameter data must be pre-packed with its + * header before calling this function. Use + * q6common_pack_pp_params to pack parameter data and header + * correctly. 
+ */ + memcpy(&adm_set_params->param_data, param_data, param_size); + } else { + pr_err("%s: Received NULL pointers for both memory header and param data\n", + __func__); + ret = -EINVAL; + goto done; + } - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params); - if (rc < 0) { - pr_err("%s: Set params failed port = 0x%x rc %d\n", - __func__, port_id, rc); - rc = -EINVAL; - goto dolby_dap_send_param_return; + copp_stat = &this_adm.copp.stat[port_idx][copp_idx]; + atomic_set(copp_stat, -1); + ret = apr_send_pkt(this_adm.apr, (uint32_t *) adm_set_params); + if (ret < 0) { + pr_err("%s: Set params APR send failed port = 0x%x ret %d\n", + __func__, port_id, ret); + goto done; } - /* Wait for the callback */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: Set params timed out port = 0x%x\n", - __func__, port_id); - rc = -EINVAL; - goto dolby_dap_send_param_return; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto dolby_dap_send_param_return; + ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], + atomic_read(copp_stat) >= 0, + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_err("%s: Set params timed out port = 0x%x\n", __func__, + port_id); + ret = -ETIMEDOUT; + goto done; } - rc = 0; -dolby_dap_send_param_return: - kfree(adm_params); - return rc; + if (atomic_read(copp_stat) > 0) { + pr_err("%s: DSP returned error[%s]\n", __func__, + adsp_err_get_err_str(atomic_read(copp_stat))); + ret = adsp_err_get_lnx_err_code(atomic_read(copp_stat)); + goto done; + } + + ret = 0; +done: + kfree(adm_set_params); + 
return ret; } +EXPORT_SYMBOL(adm_set_pp_params); -int adm_send_params_v5(int port_id, int copp_idx, char *params, - uint32_t params_length) +int adm_pack_and_set_one_pp_param(int port_id, int copp_idx, + struct param_hdr_v3 param_hdr, u8 *param_data) { - struct adm_cmd_set_pp_params_v5 *adm_params = NULL; - int rc = 0; - int sz, port_idx; - - pr_debug("%s:\n", __func__); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id); - return -EINVAL; - } + u8 *packed_data = NULL; + u32 total_size = 0; + int ret = 0; - sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length; - adm_params = kzalloc(sz, GFP_KERNEL); - if (!adm_params) { - pr_err("%s, adm params memory alloc failed", __func__); + total_size = sizeof(union param_hdrs) + param_hdr.param_size; + packed_data = kzalloc(total_size, GFP_KERNEL); + if (!packed_data) return -ENOMEM; - } - memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)), - params, params_length); - adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - adm_params->hdr.pkt_size = sz; - adm_params->hdr.src_svc = APR_SVC_ADM; - adm_params->hdr.src_domain = APR_DOMAIN_APPS; - adm_params->hdr.src_port = port_id; - adm_params->hdr.dest_svc = APR_SVC_ADM; - adm_params->hdr.dest_domain = APR_DOMAIN_ADSP; - adm_params->hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - adm_params->hdr.token = port_idx << 16 | copp_idx; - adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - adm_params->payload_addr_lsw = 0; - adm_params->payload_addr_msw = 0; - adm_params->mem_map_handle = 0; - adm_params->payload_size = params_length; - - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params); - if (rc < 0) { - pr_err("%s: Set params failed port = 0x%x rc %d\n", - __func__, port_id, rc); - 
rc = -EINVAL; - goto send_param_return; - } - /* Wait for the callback */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: Set params timed out port = 0x%x\n", - __func__, port_id); - rc = -EINVAL; - goto send_param_return; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto send_param_return; + ret = q6common_pack_pp_params(packed_data, ¶m_hdr, param_data, + &total_size); + if (ret) { + pr_err("%s: Failed to pack parameter data, error %d\n", + __func__, ret); + goto done; } - rc = 0; -send_param_return: - kfree(adm_params); - return rc; + + ret = adm_set_pp_params(port_id, copp_idx, NULL, packed_data, + total_size); + if (ret) + pr_err("%s: Failed to set parameter data, error %d\n", __func__, + ret); +done: + kfree(packed_data); + return ret; } +EXPORT_SYMBOL(adm_pack_and_set_one_pp_param); -int adm_get_params_v2(int port_id, int copp_idx, uint32_t module_id, - uint32_t param_id, uint32_t params_length, - char *params, uint32_t client_id) +/* + * Only one parameter can be requested at a time. Therefore, packing and sending + * the request can be handled locally. 
+ */ +int adm_get_pp_params(int port_id, int copp_idx, uint32_t client_id, + struct mem_mapping_hdr *mem_hdr, + struct param_hdr_v3 *param_hdr, u8 *returned_param_data) { - struct adm_cmd_get_pp_params_v5 *adm_params = NULL; - int rc = 0, i = 0; - int port_idx, idx; - int *params_data = (int *)params; - uint64_t sz = 0; + struct adm_cmd_get_pp_params adm_get_params; + int total_size = 0; + int get_param_array_sz = ARRAY_SIZE(adm_get_parameters); + int returned_param_size = 0; + int returned_param_size_in_bytes = 0; + int port_idx = 0; + int idx = 0; + atomic_t *copp_stat = NULL; + int ret = 0; - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id); + if (param_hdr == NULL) { + pr_err("%s: Received NULL pointer for parameter header\n", + __func__); return -EINVAL; } - sz = (uint64_t)sizeof(struct adm_cmd_get_pp_params_v5) + - (uint64_t)params_length; - /* - * Check if the value of "sz" (which is ultimately assigned to - * "hdr.pkt_size") crosses U16_MAX. 
- */ - if (sz > U16_MAX) { - pr_err("%s: Invalid params_length\n", __func__); + port_id = afe_convert_virtual_to_portid(port_id); + port_idx = adm_validate_and_get_port_index(port_id); + if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) { + pr_err("%s: Invalid port_idx 0x%x\n", __func__, port_idx); return -EINVAL; } - adm_params = kzalloc(sz, GFP_KERNEL); - if (!adm_params) { - pr_err("%s: adm params memory alloc failed", __func__); - return -ENOMEM; + if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { + pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx); + return -EINVAL; } - memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_get_pp_params_v5)), - params, params_length); - adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - adm_params->hdr.pkt_size = sz; - adm_params->hdr.src_svc = APR_SVC_ADM; - adm_params->hdr.src_domain = APR_DOMAIN_APPS; - adm_params->hdr.src_port = port_id; - adm_params->hdr.dest_svc = APR_SVC_ADM; - adm_params->hdr.dest_domain = APR_DOMAIN_ADSP; - adm_params->hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - adm_params->hdr.token = port_idx << 16 | client_id << 8 | copp_idx; - adm_params->hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5; - adm_params->data_payload_addr_lsw = 0; - adm_params->data_payload_addr_msw = 0; - adm_params->mem_map_handle = 0; - adm_params->module_id = module_id; - adm_params->param_id = param_id; - adm_params->param_max_size = params_length; - adm_params->reserved = 0; + memset(&adm_get_params, 0, sizeof(adm_get_params)); - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params); - if (rc < 0) { - pr_err("%s: Failed to Get Params on port_id 0x%x %d\n", - __func__, port_id, rc); - rc = -EINVAL; - goto adm_get_param_return; + if (mem_hdr != NULL) + adm_get_params.mem_hdr = *mem_hdr; + + q6common_pack_pp_params((u8 *) &adm_get_params.param_hdr, param_hdr, + NULL, &total_size); + + /* Pack 
APR header after filling body so total_size has correct value */ + adm_get_params.apr_hdr.pkt_size = total_size; + adm_get_params.apr_hdr.src_svc = APR_SVC_ADM; + adm_get_params.apr_hdr.src_domain = APR_DOMAIN_APPS; + adm_get_params.apr_hdr.src_port = port_id; + adm_get_params.apr_hdr.dest_svc = APR_SVC_ADM; + adm_get_params.apr_hdr.dest_domain = APR_DOMAIN_ADSP; + adm_get_params.apr_hdr.dest_port = + atomic_read(&this_adm.copp.id[port_idx][copp_idx]); + adm_get_params.apr_hdr.token = + port_idx << 16 | client_id << 8 | copp_idx; + + if (q6common_is_instance_id_supported()) + adm_get_params.apr_hdr.opcode = ADM_CMD_GET_PP_PARAMS_V6; + else + adm_get_params.apr_hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5; + + copp_stat = &this_adm.copp.stat[port_idx][copp_idx]; + atomic_set(copp_stat, -1); + ret = apr_send_pkt(this_adm.apr, (uint32_t *) &adm_get_params); + if (ret) { + pr_err("%s: Get params APR send failed port = 0x%x ret %d\n", + __func__, port_id, ret); + ret = -EINVAL; + goto done; } - /* Wait for the callback with copp id */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: get params timed out port_id = 0x%x\n", __func__, - port_id); - rc = -EINVAL; - goto adm_get_param_return; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto adm_get_param_return; + ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], + atomic_read(copp_stat) >= 0, + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_err("%s: Get params timed out port = 0x%x\n", __func__, + port_id); + ret = -ETIMEDOUT; + goto done; + } + if (atomic_read(copp_stat) > 0) { + pr_err("%s: DSP returned error[%s]\n", 
__func__, + adsp_err_get_err_str(atomic_read(copp_stat))); + ret = adsp_err_get_lnx_err_code(atomic_read(copp_stat)); + goto done; } - idx = ADM_GET_PARAMETER_LENGTH * copp_idx; - if (adm_get_parameters[idx] < 0) { - pr_err("%s: Size is invalid %d\n", __func__, - adm_get_parameters[idx]); - rc = -EINVAL; - goto adm_get_param_return; - } - if ((params_data) && - (ARRAY_SIZE(adm_get_parameters) > - idx) && - (ARRAY_SIZE(adm_get_parameters) >= - 1+adm_get_parameters[idx]+idx) && - (params_length/sizeof(uint32_t) >= - adm_get_parameters[idx])) { - for (i = 0; i < adm_get_parameters[idx]; i++) - params_data[i] = adm_get_parameters[1+i+idx]; + ret = 0; - } else { - pr_err("%s: Get param data not copied! get_param array size %zd, index %d, params array size %zd, index %d\n", - __func__, ARRAY_SIZE(adm_get_parameters), - (1+adm_get_parameters[idx]+idx), - params_length/sizeof(int), - adm_get_parameters[idx]); + /* Copy data to caller if sent in band */ + if (!returned_param_data) { + pr_debug("%s: Received NULL pointer for param destination, not copying payload\n", + __func__); + return 0; } - rc = 0; -adm_get_param_return: - kfree(adm_params); - return rc; -} + idx = ADM_GET_PARAMETER_LENGTH * copp_idx; + returned_param_size = adm_get_parameters[idx]; + if (returned_param_size < 0 || + returned_param_size + idx + 1 > get_param_array_sz) { + pr_err("%s: Invalid parameter size %d\n", __func__, + returned_param_size); + return -EINVAL; + } -int adm_get_params(int port_id, int copp_idx, uint32_t module_id, - uint32_t param_id, uint32_t params_length, char *params) -{ - return adm_get_params_v2(port_id, copp_idx, module_id, param_id, - params_length, params, 0); + returned_param_size_in_bytes = returned_param_size * sizeof(uint32_t); + if (param_hdr->param_size < returned_param_size_in_bytes) { + pr_err("%s: Provided buffer is not big enough, provided buffer size(%d) size needed(%d)\n", + __func__, param_hdr->param_size, + returned_param_size_in_bytes); + return -EINVAL; + } + 
+ memcpy(returned_param_data, &adm_get_parameters[idx + 1], + returned_param_size_in_bytes); +done: + return ret; } +EXPORT_SYMBOL(adm_get_pp_params); -int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length, - char *params) +int adm_get_pp_topo_module_list_v2(int port_id, int copp_idx, + int32_t param_length, + int32_t *returned_params) { - struct adm_cmd_get_pp_topo_module_list_t *adm_pp_module_list = NULL; - int sz, rc = 0, i = 0; - int port_idx, idx; - int32_t *params_data = (int32_t *)params; + struct adm_cmd_get_pp_topo_module_list adm_get_module_list; + bool iid_supported = q6common_is_instance_id_supported(); int *topo_list; + int num_modules = 0; + int list_size = 0; + int port_idx, idx; + int i = 0; + atomic_t *copp_stat = NULL; + int ret = 0; pr_debug("%s : port_id %x", __func__, port_id); port_id = afe_convert_virtual_to_portid(port_id); @@ -1149,86 +979,102 @@ int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length, pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id); return -EINVAL; } - if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx); return -EINVAL; } - sz = sizeof(struct adm_cmd_get_pp_topo_module_list_t) + param_length; - adm_pp_module_list = kzalloc(sz, GFP_KERNEL); - if (!adm_pp_module_list) { - pr_err("%s, adm params memory alloc failed", __func__); - return -ENOMEM; - } + memset(&adm_get_module_list, 0, sizeof(adm_get_module_list)); - memcpy(((u8 *)adm_pp_module_list + - sizeof(struct adm_cmd_get_pp_topo_module_list_t)), - params, param_length); - adm_pp_module_list->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - adm_pp_module_list->hdr.pkt_size = sz; - adm_pp_module_list->hdr.src_svc = APR_SVC_ADM; - adm_pp_module_list->hdr.src_domain = APR_DOMAIN_APPS; - adm_pp_module_list->hdr.src_port = port_id; - adm_pp_module_list->hdr.dest_svc = APR_SVC_ADM; - 
adm_pp_module_list->hdr.dest_domain = APR_DOMAIN_ADSP; - adm_pp_module_list->hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - adm_pp_module_list->hdr.token = port_idx << 16 | copp_idx; - adm_pp_module_list->hdr.opcode = ADM_CMD_GET_PP_TOPO_MODULE_LIST; - adm_pp_module_list->param_max_size = param_length; - /* Payload address and mmap handle set to zero by kzalloc */ + adm_get_module_list.apr_hdr.pkt_size = sizeof(adm_get_module_list); + adm_get_module_list.apr_hdr.src_svc = APR_SVC_ADM; + adm_get_module_list.apr_hdr.src_domain = APR_DOMAIN_APPS; + adm_get_module_list.apr_hdr.src_port = port_id; + adm_get_module_list.apr_hdr.dest_svc = APR_SVC_ADM; + adm_get_module_list.apr_hdr.dest_domain = APR_DOMAIN_ADSP; + adm_get_module_list.apr_hdr.dest_port = + atomic_read(&this_adm.copp.id[port_idx][copp_idx]); + adm_get_module_list.apr_hdr.token = port_idx << 16 | copp_idx; + /* + * Out of band functionality is not currently utilized. + * Assume in band. + */ + if (iid_supported) { + adm_get_module_list.apr_hdr.opcode = + ADM_CMD_GET_PP_TOPO_MODULE_LIST_V2; + adm_get_module_list.param_max_size = param_length; + } else { + adm_get_module_list.apr_hdr.opcode = + ADM_CMD_GET_PP_TOPO_MODULE_LIST; - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); + if (param_length > U16_MAX) { + pr_err("%s: Invalid param length for V1 %d\n", __func__, + param_length); + return -EINVAL; + } + adm_get_module_list.param_max_size = param_length << 16; + } - rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_pp_module_list); - if (rc < 0) { - pr_err("%s: Failed to Get Params on port %d\n", __func__, - port_id); - rc = -EINVAL; - goto adm_pp_module_list_l; + copp_stat = &this_adm.copp.stat[port_idx][copp_idx]; + atomic_set(copp_stat, -1); + ret = apr_send_pkt(this_adm.apr, (uint32_t *) &adm_get_module_list); + if (ret) { + pr_err("%s: APR send pkt failed for port_id: 0x%x failed ret %d\n", + __func__, port_id, ret); + ret = -EINVAL; + goto done; } - /* Wait for the 
callback with copp id */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: get params timed out port = %d\n", __func__, - port_id); - rc = -EINVAL; - goto adm_pp_module_list_l; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto adm_pp_module_list_l; - } - if (params_data) { - idx = ADM_GET_TOPO_MODULE_LIST_LENGTH * copp_idx; - topo_list = (int *)(adm_module_topo_list + idx); - if (param_length <= ADM_GET_TOPO_MODULE_LIST_LENGTH && - idx < - (MAX_COPPS_PER_PORT * ADM_GET_TOPO_MODULE_LIST_LENGTH)) - memcpy(params_data, topo_list, param_length); - else - pr_debug("%s: i/p size:%d > MAX param size:%d\n", - __func__, param_length, - (int)ADM_GET_TOPO_MODULE_LIST_LENGTH); - for (i = 1; i <= params_data[0]; i++) - pr_debug("module = 0x%x\n", params_data[i]); + ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], + atomic_read(copp_stat) >= 0, + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_err("%s: Timeout for port_id: 0x%x\n", __func__, port_id); + ret = -ETIMEDOUT; + goto done; } - rc = 0; -adm_pp_module_list_l: - kfree(adm_pp_module_list); - pr_debug("%s : rc = %d ", __func__, rc); - return rc; + if (atomic_read(copp_stat) > 0) { + pr_err("%s: DSP returned error[%s]\n", __func__, + adsp_err_get_err_str(atomic_read(copp_stat))); + ret = adsp_err_get_lnx_err_code(atomic_read(copp_stat)); + goto done; + } + + ret = 0; + + if (returned_params) { + /* + * When processing ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST IID is + * added since it is not present. Therefore, there is no need to + * do anything different if IID is not supported here as it is + * already taken care of. 
+ */ + idx = ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH * copp_idx; + num_modules = adm_module_topo_list[idx]; + if (num_modules < 0 || num_modules > MAX_MODULES_IN_TOPO) { + pr_err("%s: Invalid number of modules returned %d\n", + __func__, num_modules); + return -EINVAL; + } + + list_size = num_modules * sizeof(struct module_instance_info); + if (param_length < list_size) { + pr_err("%s: Provided buffer not big enough to hold module-instance list, provided size %d, needed size %d\n", + __func__, param_length, list_size); + return -EINVAL; + } + + topo_list = (int32_t *) (&adm_module_topo_list[idx]); + memcpy(returned_params, topo_list, list_size); + for (i = 1; i <= num_modules; i += 2) { + pr_debug("module = 0x%x instance = 0x%x\n", + returned_params[i], returned_params[i + 1]); + } + } +done: + return ret; } +EXPORT_SYMBOL(adm_get_pp_topo_module_list_v2); + static void adm_callback_debug_print(struct apr_client_data *data) { uint32_t *payload; @@ -1288,13 +1134,122 @@ int adm_get_multi_ch_map(char *channel_map, int path) return 0; } +static int adm_process_get_param_response(u32 opcode, u32 idx, u32 *payload, + u32 payload_size) +{ + struct adm_cmd_rsp_get_pp_params_v5 *v5_rsp = NULL; + struct adm_cmd_rsp_get_pp_params_v6 *v6_rsp = NULL; + u32 *param_data = NULL; + int data_size; + int struct_size; + + if (payload == NULL) { + pr_err("%s: Payload is NULL\n", __func__); + return -EINVAL; + } + + switch (opcode) { + case ADM_CMDRSP_GET_PP_PARAMS_V5: + struct_size = sizeof(struct adm_cmd_rsp_get_pp_params_v5); + v5_rsp = (struct adm_cmd_rsp_get_pp_params_v5 *) payload; + data_size = v5_rsp->param_hdr.param_size; + param_data = v5_rsp->param_data; + break; + case ADM_CMDRSP_GET_PP_PARAMS_V6: + struct_size = sizeof(struct adm_cmd_rsp_get_pp_params_v6); + v6_rsp = (struct adm_cmd_rsp_get_pp_params_v6 *) payload; + data_size = v6_rsp->param_hdr.param_size; + param_data = v6_rsp->param_data; + break; + default: + pr_err("%s: Invalid opcode %d\n", __func__, opcode); + 
return -EINVAL; + } + + /* + * Just store the returned parameter data, not the header. The calling + * function is expected to know what it asked for. Therefore, there is + * no difference between V5 and V6. + */ + if ((payload_size >= struct_size + data_size) && + (ARRAY_SIZE(adm_get_parameters) > idx) && + (ARRAY_SIZE(adm_get_parameters) >= idx + 1 + data_size)) { + /* + * data_size is expressed in number of bytes, store in number of + * ints + */ + adm_get_parameters[idx] = + data_size / sizeof(*adm_get_parameters); + pr_debug("%s: GET_PP PARAM: received parameter length: 0x%x\n", + __func__, adm_get_parameters[idx]); + /* store params after param_size */ + memcpy(&adm_get_parameters[idx + 1], param_data, data_size); + return 0; + } + + pr_err("%s: Invlaid parameter combination, payload_size %d, idx %d\n", + __func__, payload_size, idx); + return -EINVAL; +} + +static int adm_process_get_topo_list_response(u32 opcode, int copp_idx, + u32 num_modules, u32 *payload, + u32 payload_size) +{ + u32 *fill_list = NULL; + int idx = 0; + int i = 0; + int j = 0; + + if (payload == NULL) { + pr_err("%s: Payload is NULL\n", __func__); + return -EINVAL; + } else if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) + pr_err("%s: Invalid COPP index %d\n", __func__, copp_idx); + return -EINVAL; + + idx = ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH * copp_idx; + fill_list = adm_module_topo_list + idx; + *fill_list++ = num_modules; + for (i = 0; i < num_modules; i++) { + if (j > payload_size / sizeof(u32)) { + pr_err("%s: Invalid number of modules specified %d\n", + __func__, num_modules); + return -EINVAL; + } + + /* store module ID */ + *fill_list++ = payload[j]; + j++; + + switch (opcode) { + case ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST_V2: + /* store instance ID */ + *fill_list++ = payload[j]; + j++; + break; + case ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST: + /* Insert IID 0 when repacking */ + *fill_list++ = INSTANCE_ID_0; + break; + default: + pr_err("%s: Invalid opcode %d\n", __func__, 
opcode); + return -EINVAL; + } + } + + return 0; +} + static int32_t adm_callback(struct apr_client_data *data, void *priv) { uint32_t *payload; int i, j, port_idx, copp_idx, idx, client_id; + int num_modules; + int ret; if (data == NULL) { - pr_err("%s: data paramter is null\n", __func__); + pr_err("%s: data parameter is null\n", __func__); return -EINVAL; } @@ -1312,7 +1267,8 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv) RESET_COPP_ID); atomic_set(&this_adm.copp.cnt[i][j], 0); atomic_set( - &this_adm.copp.topology[i][j], 0); + &this_adm.copp.topology[i][j], + 0); atomic_set(&this_adm.copp.mode[i][j], 0); atomic_set(&this_adm.copp.stat[i][j], @@ -1320,8 +1276,8 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv) atomic_set(&this_adm.copp.rate[i][j], 0); atomic_set( - &this_adm.copp.channels[i][j], - 0); + &this_adm.copp.channels[i][j], + 0); atomic_set( &this_adm.copp.bit_width[i][j], 0); atomic_set( @@ -1392,8 +1348,9 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv) } switch (payload[0]) { case ADM_CMD_SET_PP_PARAMS_V5: - pr_debug("%s: ADM_CMD_SET_PP_PARAMS_V5\n", - __func__); + case ADM_CMD_SET_PP_PARAMS_V6: + pr_debug("%s: ADM_CMD_SET_PP_PARAMS\n", + __func__); if (client_id == ADM_CLIENT_ID_SOURCE_TRACKING) this_adm.sourceTrackingData. apr_cmd_status = payload[1]; @@ -1450,8 +1407,9 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv) } break; case ADM_CMD_GET_PP_PARAMS_V5: - pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n", - __func__); + case ADM_CMD_GET_PP_PARAMS_V6: + pr_debug("%s: ADM_CMD_GET_PP_PARAMS\n", + __func__); /* Should only come here if there is an APR */ /* error or malformed APR packet. 
Otherwise */ /* response will be returned as */ @@ -1488,11 +1446,12 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv) &this_adm.copp.wait[port_idx][copp_idx]); break; case ADM_CMD_GET_PP_TOPO_MODULE_LIST: + case ADM_CMD_GET_PP_TOPO_MODULE_LIST_V2: pr_debug("%s:ADM_CMD_GET_PP_TOPO_MODULE_LIST\n", __func__); if (payload[1] != 0) - pr_err("%s: ADM get topo list error = %d,\n", - __func__, payload[1]); + pr_err("%s: ADM get topo list error = %d\n", + __func__, payload[1]); break; default: pr_err("%s: Unknown Cmd: 0x%x\n", __func__, @@ -1527,80 +1486,60 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv) } break; case ADM_CMDRSP_GET_PP_PARAMS_V5: - pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS_V5\n", __func__); - if (payload[0] != 0) - pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS_V5 returned error = 0x%x\n", - __func__, payload[0]); + case ADM_CMDRSP_GET_PP_PARAMS_V6: + pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS\n", __func__); if (client_id == ADM_CLIENT_ID_SOURCE_TRACKING) this_adm.sourceTrackingData.apr_cmd_status = - payload[0]; + payload[0]; else if (rtac_make_adm_callback(payload, - data->payload_size)) + data->payload_size)) break; idx = ADM_GET_PARAMETER_LENGTH * copp_idx; - if ((payload[0] == 0) && (data->payload_size > - (4 * sizeof(*payload))) && - (data->payload_size - 4 >= - payload[3]) && - (ARRAY_SIZE(adm_get_parameters) > - idx) && - (ARRAY_SIZE(adm_get_parameters)-idx-1 >= - payload[3])) { - adm_get_parameters[idx] = payload[3] / - sizeof(uint32_t); - /* - * payload[3] is param_size which is - * expressed in number of bytes - */ - pr_debug("%s: GET_PP PARAM:received parameter length: 0x%x\n", - __func__, adm_get_parameters[idx]); - /* storing param size then params */ - for (i = 0; i < payload[3] / - sizeof(uint32_t); i++) - adm_get_parameters[idx+1+i] = - payload[4+i]; - } else if (payload[0] == 0) { + if (payload[0] == 0 && data->payload_size > 0) { + pr_debug("%s: Received parameter data in band\n", + __func__); + ret = 
adm_process_get_param_response( + data->opcode, idx, payload, + data->payload_size); + if (ret) + pr_err("%s: Failed to process get param response, error %d\n", + __func__, ret); + } else if (payload[0] == 0 && data->payload_size == 0) { adm_get_parameters[idx] = -1; - pr_err("%s: Out of band case, setting size to %d\n", + pr_debug("%s: Out of band case, setting size to %d\n", __func__, adm_get_parameters[idx]); } else { adm_get_parameters[idx] = -1; - pr_err("%s: GET_PP_PARAMS failed, setting size to %d\n", - __func__, adm_get_parameters[idx]); + pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS returned error 0x%x\n", + __func__, payload[0]); } - atomic_set(&this_adm.copp.stat - [port_idx][copp_idx], payload[0]); + atomic_set(&this_adm.copp.stat[port_idx][copp_idx], + payload[0]); wake_up(&this_adm.copp.wait[port_idx][copp_idx]); break; case ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST: + case ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST_V2: pr_debug("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST\n", __func__); - if (payload[0] != 0) { - pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST", - __func__); - pr_err(":err = 0x%x\n", payload[0]); - } else if (payload[1] > - ((ADM_GET_TOPO_MODULE_LIST_LENGTH / - sizeof(uint32_t)) - 1)) { - pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST", - __func__); - pr_err(":size = %d\n", payload[1]); + num_modules = payload[1]; + pr_debug("%s: Num modules %d\n", __func__, num_modules); + if (payload[0]) { + pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST, error = %d\n", + __func__, payload[0]); + } else if (num_modules > MAX_MODULES_IN_TOPO) { + pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST invalid num modules received, num modules = %d\n", + __func__, num_modules); } else { - idx = ADM_GET_TOPO_MODULE_LIST_LENGTH * - copp_idx; - pr_debug("%s:Num modules payload[1] %d\n", - __func__, payload[1]); - adm_module_topo_list[idx] = payload[1]; - for (i = 1; i <= payload[1]; i++) { - adm_module_topo_list[idx+i] = - payload[1+i]; - pr_debug("%s:payload[%d] = %x\n", - __func__, (i+1), 
payload[1+i]); - } + ret = adm_process_get_topo_list_response( + data->opcode, copp_idx, num_modules, + payload, data->payload_size); + if (ret) + pr_err("%s: Failed to process get topo modules list response, error %d\n", + __func__, ret); } - atomic_set(&this_adm.copp.stat - [port_idx][copp_idx], payload[0]); + atomic_set(&this_adm.copp.stat[port_idx][copp_idx], + payload[0]); wake_up(&this_adm.copp.wait[port_idx][copp_idx]); break; case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS: @@ -1882,21 +1821,16 @@ done: } static int send_adm_cal_block(int port_id, int copp_idx, - struct cal_block_data *cal_block, int perf_mode, - int app_type, int acdb_id, int sample_rate) + struct cal_block_data *cal_block, int perf_mode) { - s32 result = 0; - struct adm_cmd_set_pp_params_v5 adm_params; - int port_idx; + struct mem_mapping_hdr mem_hdr = {0}; + int payload_size = 0; + int port_idx = 0; + int topology; + int result = 0; + + pr_debug("%s: Port id 0x%x,\n", __func__, port_id); - pr_debug("%s: Port id 0x%x sample_rate %d ,\n", __func__, - port_id, sample_rate); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id); - return -EINVAL; - } if (!cal_block) { pr_debug("%s: No ADM cal to send for port_id = 0x%x!\n", __func__, port_id); @@ -1904,75 +1838,38 @@ static int send_adm_cal_block(int port_id, int copp_idx, goto done; } if (cal_block->cal_data.size <= 0) { - pr_debug("%s: No ADM cal send for port_id = 0x%x!\n", - __func__, port_id); + pr_debug("%s: No ADM cal sent for port_id = 0x%x!\n", __func__, + port_id); result = -EINVAL; goto done; } + port_id = afe_convert_virtual_to_portid(port_id); + port_idx = adm_validate_and_get_port_index(port_id); + if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) { + pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id); + return -EINVAL; + } else if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { + pr_err("%s: Invalid 
copp_idx 0x%x\n", __func__, copp_idx); + return -EINVAL; + } + + topology = atomic_read(&this_adm.copp.topology[port_idx][copp_idx]); if (perf_mode == LEGACY_PCM_MODE && - ((atomic_read(&this_adm.copp.topology[port_idx][copp_idx])) == - DS2_ADM_COPP_TOPOLOGY_ID)) { + topology == DS2_ADM_COPP_TOPOLOGY_ID) { pr_err("%s: perf_mode %d, topology 0x%x\n", __func__, perf_mode, - atomic_read( - &this_adm.copp.topology[port_idx][copp_idx])); + topology); goto done; } - adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(20), APR_PKT_VER); - adm_params.hdr.pkt_size = sizeof(adm_params); - adm_params.hdr.src_svc = APR_SVC_ADM; - adm_params.hdr.src_domain = APR_DOMAIN_APPS; - adm_params.hdr.src_port = port_id; - adm_params.hdr.dest_svc = APR_SVC_ADM; - adm_params.hdr.dest_domain = APR_DOMAIN_ADSP; - - adm_params.hdr.token = port_idx << 16 | copp_idx; - adm_params.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - adm_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - adm_params.payload_addr_lsw = lower_32_bits(cal_block->cal_data.paddr); - adm_params.payload_addr_msw = msm_audio_populate_upper_32_bits( - cal_block->cal_data.paddr); - adm_params.mem_map_handle = cal_block->map_data.q6map_handle; - adm_params.payload_size = cal_block->cal_data.size; + mem_hdr.data_payload_addr_lsw = + lower_32_bits(cal_block->cal_data.paddr); + mem_hdr.data_payload_addr_msw = + msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr); + mem_hdr.mem_map_handle = cal_block->map_data.q6map_handle; + payload_size = cal_block->cal_data.size; - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - pr_debug("%s: Sending SET_PARAMS payload = 0x%pK, size = %d\n", - __func__, &cal_block->cal_data.paddr, - adm_params.payload_size); - result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params); - if (result < 0) { - pr_err("%s: Set params failed port 0x%x result %d\n", - __func__, port_id, result); - pr_debug("%s: Set params failed port = 0x%x payload = 
0x%pK result %d\n", - __func__, port_id, &cal_block->cal_data.paddr, result); - result = -EINVAL; - goto done; - } - /* Wait for the callback */ - result = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!result) { - pr_err("%s: Set params timed out port = 0x%x\n", - __func__, port_id); - pr_debug("%s: Set params timed out port = 0x%x, payload = 0x%pK\n", - __func__, port_id, &cal_block->cal_data.paddr); - result = -EINVAL; - goto done; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - result = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto done; - } + adm_set_pp_params(port_id, copp_idx, &mem_hdr, NULL, payload_size); done: return result; @@ -2089,8 +1986,7 @@ static int adm_remap_and_send_cal_block(int cal_index, int port_id, __func__, cal_index); goto done; } - ret = send_adm_cal_block(port_id, copp_idx, cal_block, perf_mode, - app_type, acdb_id, sample_rate); + ret = send_adm_cal_block(port_id, copp_idx, cal_block, perf_mode); if (ret < 0) pr_debug("%s: No cal sent for cal_index %d, port_id = 0x%x! 
ret %d sample_rate %d\n", __func__, cal_index, port_id, ret, sample_rate); @@ -2600,10 +2496,10 @@ int adm_open(int port_id, int path, int rate, int channel_mode, int topology, void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate) { - struct audproc_mfc_output_media_fmt mfc_cfg; + struct audproc_mfc_param_media_fmt mfc_cfg = {0}; struct adm_cmd_device_open_v5 open; + struct param_hdr_v3 param_hdr = {0}; int port_idx; - int sz = 0; int rc = 0; int i = 0; @@ -2620,32 +2516,13 @@ void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate) goto fail_cmd; } - sz = sizeof(struct audproc_mfc_output_media_fmt); + memset(&open, 0, sizeof(open)); + + param_hdr.module_id = AUDPROC_MODULE_ID_MFC; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT; + param_hdr.param_size = sizeof(mfc_cfg); - mfc_cfg.params.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - mfc_cfg.params.hdr.pkt_size = sz; - mfc_cfg.params.hdr.src_svc = APR_SVC_ADM; - mfc_cfg.params.hdr.src_domain = APR_DOMAIN_APPS; - mfc_cfg.params.hdr.src_port = port_id; - mfc_cfg.params.hdr.dest_svc = APR_SVC_ADM; - mfc_cfg.params.hdr.dest_domain = APR_DOMAIN_ADSP; - mfc_cfg.params.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - mfc_cfg.params.hdr.token = port_idx << 16 | copp_idx; - mfc_cfg.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - mfc_cfg.params.payload_addr_lsw = 0; - mfc_cfg.params.payload_addr_msw = 0; - mfc_cfg.params.mem_map_handle = 0; - mfc_cfg.params.payload_size = sizeof(mfc_cfg) - - sizeof(mfc_cfg.params); - mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC; - mfc_cfg.data.param_id = - AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT; - mfc_cfg.data.param_size = mfc_cfg.params.payload_size - - sizeof(mfc_cfg.data); - mfc_cfg.data.reserved = 0; mfc_cfg.sampling_rate = dst_sample_rate; mfc_cfg.bits_per_sample = atomic_read(&this_adm.copp.bit_width[port_idx][copp_idx]); 
@@ -2671,31 +2548,12 @@ void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate) mfc_cfg.bits_per_sample, mfc_cfg.num_channels, mfc_cfg.sampling_rate); - rc = apr_send_pkt(this_adm.apr, (uint32_t *)&mfc_cfg); + rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr, + (uint8_t *) &mfc_cfg); + if (rc) + pr_err("%s: Failed to set media format configuration data, err %d\n", + __func__, rc); - if (rc < 0) { - pr_err("%s: port_id: for[0x%x] failed %d\n", - __func__, port_id, rc); - goto fail_cmd; - } - /* Wait for the callback with copp id */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: mfc_cfg Set params timed out for port_id: for [0x%x]\n", - __func__, port_id); - goto fail_cmd; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - goto fail_cmd; - } - rc = 0; fail_cmd: return; } @@ -3545,134 +3403,43 @@ err: int adm_set_volume(int port_id, int copp_idx, int volume) { - struct audproc_volume_ctrl_master_gain audproc_vol; - int sz = 0; + struct audproc_volume_ctrl_master_gain audproc_vol = {0}; + struct param_hdr_v3 param_hdr = {0}; int rc = 0; - int port_idx; pr_debug("%s: port_id %d, volume %d\n", __func__, port_id, volume); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id %#x\n", __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } - if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { - pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx); - return -EINVAL; - } + param_hdr.module_id = AUDPROC_MODULE_ID_VOL_CTRL; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_VOL_CTRL_MASTER_GAIN; + 
param_hdr.param_size = sizeof(audproc_vol); - sz = sizeof(struct audproc_volume_ctrl_master_gain); - audproc_vol.params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - audproc_vol.params.hdr.pkt_size = sz; - audproc_vol.params.hdr.src_svc = APR_SVC_ADM; - audproc_vol.params.hdr.src_domain = APR_DOMAIN_APPS; - audproc_vol.params.hdr.src_port = port_id; - audproc_vol.params.hdr.dest_svc = APR_SVC_ADM; - audproc_vol.params.hdr.dest_domain = APR_DOMAIN_ADSP; - audproc_vol.params.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - audproc_vol.params.hdr.token = port_idx << 16 | copp_idx; - audproc_vol.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - audproc_vol.params.payload_addr_lsw = 0; - audproc_vol.params.payload_addr_msw = 0; - audproc_vol.params.mem_map_handle = 0; - audproc_vol.params.payload_size = sizeof(audproc_vol) - - sizeof(audproc_vol.params); - audproc_vol.data.module_id = AUDPROC_MODULE_ID_VOL_CTRL; - audproc_vol.data.param_id = AUDPROC_PARAM_ID_VOL_CTRL_MASTER_GAIN; - audproc_vol.data.param_size = audproc_vol.params.payload_size - - sizeof(audproc_vol.data); - audproc_vol.data.reserved = 0; audproc_vol.master_gain = volume; - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - rc = apr_send_pkt(this_adm.apr, (uint32_t *)&audproc_vol); - if (rc < 0) { - pr_err("%s: Set params failed port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } - /* Wait for the callback */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: Vol cntrl Set params timed out port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - rc 
= adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto fail_cmd; - } - rc = 0; -fail_cmd: + rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr, + (uint8_t *) &audproc_vol); + if (rc) + pr_err("%s: Failed to set volume, err %d\n", __func__, rc); + return rc; } int adm_set_softvolume(int port_id, int copp_idx, struct audproc_softvolume_params *softvol_param) { - struct audproc_soft_step_volume_params audproc_softvol; - int sz = 0; + struct audproc_soft_step_volume_params audproc_softvol = {0}; + struct param_hdr_v3 param_hdr = {0}; int rc = 0; - int port_idx; pr_debug("%s: period %d step %d curve %d\n", __func__, softvol_param->period, softvol_param->step, softvol_param->rampingcurve); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id %#x\n", __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } + param_hdr.module_id = AUDPROC_MODULE_ID_VOL_CTRL; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS; + param_hdr.param_size = sizeof(audproc_softvol); - if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { - pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx); - return -EINVAL; - } - - sz = sizeof(struct audproc_soft_step_volume_params); - - audproc_softvol.params.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - audproc_softvol.params.hdr.pkt_size = sz; - audproc_softvol.params.hdr.src_svc = APR_SVC_ADM; - audproc_softvol.params.hdr.src_domain = APR_DOMAIN_APPS; - audproc_softvol.params.hdr.src_port = port_id; - audproc_softvol.params.hdr.dest_svc = APR_SVC_ADM; - audproc_softvol.params.hdr.dest_domain = APR_DOMAIN_ADSP; - audproc_softvol.params.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - audproc_softvol.params.hdr.token = port_idx << 16 | copp_idx; - 
audproc_softvol.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - audproc_softvol.params.payload_addr_lsw = 0; - audproc_softvol.params.payload_addr_msw = 0; - audproc_softvol.params.mem_map_handle = 0; - audproc_softvol.params.payload_size = sizeof(audproc_softvol) - - sizeof(audproc_softvol.params); - audproc_softvol.data.module_id = AUDPROC_MODULE_ID_VOL_CTRL; - audproc_softvol.data.param_id = - AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS; - audproc_softvol.data.param_size = audproc_softvol.params.payload_size - - sizeof(audproc_softvol.data); - audproc_softvol.data.reserved = 0; audproc_softvol.period = softvol_param->period; audproc_softvol.step = softvol_param->step; audproc_softvol.ramping_curve = softvol_param->rampingcurve; @@ -3681,315 +3448,122 @@ int adm_set_softvolume(int port_id, int copp_idx, audproc_softvol.period, audproc_softvol.step, audproc_softvol.ramping_curve); - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - rc = apr_send_pkt(this_adm.apr, (uint32_t *)&audproc_softvol); - if (rc < 0) { - pr_err("%s: Set params failed port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } - /* Wait for the callback */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: Soft volume Set params timed out port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto fail_cmd; - } - rc = 0; -fail_cmd: + rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr, + (uint8_t *) &audproc_softvol); + if (rc) + pr_err("%s: Failed to set soft volume, err %d\n", __func__, rc); + return 
rc; } int adm_set_mic_gain(int port_id, int copp_idx, int volume) { - struct adm_set_mic_gain_params mic_gain_params; + struct admx_mic_gain mic_gain_params = {0}; + struct param_hdr_v3 param_hdr = {0}; int rc = 0; - int sz, port_idx; - pr_debug("%s:\n", __func__); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id); - return -EINVAL; - } + pr_debug("%s: Setting mic gain to %d at port_id 0x%x\n", __func__, + volume, port_id); - sz = sizeof(struct adm_set_mic_gain_params); + param_hdr.module_id = ADM_MODULE_IDX_MIC_GAIN_CTRL; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = ADM_PARAM_IDX_MIC_GAIN; + param_hdr.param_size = sizeof(mic_gain_params); - mic_gain_params.params.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - mic_gain_params.params.hdr.pkt_size = sz; - mic_gain_params.params.hdr.src_svc = APR_SVC_ADM; - mic_gain_params.params.hdr.src_domain = APR_DOMAIN_APPS; - mic_gain_params.params.hdr.src_port = port_id; - mic_gain_params.params.hdr.dest_svc = APR_SVC_ADM; - mic_gain_params.params.hdr.dest_domain = APR_DOMAIN_ADSP; - mic_gain_params.params.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - mic_gain_params.params.hdr.token = port_idx << 16 | copp_idx; - mic_gain_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - mic_gain_params.params.payload_addr_lsw = 0; - mic_gain_params.params.payload_addr_msw = 0; - mic_gain_params.params.mem_map_handle = 0; - mic_gain_params.params.payload_size = - sizeof(struct adm_param_data_v5) + - sizeof(struct admx_mic_gain); - mic_gain_params.data.module_id = ADM_MODULE_IDX_MIC_GAIN_CTRL; - mic_gain_params.data.param_id = ADM_PARAM_IDX_MIC_GAIN; - mic_gain_params.data.param_size = - sizeof(struct admx_mic_gain); - mic_gain_params.data.reserved = 0; - mic_gain_params.mic_gain_data.tx_mic_gain = volume; - 
mic_gain_params.mic_gain_data.reserved = 0; - pr_debug("%s: Mic Gain set to %d at port_id 0x%x\n", - __func__, volume, port_id); + mic_gain_params.tx_mic_gain = volume; + + rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr, + (uint8_t *) &mic_gain_params); + if (rc) + pr_err("%s: Failed to set mic gain, err %d\n", __func__, rc); - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - rc = apr_send_pkt(this_adm.apr, (uint32_t *)&mic_gain_params); - if (rc < 0) { - pr_err("%s: Set params failed port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } - /* Wait for the callback */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: Mic Gain Set params timed out port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto fail_cmd; - } - rc = 0; -fail_cmd: return rc; } int adm_send_set_multichannel_ec_primary_mic_ch(int port_id, int copp_idx, int primary_mic_ch) { - struct adm_set_sec_primary_ch_params sec_primary_ch_params; + struct admx_sec_primary_mic_ch sec_primary_ch_params = {0}; + struct param_hdr_v3 param_hdr = {0}; int rc = 0; - int sz, port_idx; pr_debug("%s port_id 0x%x, copp_idx 0x%x, primary_mic_ch %d\n", __func__, port_id, copp_idx, primary_mic_ch); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id); - return -EINVAL; - } - if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { - pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx); - 
return -EINVAL; - } + param_hdr.module_id = AUDPROC_MODULE_ID_VOICE_TX_SECNS; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_IDX_SEC_PRIMARY_MIC_CH; + param_hdr.param_size = sizeof(sec_primary_ch_params); - sz = sizeof(struct adm_set_sec_primary_ch_params); + sec_primary_ch_params.version = 0; + sec_primary_ch_params.sec_primary_mic_ch = primary_mic_ch; - sec_primary_ch_params.params.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - sec_primary_ch_params.params.hdr.pkt_size = sz; - sec_primary_ch_params.params.hdr.src_svc = APR_SVC_ADM; - sec_primary_ch_params.params.hdr.src_domain = APR_DOMAIN_APPS; - sec_primary_ch_params.params.hdr.src_port = port_id; - sec_primary_ch_params.params.hdr.dest_svc = APR_SVC_ADM; - sec_primary_ch_params.params.hdr.dest_domain = APR_DOMAIN_ADSP; - sec_primary_ch_params.params.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - sec_primary_ch_params.params.hdr.token = port_idx << 16 | copp_idx; - sec_primary_ch_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - sec_primary_ch_params.params.payload_addr_lsw = 0; - sec_primary_ch_params.params.payload_addr_msw = 0; - sec_primary_ch_params.params.mem_map_handle = 0; - sec_primary_ch_params.params.payload_size = - sizeof(struct adm_param_data_v5) + - sizeof(struct admx_sec_primary_mic_ch); - sec_primary_ch_params.data.module_id = - AUDPROC_MODULE_ID_VOICE_TX_SECNS; - sec_primary_ch_params.data.param_id = - AUDPROC_PARAM_IDX_SEC_PRIMARY_MIC_CH; - sec_primary_ch_params.data.param_size = - sizeof(struct admx_sec_primary_mic_ch); - sec_primary_ch_params.data.reserved = 0; - sec_primary_ch_params.sec_primary_mic_ch_data.version = 0; - sec_primary_ch_params.sec_primary_mic_ch_data.reserved = 0; - sec_primary_ch_params.sec_primary_mic_ch_data.sec_primary_mic_ch = - primary_mic_ch; - sec_primary_ch_params.sec_primary_mic_ch_data.reserved1 = 0; + rc = adm_pack_and_set_one_pp_param(port_id, 
copp_idx, param_hdr, + (uint8_t *) &sec_primary_ch_params); + if (rc) + pr_err("%s: Failed to set primary mic chanel, err %d\n", + __func__, rc); - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - rc = apr_send_pkt(this_adm.apr, (uint32_t *)&sec_primary_ch_params); - if (rc < 0) { - pr_err("%s: Set params failed port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } - /* Wait for the callback */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: Mic Set params timed out port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto fail_cmd; - } - rc = 0; -fail_cmd: return rc; } - -int adm_param_enable(int port_id, int copp_idx, int module_id, int enable) +int adm_param_enable(int port_id, int copp_idx, int module_id, int enable) { - struct audproc_enable_param_t adm_mod_enable; - int sz = 0; - int rc = 0; - int port_idx; + struct module_instance_info mod_inst_info = {0}; - pr_debug("%s port_id %d, module_id 0x%x, enable %d\n", - __func__, port_id, module_id, enable); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id %#x\n", __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } + mod_inst_info.module_id = module_id; + mod_inst_info.instance_id = INSTANCE_ID_0; - if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { - pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx); + return adm_param_enable_v2(port_id, copp_idx, mod_inst_info, enable); +} + +int adm_param_enable_v2(int 
port_id, int copp_idx, + struct module_instance_info mod_inst_info, int enable) +{ + uint32_t enable_param; + struct param_hdr_v3 param_hdr = {0}; + int rc = 0; + + if (enable < 0 || enable > 1) { + pr_err("%s: Invalid value for enable %d\n", __func__, enable); return -EINVAL; } - sz = sizeof(struct audproc_enable_param_t); + pr_debug("%s port_id %d, module_id 0x%x, instance_id 0x%x, enable %d\n", + __func__, port_id, mod_inst_info.module_id, + mod_inst_info.instance_id, enable); - adm_mod_enable.pp_params.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - adm_mod_enable.pp_params.hdr.pkt_size = sz; - adm_mod_enable.pp_params.hdr.src_svc = APR_SVC_ADM; - adm_mod_enable.pp_params.hdr.src_domain = APR_DOMAIN_APPS; - adm_mod_enable.pp_params.hdr.src_port = port_id; - adm_mod_enable.pp_params.hdr.dest_svc = APR_SVC_ADM; - adm_mod_enable.pp_params.hdr.dest_domain = APR_DOMAIN_ADSP; - adm_mod_enable.pp_params.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - adm_mod_enable.pp_params.hdr.token = port_idx << 16 | copp_idx; - adm_mod_enable.pp_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - adm_mod_enable.pp_params.payload_addr_lsw = 0; - adm_mod_enable.pp_params.payload_addr_msw = 0; - adm_mod_enable.pp_params.mem_map_handle = 0; - adm_mod_enable.pp_params.payload_size = sizeof(adm_mod_enable) - - sizeof(adm_mod_enable.pp_params) + - sizeof(adm_mod_enable.pp_params.params); - adm_mod_enable.pp_params.params.module_id = module_id; - adm_mod_enable.pp_params.params.param_id = AUDPROC_PARAM_ID_ENABLE; - adm_mod_enable.pp_params.params.param_size = - adm_mod_enable.pp_params.payload_size - - sizeof(adm_mod_enable.pp_params.params); - adm_mod_enable.pp_params.params.reserved = 0; - adm_mod_enable.enable = enable; + param_hdr.module_id = mod_inst_info.module_id; + param_hdr.instance_id = mod_inst_info.instance_id; + param_hdr.param_id = AUDPROC_PARAM_ID_ENABLE; + param_hdr.param_size = sizeof(enable_param); - 
atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); + enable_param = enable; + + rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr, + (uint8_t *) &enable_param); + if (rc) + pr_err("%s: Failed to set enable of module(%d) instance(%d) to %d, err %d\n", + __func__, mod_inst_info.module_id, + mod_inst_info.instance_id, enable, rc); - rc = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_mod_enable); - if (rc < 0) { - pr_err("%s: Set params failed port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto fail_cmd; - } - /* Wait for the callback */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: module %x enable %d timed out on port = %#x\n", - __func__, module_id, enable, port_id); - rc = -EINVAL; - goto fail_cmd; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto fail_cmd; - } - rc = 0; -fail_cmd: return rc; } +/* Parameter data must be pre-packed at the specified location with its + * header before calling this function. Use + * q6common_pack_pp_params to pack parameter data and header + * correctly. 
+ */ int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode, int cal_type, char *params, int size) { - struct adm_cmd_set_pp_params_v5 *adm_params = NULL; - int sz, rc = 0; - int port_idx; + int rc = 0; pr_debug("%s:port_id %d, path %d, perf_mode %d, cal_type %d, size %d\n", __func__, port_id, path, perf_mode, cal_type, size); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id %#x\n", __func__, port_id); - rc = -EINVAL; - goto end; - } - - if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { - pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx); - return -EINVAL; - } - /* Maps audio_dev_ctrl path definition to ACDB definition */ if (get_cal_path(path) != RX_DEVICE) { pr_err("%s: acdb_path %d\n", __func__, path); @@ -3997,64 +3571,9 @@ int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode, goto end; } - sz = sizeof(struct adm_cmd_set_pp_params_v5) + size; - adm_params = kzalloc(sz, GFP_KERNEL); - if (!adm_params) { - pr_err("%s, adm params memory alloc failed", __func__); - rc = -ENOMEM; - goto end; - } - - memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)), - params, size); - - adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - adm_params->hdr.pkt_size = sz; - adm_params->hdr.src_svc = APR_SVC_ADM; - adm_params->hdr.src_domain = APR_DOMAIN_APPS; - adm_params->hdr.src_port = port_id; - adm_params->hdr.dest_svc = APR_SVC_ADM; - adm_params->hdr.dest_domain = APR_DOMAIN_ADSP; - adm_params->hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - adm_params->hdr.token = port_idx << 16 | copp_idx; - adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - /* payload address and mmap handle initialized to zero by kzalloc */ - adm_params->payload_size = size; - - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - rc = 
apr_send_pkt(this_adm.apr, (uint32_t *)adm_params); - if (rc < 0) { - pr_err("%s: Set params failed port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto end; - } - /* Wait for the callback */ - rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!rc) { - pr_err("%s: Set params timed out port = %#x\n", - __func__, port_id); - rc = -EINVAL; - goto end; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto end; - } - rc = 0; + rc = adm_set_pp_params(port_id, copp_idx, NULL, (u8 *) params, size); end: - kfree(adm_params); return rc; } @@ -4236,155 +3755,52 @@ end: int adm_send_compressed_device_mute(int port_id, int copp_idx, bool mute_on) { - struct adm_set_compressed_device_mute mute_params; + u32 mute_param = mute_on ? 
1 : 0; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - int port_idx; pr_debug("%s port_id: 0x%x, copp_idx %d, mute_on: %d\n", __func__, port_id, copp_idx, mute_on); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) { - pr_err("%s: Invalid port_id %#x copp_idx %d\n", - __func__, port_id, copp_idx); - ret = -EINVAL; - goto end; - } - mute_params.command.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - mute_params.command.hdr.pkt_size = - sizeof(struct adm_set_compressed_device_mute); - mute_params.command.hdr.src_svc = APR_SVC_ADM; - mute_params.command.hdr.src_domain = APR_DOMAIN_APPS; - mute_params.command.hdr.src_port = port_id; - mute_params.command.hdr.dest_svc = APR_SVC_ADM; - mute_params.command.hdr.dest_domain = APR_DOMAIN_ADSP; - mute_params.command.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - mute_params.command.hdr.token = port_idx << 16 | copp_idx; - mute_params.command.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - mute_params.command.payload_addr_lsw = 0; - mute_params.command.payload_addr_msw = 0; - mute_params.command.mem_map_handle = 0; - mute_params.command.payload_size = sizeof(mute_params) - - sizeof(mute_params.command); - mute_params.params.module_id = AUDPROC_MODULE_ID_COMPRESSED_MUTE; - mute_params.params.param_id = AUDPROC_PARAM_ID_COMPRESSED_MUTE; - mute_params.params.param_size = mute_params.command.payload_size - - sizeof(mute_params.params); - mute_params.params.reserved = 0; - mute_params.mute_on = mute_on; + param_hdr.module_id = AUDPROC_MODULE_ID_COMPRESSED_MUTE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_COMPRESSED_MUTE; + param_hdr.param_size = sizeof(mute_param); - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - ret = apr_send_pkt(this_adm.apr, (uint32_t *)&mute_params); - if (ret < 0) { - pr_err("%s: 
device mute for port %d copp %d failed, ret %d\n", - __func__, port_id, copp_idx, ret); - ret = -EINVAL; - goto end; - } + ret = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr, + (uint8_t *) &mute_param); + if (ret) + pr_err("%s: Failed to set mute, err %d\n", __func__, ret); - /* Wait for the callback */ - ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: send device mute for port %d copp %d failed\n", - __func__, port_id, copp_idx); - ret = -EINVAL; - goto end; - } else if (atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto end; - } - ret = 0; -end: return ret; } int adm_send_compressed_device_latency(int port_id, int copp_idx, int latency) { - struct adm_set_compressed_device_latency latency_params; - int port_idx; + u32 latency_param; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; pr_debug("%s port_id: 0x%x, copp_idx %d latency: %d\n", __func__, port_id, copp_idx, latency); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) { - pr_err("%s: Invalid port_id %#x copp_idx %d\n", - __func__, port_id, copp_idx); - ret = -EINVAL; - goto end; + + if (latency < 0) { + pr_err("%s: Invalid value for latency %d", __func__, latency); + return -EINVAL; } - latency_params.command.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - latency_params.command.hdr.pkt_size = - sizeof(struct adm_set_compressed_device_latency); - latency_params.command.hdr.src_svc = APR_SVC_ADM; - latency_params.command.hdr.src_domain = APR_DOMAIN_APPS; - 
latency_params.command.hdr.src_port = port_id; - latency_params.command.hdr.dest_svc = APR_SVC_ADM; - latency_params.command.hdr.dest_domain = APR_DOMAIN_ADSP; - latency_params.command.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - latency_params.command.hdr.token = port_idx << 16 | copp_idx; - latency_params.command.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - latency_params.command.payload_addr_lsw = 0; - latency_params.command.payload_addr_msw = 0; - latency_params.command.mem_map_handle = 0; - latency_params.command.payload_size = sizeof(latency_params) - - sizeof(latency_params.command); - latency_params.params.module_id = AUDPROC_MODULE_ID_COMPRESSED_LATENCY; - latency_params.params.param_id = AUDPROC_PARAM_ID_COMPRESSED_LATENCY; - latency_params.params.param_size = latency_params.command.payload_size - - sizeof(latency_params.params); - latency_params.params.reserved = 0; - latency_params.latency = latency; + param_hdr.module_id = AUDPROC_MODULE_ID_COMPRESSED_LATENCY; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_COMPRESSED_LATENCY; + param_hdr.param_size = sizeof(latency_param); - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - ret = apr_send_pkt(this_adm.apr, (uint32_t *)&latency_params); - if (ret < 0) { - pr_err("%s: send device latency err %d for port %d copp %d\n", - __func__, port_id, copp_idx, ret); - ret = -EINVAL; - goto end; - } + latency_param = latency; + + ret = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr, + (uint8_t *) &latency_param); + if (ret) + pr_err("%s: Failed to set latency, err %d\n", __func__, ret); - /* Wait for the callback */ - ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: send device latency for port %d failed\n", __func__, - port_id); - ret = -EINVAL; - goto end; - } else if (atomic_read(&this_adm.copp.stat - 
[port_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto end; - } - ret = 0; -end: return ret; } @@ -4403,9 +3819,10 @@ end: int adm_swap_speaker_channels(int port_id, int copp_idx, int sample_rate, bool spk_swap) { - struct audproc_mfc_output_media_fmt mfc_cfg; + struct audproc_mfc_param_media_fmt mfc_cfg; + struct param_hdr_v3 param_hdr = {0}; uint16_t num_channels; - int port_idx; + int port_idx = 0; int ret = 0; pr_debug("%s: Enter, port_id %d, copp_idx %d\n", @@ -4414,50 +3831,26 @@ int adm_swap_speaker_channels(int port_id, int copp_idx, port_idx = adm_validate_and_get_port_index(port_id); if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) { pr_err("%s: Invalid port_id %#x\n", __func__, port_id); - ret = -EINVAL; - goto done; - } - - if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { - pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx); - ret = -EINVAL; - goto done; + return -EINVAL; + } else if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { + pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx); + return -EINVAL; } - num_channels = atomic_read( - &this_adm.copp.channels[port_idx][copp_idx]); + num_channels = atomic_read(&this_adm.copp.channels[port_idx][copp_idx]); if (num_channels != 2) { pr_debug("%s: Invalid number of channels: %d\n", __func__, num_channels); - ret = -EINVAL; - goto done; + return -EINVAL; } memset(&mfc_cfg, 0, sizeof(mfc_cfg)); - mfc_cfg.params.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - mfc_cfg.params.hdr.pkt_size = - sizeof(mfc_cfg); - mfc_cfg.params.hdr.src_svc = APR_SVC_ADM; - mfc_cfg.params.hdr.src_domain = APR_DOMAIN_APPS; - mfc_cfg.params.hdr.src_port = port_id; - mfc_cfg.params.hdr.dest_svc = APR_SVC_ADM; - mfc_cfg.params.hdr.dest_domain = APR_DOMAIN_ADSP; 
- mfc_cfg.params.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - mfc_cfg.params.hdr.token = port_idx << 16 | copp_idx; - mfc_cfg.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - mfc_cfg.params.payload_addr_lsw = 0; - mfc_cfg.params.payload_addr_msw = 0; - mfc_cfg.params.mem_map_handle = 0; - mfc_cfg.params.payload_size = sizeof(mfc_cfg) - - sizeof(mfc_cfg.params); - mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC; - mfc_cfg.data.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT; - mfc_cfg.data.param_size = mfc_cfg.params.payload_size - - sizeof(mfc_cfg.data); - mfc_cfg.data.reserved = 0; + + param_hdr.module_id = AUDPROC_MODULE_ID_MFC; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT; + param_hdr.param_size = sizeof(mfc_cfg); + mfc_cfg.sampling_rate = sample_rate; mfc_cfg.bits_per_sample = atomic_read(&this_adm.copp.bit_width[port_idx][copp_idx]); @@ -4476,153 +3869,56 @@ int adm_swap_speaker_channels(int port_id, int copp_idx, (uint16_t) PCM_CHANNEL_FR; } - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - pr_debug("%s: mfc config: port_idx %d copp_idx %d copp SR %d copp BW %d copp chan %d\n", - __func__, port_idx, copp_idx, mfc_cfg.sampling_rate, - mfc_cfg.bits_per_sample, mfc_cfg.num_channels); - - ret = apr_send_pkt(this_adm.apr, (uint32_t *)&mfc_cfg); + ret = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr, + (u8 *) &mfc_cfg); if (ret < 0) { - pr_err("%s: port_id: for[0x%x] failed %d\n", - __func__, port_id, ret); - goto done; - } - /* Wait for the callback with copp id */ - ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: mfc_cfg Set params timed out for port_id: for [0x%x]\n", - __func__, port_id); - ret = -ETIMEDOUT; - goto done; - } - - if (atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) > 0) { - pr_err("%s: DSP 
returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx]))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [port_idx][copp_idx])); - goto done; + pr_err("%s: Failed to set swap speaker channels on port[0x%x] failed %d\n", + __func__, port_id, ret); + return ret; } pr_debug("%s: mfc_cfg Set params returned success", __func__); - ret = 0; - -done: - return ret; + return 0; } EXPORT_SYMBOL(adm_swap_speaker_channels); int adm_set_sound_focus(int port_id, int copp_idx, struct sound_focus_param soundFocusData) { - struct adm_set_fluence_soundfocus_param soundfocus_params; - int sz = 0; + struct adm_param_fluence_soundfocus_t soundfocus_params; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - int port_idx; int i; pr_debug("%s: Enter, port_id %d, copp_idx %d\n", __func__, port_id, copp_idx); - port_id = afe_convert_virtual_to_portid(port_id); - port_idx = adm_validate_and_get_port_index(port_id); - if (port_idx < 0) { - pr_err("%s: Invalid port_id %#x\n", __func__, port_id); + param_hdr.module_id = VOICEPROC_MODULE_ID_GENERIC_TX; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS; + param_hdr.param_size = sizeof(soundfocus_params); - ret = -EINVAL; - goto done; - } - - if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) { - pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx); - - ret = -EINVAL; - goto done; - } - - sz = sizeof(struct adm_set_fluence_soundfocus_param); - soundfocus_params.params.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - soundfocus_params.params.hdr.pkt_size = sz; - soundfocus_params.params.hdr.src_svc = APR_SVC_ADM; - soundfocus_params.params.hdr.src_domain = APR_DOMAIN_APPS; - soundfocus_params.params.hdr.src_port = port_id; - soundfocus_params.params.hdr.dest_svc = APR_SVC_ADM; - soundfocus_params.params.hdr.dest_domain = APR_DOMAIN_ADSP; - 
soundfocus_params.params.hdr.dest_port = - atomic_read(&this_adm.copp.id[port_idx][copp_idx]); - soundfocus_params.params.hdr.token = port_idx << 16 | - ADM_CLIENT_ID_SOURCE_TRACKING << 8 | copp_idx; - soundfocus_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5; - soundfocus_params.params.payload_addr_lsw = 0; - soundfocus_params.params.payload_addr_msw = 0; - soundfocus_params.params.mem_map_handle = 0; - soundfocus_params.params.payload_size = sizeof(soundfocus_params) - - sizeof(soundfocus_params.params); - soundfocus_params.data.module_id = VOICEPROC_MODULE_ID_GENERIC_TX; - soundfocus_params.data.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS; - soundfocus_params.data.param_size = - soundfocus_params.params.payload_size - - sizeof(soundfocus_params.data); - soundfocus_params.data.reserved = 0; - - memset(&(soundfocus_params.soundfocus_data), 0xFF, - sizeof(struct adm_param_fluence_soundfocus_t)); + memset(&(soundfocus_params), 0xFF, sizeof(soundfocus_params)); for (i = 0; i < MAX_SECTORS; i++) { - soundfocus_params.soundfocus_data.start_angles[i] = + soundfocus_params.start_angles[i] = soundFocusData.start_angle[i]; - soundfocus_params.soundfocus_data.enables[i] = - soundFocusData.enable[i]; + soundfocus_params.enables[i] = soundFocusData.enable[i]; pr_debug("%s: start_angle[%d] = %d\n", __func__, i, soundFocusData.start_angle[i]); pr_debug("%s: enable[%d] = %d\n", __func__, i, soundFocusData.enable[i]); } - soundfocus_params.soundfocus_data.gain_step = - soundFocusData.gain_step; + soundfocus_params.gain_step = soundFocusData.gain_step; pr_debug("%s: gain_step = %d\n", __func__, soundFocusData.gain_step); - soundfocus_params.soundfocus_data.reserved = 0; + soundfocus_params.reserved = 0; - atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); - ret = apr_send_pkt(this_adm.apr, (uint32_t *)&soundfocus_params); - if (ret < 0) { - pr_err("%s: Set params failed\n", __func__); + ret = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr, + (uint8_t *) 
&soundfocus_params); + if (ret) + pr_err("%s: Failed to set sound focus params, err %d\n", + __func__, ret); - ret = -EINVAL; - goto done; - } - /* Wait for the callback */ - ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], - atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: Set params timed out\n", __func__); - - ret = -EINVAL; - goto done; - } - - if (this_adm.sourceTrackingData.apr_cmd_status != 0) { - pr_err("%s - set params returned error [%s]\n", - __func__, adsp_err_get_err_str( - this_adm.sourceTrackingData.apr_cmd_status)); - - ret = adsp_err_get_lnx_err_code( - this_adm.sourceTrackingData.apr_cmd_status); - goto done; - } - - ret = 0; - -done: pr_debug("%s: Exit, ret=%d\n", __func__, ret); return ret; @@ -4633,30 +3929,28 @@ int adm_get_sound_focus(int port_id, int copp_idx, { int ret = 0, i; char *params_value; - uint32_t param_payload_len = sizeof(struct adm_param_data_v5) + - sizeof(struct adm_param_fluence_soundfocus_t); - struct adm_param_fluence_soundfocus_t *soundfocus_params; + uint32_t max_param_size = 0; + struct adm_param_fluence_soundfocus_t *soundfocus_params = NULL; + struct param_hdr_v3 param_hdr = {0}; pr_debug("%s: Enter, port_id %d, copp_idx %d\n", __func__, port_id, copp_idx); - params_value = kzalloc(param_payload_len, GFP_KERNEL); - if (!params_value) { - pr_err("%s, params memory alloc failed\n", __func__); + max_param_size = sizeof(struct adm_param_fluence_soundfocus_t) + + sizeof(union param_hdrs); + params_value = kzalloc(max_param_size, GFP_KERNEL); + if (!params_value) + return -ENOMEM; - ret = -ENOMEM; - goto done; - } - ret = adm_get_params_v2(port_id, copp_idx, - VOICEPROC_MODULE_ID_GENERIC_TX, - VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS, - param_payload_len, - params_value, - ADM_CLIENT_ID_SOURCE_TRACKING); + param_hdr.module_id = VOICEPROC_MODULE_ID_GENERIC_TX; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = 
VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS; + param_hdr.param_size = max_param_size; + ret = adm_get_pp_params(port_id, copp_idx, + ADM_CLIENT_ID_SOURCE_TRACKING, NULL, ¶m_hdr, + params_value); if (ret) { pr_err("%s: get parameters failed ret:%d\n", __func__, ret); - - kfree(params_value); ret = -EINVAL; goto done; } @@ -4665,8 +3959,6 @@ int adm_get_sound_focus(int port_id, int copp_idx, pr_err("%s - get params returned error [%s]\n", __func__, adsp_err_get_err_str( this_adm.sourceTrackingData.apr_cmd_status)); - - kfree(params_value); ret = adsp_err_get_lnx_err_code( this_adm.sourceTrackingData.apr_cmd_status); goto done; @@ -4686,11 +3978,10 @@ int adm_get_sound_focus(int port_id, int copp_idx, soundFocusData->gain_step = soundfocus_params->gain_step; pr_debug("%s: gain_step = %d\n", __func__, soundFocusData->gain_step); - kfree(params_value); - done: pr_debug("%s: Exit, ret = %d\n", __func__, ret); + kfree(params_value); return ret; } @@ -4755,9 +4046,12 @@ done: int adm_get_source_tracking(int port_id, int copp_idx, struct source_tracking_param *sourceTrackingData) { - struct adm_cmd_get_pp_params_v5 admp; - int p_idx, ret = 0, i; - struct adm_param_fluence_sourcetracking_t *source_tracking_params; + struct adm_param_fluence_sourcetracking_t *source_tracking_params = + NULL; + struct mem_mapping_hdr mem_hdr = {0}; + struct param_hdr_v3 param_hdr = {0}; + int i = 0; + int ret = 0; pr_debug("%s: Enter, port_id %d, copp_idx %d\n", __func__, port_id, copp_idx); @@ -4771,68 +4065,34 @@ int adm_get_source_tracking(int port_id, int copp_idx, } } - port_id = afe_convert_virtual_to_portid(port_id); - p_idx = adm_validate_and_get_port_index(port_id); - if (p_idx < 0) { - pr_err("%s - invalid port index %i, port id %i, copp idx %i\n", - __func__, p_idx, port_id, copp_idx); - - ret = -EINVAL; - goto done; - } - - admp.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - admp.hdr.pkt_size = sizeof(admp); - admp.hdr.src_svc = 
APR_SVC_ADM; - admp.hdr.src_domain = APR_DOMAIN_APPS; - admp.hdr.src_port = port_id; - admp.hdr.dest_svc = APR_SVC_ADM; - admp.hdr.dest_domain = APR_DOMAIN_ADSP; - admp.hdr.dest_port = atomic_read(&this_adm.copp.id[p_idx][copp_idx]); - admp.hdr.token = p_idx << 16 | ADM_CLIENT_ID_SOURCE_TRACKING << 8 | - copp_idx; - admp.hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5; - admp.data_payload_addr_lsw = + mem_hdr.data_payload_addr_lsw = lower_32_bits(this_adm.sourceTrackingData.memmap.paddr); - admp.data_payload_addr_msw = - msm_audio_populate_upper_32_bits( - this_adm.sourceTrackingData.memmap.paddr); - admp.mem_map_handle = atomic_read(&this_adm.mem_map_handles[ - ADM_MEM_MAP_INDEX_SOURCE_TRACKING]); - admp.module_id = VOICEPROC_MODULE_ID_GENERIC_TX; - admp.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOURCETRACKING; - admp.param_max_size = sizeof(struct adm_param_fluence_sourcetracking_t) - + sizeof(struct adm_param_data_v5); - admp.reserved = 0; - - atomic_set(&this_adm.copp.stat[p_idx][copp_idx], -1); - - ret = apr_send_pkt(this_adm.apr, (uint32_t *)&admp); - if (ret < 0) { - pr_err("%s - failed to get Source Tracking Params\n", - __func__); - - ret = -EINVAL; - goto done; - } - ret = wait_event_timeout(this_adm.copp.wait[p_idx][copp_idx], - atomic_read(&this_adm.copp.stat[p_idx][copp_idx]) >= 0, - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s - get params timed out\n", __func__); + mem_hdr.data_payload_addr_msw = msm_audio_populate_upper_32_bits( + this_adm.sourceTrackingData.memmap.paddr); + mem_hdr.mem_map_handle = atomic_read( + &this_adm.mem_map_handles[ADM_MEM_MAP_INDEX_SOURCE_TRACKING]); + + param_hdr.module_id = VOICEPROC_MODULE_ID_GENERIC_TX; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOURCETRACKING; + /* + * This size should be the max size of the calibration data + header. + * Use the union size to ensure max size is used. 
+ */ + param_hdr.param_size = + sizeof(struct adm_param_fluence_sourcetracking_t) + + sizeof(union param_hdrs); - ret = -EINVAL; - goto done; - } else if (atomic_read(&this_adm.copp.stat - [p_idx][copp_idx]) > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_adm.copp.stat - [p_idx][copp_idx]))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_adm.copp.stat - [p_idx][copp_idx])); + /* + * Retrieving parameters out of band, so no need to provide a buffer for + * the returned parameter data as it will be at the memory location + * provided. + */ + ret = adm_get_pp_params(port_id, copp_idx, + ADM_CLIENT_ID_SOURCE_TRACKING, &mem_hdr, + ¶m_hdr, NULL); + if (ret) { + pr_err("%s: Failed to get params, error %d\n", __func__, ret); goto done; } @@ -4846,9 +4106,11 @@ int adm_get_source_tracking(int port_id, int copp_idx, goto done; } - source_tracking_params = (struct adm_param_fluence_sourcetracking_t *) - (this_adm.sourceTrackingData.memmap.kvaddr + - sizeof(struct adm_param_data_v5)); + /* How do we know what the param data was retrieved with for hdr size */ + source_tracking_params = + (struct adm_param_fluence_sourcetracking_t + *) (this_adm.sourceTrackingData.memmap.kvaddr + + sizeof(struct param_hdr_v1)); for (i = 0; i < MAX_SECTORS; i++) { sourceTrackingData->vad[i] = source_tracking_params->vad[i]; pr_debug("%s: vad[%d] = %d\n", @@ -4882,49 +4144,24 @@ done: static int __init adm_init(void) { int i = 0, j; - this_adm.apr = NULL; + this_adm.ec_ref_rx = -1; - this_adm.num_ec_ref_rx_chans = 0; - this_adm.ec_ref_rx_bit_width = 0; - this_adm.ec_ref_rx_sampling_rate = 0; - atomic_set(&this_adm.matrix_map_stat, 0); init_waitqueue_head(&this_adm.matrix_map_wait); - atomic_set(&this_adm.adm_stat, 0); init_waitqueue_head(&this_adm.adm_wait); for (i = 0; i < AFE_MAX_PORTS; i++) { for (j = 0; j < MAX_COPPS_PER_PORT; j++) { atomic_set(&this_adm.copp.id[i][j], RESET_COPP_ID); - atomic_set(&this_adm.copp.cnt[i][j], 0); - 
atomic_set(&this_adm.copp.topology[i][j], 0); - atomic_set(&this_adm.copp.mode[i][j], 0); - atomic_set(&this_adm.copp.stat[i][j], 0); - atomic_set(&this_adm.copp.rate[i][j], 0); - atomic_set(&this_adm.copp.channels[i][j], 0); - atomic_set(&this_adm.copp.bit_width[i][j], 0); - atomic_set(&this_adm.copp.app_type[i][j], 0); - atomic_set(&this_adm.copp.acdb_id[i][j], 0); init_waitqueue_head(&this_adm.copp.wait[i][j]); - atomic_set(&this_adm.copp.adm_delay_stat[i][j], 0); init_waitqueue_head( &this_adm.copp.adm_delay_wait[i][j]); - atomic_set(&this_adm.copp.topology[i][j], 0); - this_adm.copp.adm_delay[i][j] = 0; - this_adm.copp.adm_status[i][j] = 0; } } if (adm_init_cal_data()) pr_err("%s: could not init cal data!\n", __func__); - this_adm.sourceTrackingData.ion_client = NULL; - this_adm.sourceTrackingData.ion_handle = NULL; - this_adm.sourceTrackingData.memmap.size = 0; - this_adm.sourceTrackingData.memmap.kvaddr = NULL; - this_adm.sourceTrackingData.memmap.paddr = 0; this_adm.sourceTrackingData.apr_cmd_status = -1; - atomic_set(&this_adm.mem_map_handles[ADM_MEM_MAP_INDEX_SOURCE_TRACKING], - 0); return 0; } diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c index f0a78dc8aee8..93553f53d68b 100644 --- a/sound/soc/msm/qdsp6v2/q6afe.c +++ b/sound/soc/msm/qdsp6v2/q6afe.c @@ -23,6 +23,7 @@ #include <sound/apr_audio-v2.h> #include <sound/q6afe-v2.h> #include <sound/q6audio-v2.h> +#include <sound/q6common.h> #include "msm-pcm-routing-v2.h" #include <sound/audio_cal_utils.h> #include <sound/adsp_err.h> @@ -190,100 +191,125 @@ static void afe_callback_debug_print(struct apr_client_data *data) __func__, data->opcode, data->payload_size); } -static void av_dev_drift_afe_cb_handler(uint32_t *payload, +static void av_dev_drift_afe_cb_handler(uint32_t opcode, uint32_t *payload, uint32_t payload_size) { u32 param_id; - struct afe_av_dev_drift_get_param_resp *resp = - (struct afe_av_dev_drift_get_param_resp *) payload; - - if (!(&(resp->pdata))) { - 
pr_err("%s: Error: resp pdata is NULL\n", __func__); + size_t expected_size = + sizeof(u32) + sizeof(struct afe_param_id_dev_timing_stats); + + /* Get param ID depending on command type */ + param_id = (opcode == AFE_PORT_CMDRSP_GET_PARAM_V3) ? payload[3] : + payload[2]; + if (param_id != AFE_PARAM_ID_DEV_TIMING_STATS) { + pr_err("%s: Unrecognized param ID %d\n", __func__, param_id); return; } - param_id = resp->pdata.param_id; - if (param_id == AFE_PARAM_ID_DEV_TIMING_STATS) { - if (payload_size < sizeof(this_afe.av_dev_drift_resp)) { - pr_err("%s: Error: received size %d, resp size %zu\n", - __func__, payload_size, - sizeof(this_afe.av_dev_drift_resp)); + switch (opcode) { + case AFE_PORT_CMDRSP_GET_PARAM_V2: + expected_size += sizeof(struct param_hdr_v1); + if (payload_size < expected_size) { + pr_err("%s: Error: received size %d, expected size %zu\n", + __func__, payload_size, expected_size); + return; + } + /* Repack response to add IID */ + this_afe.av_dev_drift_resp.status = payload[0]; + this_afe.av_dev_drift_resp.pdata.module_id = payload[1]; + this_afe.av_dev_drift_resp.pdata.instance_id = INSTANCE_ID_0; + this_afe.av_dev_drift_resp.pdata.param_id = payload[2]; + this_afe.av_dev_drift_resp.pdata.param_size = payload[3]; + memcpy(&this_afe.av_dev_drift_resp.timing_stats, &payload[4], + sizeof(struct afe_param_id_dev_timing_stats)); + break; + case AFE_PORT_CMDRSP_GET_PARAM_V3: + expected_size += sizeof(struct param_hdr_v3); + if (payload_size < expected_size) { + pr_err("%s: Error: received size %d, expected size %zu\n", + __func__, payload_size, expected_size); return; } memcpy(&this_afe.av_dev_drift_resp, payload, sizeof(this_afe.av_dev_drift_resp)); - if (!this_afe.av_dev_drift_resp.status) { - atomic_set(&this_afe.state, 0); - } else { - pr_debug("%s: av_dev_drift_resp status: %d", __func__, - this_afe.av_dev_drift_resp.status); - atomic_set(&this_afe.state, -1); - } + break; + default: + pr_err("%s: Unrecognized command %d\n", __func__, opcode); + 
return; + } + + if (!this_afe.av_dev_drift_resp.status) { + atomic_set(&this_afe.state, 0); + } else { + pr_debug("%s: av_dev_drift_resp status: %d", __func__, + this_afe.av_dev_drift_resp.status); + atomic_set(&this_afe.state, -1); } } -static int32_t sp_make_afe_callback(uint32_t *payload, uint32_t payload_size) +static int32_t sp_make_afe_callback(uint32_t opcode, uint32_t *payload, + uint32_t payload_size) { - u32 param_id; - struct afe_spkr_prot_calib_get_resp *resp = - (struct afe_spkr_prot_calib_get_resp *) payload; - - if (!(&(resp->pdata))) { - pr_err("%s: Error: resp pdata is NULL\n", __func__); + struct param_hdr_v3 param_hdr = {0}; + u32 *data_dest = NULL; + u32 *data_start = NULL; + size_t expected_size = sizeof(u32); + + /* Set command specific details */ + switch (opcode) { + case AFE_PORT_CMDRSP_GET_PARAM_V2: + expected_size += sizeof(struct param_hdr_v1); + param_hdr.module_id = payload[1]; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = payload[2]; + param_hdr.param_size = payload[3]; + data_start = &payload[4]; + break; + case AFE_PORT_CMDRSP_GET_PARAM_V3: + expected_size += sizeof(struct param_hdr_v3); + memcpy(¶m_hdr, &payload[1], sizeof(struct param_hdr_v3)); + data_start = &payload[5]; + break; + default: + pr_err("%s: Unrecognized command %d\n", __func__, opcode); return -EINVAL; } - param_id = resp->pdata.param_id; - if (param_id == AFE_PARAM_ID_CALIB_RES_CFG_V2) { - if (payload_size < sizeof(this_afe.calib_data)) { - pr_err("%s: Error: received size %d, calib_data size %zu\n", - __func__, payload_size, - sizeof(this_afe.calib_data)); - return -EINVAL; - } - memcpy(&this_afe.calib_data, payload, - sizeof(this_afe.calib_data)); - if (!this_afe.calib_data.status) { - atomic_set(&this_afe.state, 0); - } else { - pr_debug("%s: calib resp status: %d", __func__, - this_afe.calib_data.status); - atomic_set(&this_afe.state, -1); - } + switch (param_hdr.param_id) { + case AFE_PARAM_ID_CALIB_RES_CFG_V2: + expected_size += 
sizeof(struct asm_calib_res_cfg); + data_dest = (u32 *) &this_afe.calib_data; + break; + case AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS: + expected_size += sizeof(struct afe_sp_th_vi_ftm_params); + data_dest = (u32 *) &this_afe.th_vi_resp; + break; + case AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS: + expected_size += sizeof(struct afe_sp_ex_vi_ftm_params); + data_dest = (u32 *) &this_afe.ex_vi_resp; + break; + default: + pr_err("%s: Unrecognized param ID %d\n", __func__, + param_hdr.param_id); + return -EINVAL; } - if (param_id == AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS) { - if (payload_size < sizeof(this_afe.th_vi_resp)) { - pr_err("%s: Error: received size %d, th_vi_resp size %zu\n", - __func__, payload_size, - sizeof(this_afe.th_vi_resp)); - return -EINVAL; - } - memcpy(&this_afe.th_vi_resp, payload, - sizeof(this_afe.th_vi_resp)); - if (!this_afe.th_vi_resp.status) { - atomic_set(&this_afe.state, 0); - } else { - pr_debug("%s: th vi resp status: %d", __func__, - this_afe.th_vi_resp.status); - atomic_set(&this_afe.state, -1); - } + + if (payload_size < expected_size) { + pr_err("%s: Error: received size %d, expected size %zu for param %d\n", + __func__, payload_size, expected_size, + param_hdr.param_id); + return -EINVAL; } - if (param_id == AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS) { - if (payload_size < sizeof(this_afe.ex_vi_resp)) { - pr_err("%s: Error: received size %d, ex_vi_resp size %zu\n", - __func__, payload_size, - sizeof(this_afe.ex_vi_resp)); - return -EINVAL; - } - memcpy(&this_afe.ex_vi_resp, payload, - sizeof(this_afe.ex_vi_resp)); - if (!this_afe.ex_vi_resp.status) { - atomic_set(&this_afe.state, 0); - } else { - pr_debug("%s: ex vi resp status: %d", __func__, - this_afe.ex_vi_resp.status); - atomic_set(&this_afe.state, -1); - } + + data_dest[0] = payload[0]; + memcpy(&data_dest[1], ¶m_hdr, sizeof(struct param_hdr_v3)); + memcpy(&data_dest[5], data_start, param_hdr.param_size); + + if (!data_dest[0]) { + atomic_set(&this_afe.state, 0); + } else { + pr_debug("%s: 
status: %d", __func__, data_dest[0]); + atomic_set(&this_afe.state, -1); } return 0; @@ -341,8 +367,10 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv) return 0; } afe_callback_debug_print(data); - if (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V2) { + if (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V2 || + data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V3) { uint32_t *payload = data->payload; + uint32_t param_id; if (!payload || (data->token >= AFE_MAX_PORTS)) { pr_err("%s: Error: size %d payload %pK token %d\n", @@ -351,15 +379,18 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv) return -EINVAL; } - if (payload[2] == AFE_PARAM_ID_DEV_TIMING_STATS) { - av_dev_drift_afe_cb_handler(data->payload, + param_id = (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V3) ? + payload[3] : + payload[2]; + if (param_id == AFE_PARAM_ID_DEV_TIMING_STATS) { + av_dev_drift_afe_cb_handler(data->opcode, data->payload, data->payload_size); } else { if (rtac_make_afe_callback(data->payload, data->payload_size)) return 0; - if (sp_make_afe_callback(data->payload, + if (sp_make_afe_callback(data->opcode, data->payload, data->payload_size)) return -EINVAL; } @@ -380,8 +411,9 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv) } switch (payload[0]) { case AFE_PORT_CMD_SET_PARAM_V2: + case AFE_PORT_CMD_SET_PARAM_V3: if (rtac_make_afe_callback(payload, - data->payload_size)) + data->payload_size)) return 0; case AFE_PORT_CMD_DEVICE_STOP: case AFE_PORT_CMD_DEVICE_START: @@ -392,6 +424,7 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv) case AFE_SERVICE_CMD_UNREGISTER_RT_PORT_DRIVER: case AFE_PORTS_CMD_DTMF_CTL: case AFE_SVC_CMD_SET_PARAM: + case AFE_SVC_CMD_SET_PARAM_V2: atomic_set(&this_afe.state, 0); wake_up(&this_afe.wait[data->token]); break; @@ -409,6 +442,28 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv) pr_debug("%s: AFE_CMD_ADD_TOPOLOGIES cmd 0x%x\n", __func__, payload[1]); break; + 
case AFE_PORT_CMD_GET_PARAM_V2: + case AFE_PORT_CMD_GET_PARAM_V3: + /* + * Should only come here if there is an APR + * error or malformed APR packet. Otherwise + * response will be returned as + * AFE_PORT_CMDRSP_GET_PARAM_V2/3 + */ + pr_debug("%s: AFE Get Param opcode 0x%x token 0x%x src %d dest %d\n", + __func__, data->opcode, data->token, + data->src_port, data->dest_port); + if (payload[1] != 0) { + pr_err("%s: ADM Get Param failed with error %d\n", + __func__, payload[1]); + if (rtac_make_afe_callback( + payload, + data->payload_size)) + return 0; + } + atomic_set(&this_afe.state, payload[1]); + wake_up(&this_afe.wait[data->token]); + break; default: pr_err("%s: Unknown cmd 0x%x\n", __func__, payload[0]); @@ -749,11 +804,402 @@ static int afe_apr_send_pkt(void *data, wait_queue_head_t *wait) return ret; } +/* This function shouldn't be called directly. Instead call q6afe_set_params. */ +static int q6afe_set_params_v2(u16 port_id, int index, + struct mem_mapping_hdr *mem_hdr, + u8 *packed_param_data, u32 packed_data_size) +{ + struct afe_port_cmd_set_param_v2 *set_param = NULL; + uint32_t size = sizeof(struct afe_port_cmd_set_param_v2); + int rc = 0; + + if (packed_param_data != NULL) + size += packed_data_size; + set_param = kzalloc(size, GFP_KERNEL); + if (set_param == NULL) + return -ENOMEM; + + set_param->apr_hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + set_param->apr_hdr.pkt_size = size; + set_param->apr_hdr.src_port = 0; + set_param->apr_hdr.dest_port = 0; + set_param->apr_hdr.token = index; + set_param->apr_hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; + set_param->port_id = port_id; + if (packed_data_size > U16_MAX) { + pr_err("%s: Invalid data size for set params V2 %d\n", __func__, + packed_data_size); + rc = -EINVAL; + goto done; + } + set_param->payload_size = packed_data_size; + if (mem_hdr != NULL) { + set_param->mem_hdr = *mem_hdr; + } else if (packed_param_data != NULL) { + 
memcpy(&set_param->param_data, packed_param_data, + packed_data_size); + } else { + pr_err("%s: Both memory header and param data are NULL\n", + __func__); + rc = -EINVAL; + goto done; + } + + rc = afe_apr_send_pkt(set_param, &this_afe.wait[index]); +done: + kfree(set_param); + return rc; +} + +/* This function shouldn't be called directly. Instead call q6afe_set_params. */ +static int q6afe_set_params_v3(u16 port_id, int index, + struct mem_mapping_hdr *mem_hdr, + u8 *packed_param_data, u32 packed_data_size) +{ + struct afe_port_cmd_set_param_v3 *set_param = NULL; + uint32_t size = sizeof(struct afe_port_cmd_set_param_v3); + int rc = 0; + + if (packed_param_data != NULL) + size += packed_data_size; + set_param = kzalloc(size, GFP_KERNEL); + if (set_param == NULL) + return -ENOMEM; + + set_param->apr_hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + set_param->apr_hdr.pkt_size = size; + set_param->apr_hdr.src_port = 0; + set_param->apr_hdr.dest_port = 0; + set_param->apr_hdr.token = index; + set_param->apr_hdr.opcode = AFE_PORT_CMD_SET_PARAM_V3; + set_param->port_id = port_id; + set_param->payload_size = packed_data_size; + if (mem_hdr != NULL) { + set_param->mem_hdr = *mem_hdr; + } else if (packed_param_data != NULL) { + memcpy(&set_param->param_data, packed_param_data, + packed_data_size); + } else { + pr_err("%s: Both memory header and param data are NULL\n", + __func__); + rc = -EINVAL; + goto done; + } + + rc = afe_apr_send_pkt(set_param, &this_afe.wait[index]); +done: + kfree(set_param); + return rc; +} + +static int q6afe_set_params(u16 port_id, int index, + struct mem_mapping_hdr *mem_hdr, + u8 *packed_param_data, u32 packed_data_size) +{ + int ret = 0; + + ret = afe_q6_interface_prepare(); + if (ret != 0) { + pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); + return ret; + ; + } + + port_id = q6audio_get_port_id(port_id); + ret = q6audio_validate_port(port_id); + if (ret < 0) { + pr_err("%s: 
Not a valid port id = 0x%x ret %d\n", __func__, + port_id, ret); + return -EINVAL; + } + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_err("%s: AFE port index[%d] invalid\n", __func__, index); + return -EINVAL; + } + + if (q6common_is_instance_id_supported()) + return q6afe_set_params_v3(port_id, index, mem_hdr, + packed_param_data, packed_data_size); + else + return q6afe_set_params_v2(port_id, index, mem_hdr, + packed_param_data, packed_data_size); +} + +static int q6afe_pack_and_set_param_in_band(u16 port_id, int index, + struct param_hdr_v3 param_hdr, + u8 *param_data) +{ + u8 *packed_param_data = NULL; + int packed_data_size = sizeof(union param_hdrs) + param_hdr.param_size; + int ret; + + packed_param_data = kzalloc(packed_data_size, GFP_KERNEL); + if (packed_param_data == NULL) + return -ENOMEM; + + ret = q6common_pack_pp_params(packed_param_data, ¶m_hdr, param_data, + &packed_data_size); + if (ret) { + pr_err("%s: Failed to pack param header and data, error %d\n", + __func__, ret); + goto fail_cmd; + } + + ret = q6afe_set_params(port_id, index, NULL, packed_param_data, + packed_data_size); + +fail_cmd: + kfree(packed_param_data); + return ret; +} + +/* This function shouldn't be called directly. Instead call q6afe_get_param. 
*/ +static int q6afe_get_params_v2(u16 port_id, int index, + struct mem_mapping_hdr *mem_hdr, + struct param_hdr_v3 *param_hdr) +{ + struct afe_port_cmd_get_param_v2 afe_get_param; + u32 param_size = param_hdr->param_size; + + memset(&afe_get_param, 0, sizeof(afe_get_param)); + afe_get_param.apr_hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + afe_get_param.apr_hdr.pkt_size = sizeof(afe_get_param) + param_size; + afe_get_param.apr_hdr.src_port = 0; + afe_get_param.apr_hdr.dest_port = 0; + afe_get_param.apr_hdr.token = index; + afe_get_param.apr_hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2; + afe_get_param.port_id = port_id; + afe_get_param.payload_size = sizeof(struct param_hdr_v1) + param_size; + if (mem_hdr != NULL) + afe_get_param.mem_hdr = *mem_hdr; + /* Set MID and PID in command */ + afe_get_param.module_id = param_hdr->module_id; + afe_get_param.param_id = param_hdr->param_id; + /* Set param header in payload */ + afe_get_param.param_hdr.module_id = param_hdr->module_id; + afe_get_param.param_hdr.param_id = param_hdr->param_id; + afe_get_param.param_hdr.param_size = param_size; + + return afe_apr_send_pkt(&afe_get_param, &this_afe.wait[index]); +} + +/* This function shouldn't be called directly. Instead call q6afe_get_param. 
*/ +static int q6afe_get_params_v3(u16 port_id, int index, + struct mem_mapping_hdr *mem_hdr, + struct param_hdr_v3 *param_hdr) +{ + struct afe_port_cmd_get_param_v3 afe_get_param; + + memset(&afe_get_param, 0, sizeof(afe_get_param)); + afe_get_param.apr_hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + afe_get_param.apr_hdr.pkt_size = sizeof(afe_get_param); + afe_get_param.apr_hdr.src_port = 0; + afe_get_param.apr_hdr.dest_port = 0; + afe_get_param.apr_hdr.token = index; + afe_get_param.apr_hdr.opcode = AFE_PORT_CMD_GET_PARAM_V3; + afe_get_param.port_id = port_id; + if (mem_hdr != NULL) + afe_get_param.mem_hdr = *mem_hdr; + /* Set param header in command, no payload in V3 */ + afe_get_param.param_hdr = *param_hdr; + + return afe_apr_send_pkt(&afe_get_param, &this_afe.wait[index]); +} + +/* + * Calling functions copy param data directly from this_afe. Do not copy data + * back to caller here. + */ +static int q6afe_get_params(u16 port_id, struct mem_mapping_hdr *mem_hdr, + struct param_hdr_v3 *param_hdr) +{ + int index; + int ret; + + ret = afe_q6_interface_prepare(); + if (ret != 0) { + pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); + return ret; + } + + port_id = q6audio_get_port_id(port_id); + ret = q6audio_validate_port(port_id); + if (ret < 0) { + pr_err("%s: Not a valid port id = 0x%x ret %d\n", __func__, + port_id, ret); + return -EINVAL; + } + + index = q6audio_get_port_index(port_id); + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_err("%s: AFE port index[%d] invalid\n", __func__, index); + return -EINVAL; + } + + if (q6common_is_instance_id_supported()) + return q6afe_get_params_v3(port_id, index, NULL, param_hdr); + else + return q6afe_get_params_v2(port_id, index, NULL, param_hdr); +} + +/* + * This function shouldn't be called directly. Instead call + * q6afe_svc_set_params. 
+ */ +static int q6afe_svc_set_params_v1(int index, struct mem_mapping_hdr *mem_hdr, + u8 *packed_param_data, u32 packed_data_size) +{ + struct afe_svc_cmd_set_param_v1 *svc_set_param = NULL; + uint32_t size = sizeof(struct afe_svc_cmd_set_param_v1); + int rc = 0; + + if (packed_param_data != NULL) + size += packed_data_size; + svc_set_param = kzalloc(size, GFP_KERNEL); + if (svc_set_param == NULL) + return -ENOMEM; + + svc_set_param->apr_hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + svc_set_param->apr_hdr.pkt_size = size; + svc_set_param->apr_hdr.src_port = 0; + svc_set_param->apr_hdr.dest_port = 0; + svc_set_param->apr_hdr.token = IDX_GLOBAL_CFG; + svc_set_param->apr_hdr.opcode = AFE_SVC_CMD_SET_PARAM; + svc_set_param->payload_size = packed_data_size; + + if (mem_hdr != NULL) { + /* Out of band case. */ + svc_set_param->mem_hdr = *mem_hdr; + } else if (packed_param_data != NULL) { + /* In band case. */ + memcpy(&svc_set_param->param_data, packed_param_data, + packed_data_size); + } else { + pr_err("%s: Both memory header and param data are NULL\n", + __func__); + rc = -EINVAL; + goto done; + } + + rc = afe_apr_send_pkt(svc_set_param, &this_afe.wait[index]); +done: + kfree(svc_set_param); + return rc; +} + +/* + * This function shouldn't be called directly. Instead call + * q6afe_svc_set_params. 
+ */ +static int q6afe_svc_set_params_v2(int index, struct mem_mapping_hdr *mem_hdr, + u8 *packed_param_data, u32 packed_data_size) +{ + struct afe_svc_cmd_set_param_v2 *svc_set_param = NULL; + uint16_t size = sizeof(struct afe_svc_cmd_set_param_v2); + int rc = 0; + + if (packed_param_data != NULL) + size += packed_data_size; + svc_set_param = kzalloc(size, GFP_KERNEL); + if (svc_set_param == NULL) + return -ENOMEM; + + svc_set_param->apr_hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + svc_set_param->apr_hdr.pkt_size = size; + svc_set_param->apr_hdr.src_port = 0; + svc_set_param->apr_hdr.dest_port = 0; + svc_set_param->apr_hdr.token = IDX_GLOBAL_CFG; + svc_set_param->apr_hdr.opcode = AFE_SVC_CMD_SET_PARAM_V2; + svc_set_param->payload_size = packed_data_size; + + if (mem_hdr != NULL) { + /* Out of band case. */ + svc_set_param->mem_hdr = *mem_hdr; + } else if (packed_param_data != NULL) { + /* In band case. */ + memcpy(&svc_set_param->param_data, packed_param_data, + packed_data_size); + } else { + pr_err("%s: Both memory header and param data are NULL\n", + __func__); + rc = -EINVAL; + goto done; + } + + rc = afe_apr_send_pkt(svc_set_param, &this_afe.wait[index]); +done: + kfree(svc_set_param); + return rc; +} + +static int q6afe_svc_set_params(int index, struct mem_mapping_hdr *mem_hdr, + u8 *packed_param_data, u32 packed_data_size) +{ + int ret; + + ret = afe_q6_interface_prepare(); + if (ret != 0) { + pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); + return ret; + } + + if (q6common_is_instance_id_supported()) + return q6afe_svc_set_params_v2(index, mem_hdr, + packed_param_data, + packed_data_size); + else + return q6afe_svc_set_params_v1(index, mem_hdr, + packed_param_data, + packed_data_size); +} + +static int q6afe_svc_pack_and_set_param_in_band(int index, + struct param_hdr_v3 param_hdr, + u8 *param_data) +{ + u8 *packed_param_data = NULL; + u32 packed_data_size = + sizeof(struct param_hdr_v3) 
+ param_hdr.param_size; + int ret = 0; + + packed_param_data = kzalloc(packed_data_size, GFP_KERNEL); + if (!packed_param_data) + return -ENOMEM; + + ret = q6common_pack_pp_params(packed_param_data, ¶m_hdr, param_data, + &packed_data_size); + if (ret) { + pr_err("%s: Failed to pack parameter header and data, error %d\n", + __func__, ret); + goto done; + } + + ret = q6afe_svc_set_params(index, NULL, packed_param_data, + packed_data_size); + +done: + kfree(packed_param_data); + return ret; +} + static int afe_send_cal_block(u16 port_id, struct cal_block_data *cal_block) { - int result = 0; - int index = 0; - struct afe_audioif_config_command_no_payload afe_cal; + struct mem_mapping_hdr mem_hdr = {0}; + int payload_size = 0; + int result = 0; if (!cal_block) { pr_debug("%s: No AFE cal to send!\n", __func__); @@ -766,34 +1212,19 @@ static int afe_send_cal_block(u16 port_id, struct cal_block_data *cal_block) goto done; } - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - result = -EINVAL; - goto done; - } - - afe_cal.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - afe_cal.hdr.pkt_size = sizeof(afe_cal); - afe_cal.hdr.src_port = 0; - afe_cal.hdr.dest_port = 0; - afe_cal.hdr.token = index; - afe_cal.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - afe_cal.param.port_id = port_id; - afe_cal.param.payload_size = cal_block->cal_data.size; - afe_cal.param.payload_address_lsw = + payload_size = cal_block->cal_data.size; + mem_hdr.data_payload_addr_lsw = lower_32_bits(cal_block->cal_data.paddr); - afe_cal.param.payload_address_msw = + mem_hdr.data_payload_addr_msw = msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr); - afe_cal.param.mem_map_handle = cal_block->map_data.q6map_handle; + mem_hdr.mem_map_handle = cal_block->map_data.q6map_handle; pr_debug("%s: AFE cal sent for device port = 0x%x, cal size = %zd, cal addr = 
0x%pK\n", __func__, port_id, cal_block->cal_data.size, &cal_block->cal_data.paddr); - result = afe_apr_send_pkt(&afe_cal, &this_afe.wait[index]); + result = q6afe_set_params(port_id, q6audio_get_port_index(port_id), + &mem_hdr, NULL, payload_size); if (result) pr_err("%s: AFE cal for port 0x%x failed %d\n", __func__, port_id, result); @@ -889,9 +1320,8 @@ unlock: static int afe_spk_ramp_dn_cfg(int port) { + struct param_hdr_v3 param_info = {0}; int ret = -EINVAL; - int index = 0; - struct afe_spkr_prot_config_command config; if (afe_get_port_type(port) != MSM_AFE_PORT_TYPE_RX) { pr_debug("%s: port doesn't match 0x%x\n", __func__, port); @@ -903,84 +1333,39 @@ static int afe_spk_ramp_dn_cfg(int port) __func__, port, ret, this_afe.vi_rx_port); return 0; } - memset(&config, 0 , sizeof(config)); - ret = q6audio_validate_port(port); - if (ret < 0) { - pr_err("%s: Invalid port 0x%x ret %d", __func__, port, ret); - ret = -EINVAL; - goto fail_cmd; - } - index = q6audio_get_port_index(port); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - ret = -EINVAL; - goto fail_cmd; - } - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port); - config.param.payload_size = - sizeof(config) - sizeof(config.hdr) - sizeof(config.param) - - sizeof(config.prot_config); - config.pdata.module_id = AFE_MODULE_FB_SPKR_PROT_V2_RX; - config.pdata.param_id = AFE_PARAM_ID_FBSP_PTONE_RAMP_CFG; - config.pdata.param_size = 0; - atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config); - if (ret < 0) { - pr_err("%s: port = 0x%x param = 0x%x failed %d\n", - __func__, port, config.pdata.param_id, ret); - goto 
fail_cmd; - } - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); + param_info.module_id = AFE_MODULE_FB_SPKR_PROT_V2_RX; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = AFE_PARAM_ID_FBSP_PTONE_RAMP_CFG; + param_info.param_size = 0; + + ret = q6afe_pack_and_set_param_in_band(port, + q6audio_get_port_index(port), + param_info, NULL); + if (ret) { + pr_err("%s: Failed to set speaker ramp duration param, err %d\n", + __func__, ret); goto fail_cmd; } + /* dsp needs atleast 15ms to ramp down pilot tone*/ usleep_range(15000, 15010); ret = 0; fail_cmd: - pr_debug("%s: config.pdata.param_id 0x%x status %d\n", - __func__, config.pdata.param_id, ret); -return ret; + pr_debug("%s: config.pdata.param_id 0x%x status %d\n", __func__, + param_info.param_id, ret); + return ret; } static int afe_spk_prot_prepare(int src_port, int dst_port, int param_id, - union afe_spkr_prot_config *prot_config) + union afe_spkr_prot_config *prot_config) { + struct param_hdr_v3 param_info = {0}; int ret = -EINVAL; - int index = 0; - struct afe_spkr_prot_config_command config; - memset(&config, 0 , sizeof(config)); - if (!prot_config) { - pr_err("%s: Invalid params\n", __func__); - goto fail_cmd; - } ret = q6audio_validate_port(src_port); if (ret < 0) { - pr_err("%s: Invalid src port 0x%x ret %d", - __func__, src_port, ret); + pr_err("%s: Invalid src port 0x%x ret %d", __func__, src_port, + ret); ret = -EINVAL; goto fail_cmd; } @@ -991,21 +1376,15 @@ static int afe_spk_prot_prepare(int src_port, int dst_port, int param_id, ret = -EINVAL; goto fail_cmd; } - index = q6audio_get_port_index(src_port); 
- if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - ret = -EINVAL; - goto fail_cmd; - } + switch (param_id) { case AFE_PARAM_ID_FBSP_MODE_RX_CFG: - config.pdata.module_id = AFE_MODULE_FB_SPKR_PROT_V2_RX; + param_info.module_id = AFE_MODULE_FB_SPKR_PROT_V2_RX; break; case AFE_PARAM_ID_FEEDBACK_PATH_CFG: this_afe.vi_tx_port = src_port; this_afe.vi_rx_port = dst_port; - config.pdata.module_id = AFE_MODULE_FEEDBACK; + param_info.module_id = AFE_MODULE_FEEDBACK; break; /* * AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG_V2 is same as @@ -1013,11 +1392,11 @@ static int afe_spk_prot_prepare(int src_port, int dst_port, int param_id, */ case AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG_V2: case AFE_PARAM_ID_SP_V2_TH_VI_FTM_CFG: - config.pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI; + param_info.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI; break; case AFE_PARAM_ID_SP_V2_EX_VI_MODE_CFG: case AFE_PARAM_ID_SP_V2_EX_VI_FTM_CFG: - config.pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI; + param_info.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI; break; default: pr_err("%s: default case 0x%x\n", __func__, param_id); @@ -1025,48 +1404,20 @@ static int afe_spk_prot_prepare(int src_port, int dst_port, int param_id, break; } - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(src_port); - config.param.payload_size = sizeof(config) - sizeof(config.hdr) - - sizeof(config.param); - config.pdata.param_id = param_id; - config.pdata.param_size = sizeof(config.prot_config); - config.prot_config = *prot_config; - atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config); - if (ret < 0) { - 
pr_err("%s: port = 0x%x param = 0x%x failed %d\n", - __func__, src_port, param_id, ret); - goto fail_cmd; - } - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); - goto fail_cmd; - } - ret = 0; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = param_id; + param_info.param_size = sizeof(union afe_spkr_prot_config); + + ret = q6afe_pack_and_set_param_in_band(src_port, + q6audio_get_port_index(src_port), + param_info, (u8 *) prot_config); + if (ret) + pr_err("%s: port = 0x%x param = 0x%x failed %d\n", __func__, + src_port, param_id, ret); + fail_cmd: - pr_debug("%s: config.pdata.param_id 0x%x status %d 0x%x\n", - __func__, config.pdata.param_id, ret, src_port); + pr_debug("%s: config.pdata.param_id 0x%x status %d 0x%x\n", __func__, + param_info.param_id, ret, src_port); return ret; } @@ -1212,14 +1563,13 @@ done: static int afe_send_hw_delay(u16 port_id, u32 rate) { - struct audio_cal_hw_delay_entry delay_entry; - struct afe_audioif_config_command config; - int index = 0; + struct audio_cal_hw_delay_entry delay_entry = {0}; + struct afe_param_id_device_hw_delay_cfg hw_delay; + struct param_hdr_v3 param_info = {0}; int ret = -EINVAL; pr_debug("%s:\n", __func__); - memset(&delay_entry, 0, sizeof(delay_entry)); delay_entry.sample_rate = rate; if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX) ret = afe_get_cal_hw_delay(TX_DEVICE, &delay_entry); @@ -1237,42 +1587,21 @@ static int afe_send_hw_delay(u16 port_id, u32 rate) goto fail_cmd; } - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", 
- __func__, index); - ret = -EINVAL; - goto fail_cmd; - } + param_info.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = AFE_PARAM_ID_DEVICE_HW_DELAY; + param_info.param_size = sizeof(hw_delay); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port_id); - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - config.pdata.param_id = AFE_PARAM_ID_DEVICE_HW_DELAY; - config.pdata.param_size = sizeof(config.port); - - config.port.hw_delay.delay_in_us = delay_entry.delay_usec; - config.port.hw_delay.device_hw_delay_minor_version = - AFE_API_VERSION_DEVICE_HW_DELAY; - - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); - if (ret) { + hw_delay.delay_in_us = delay_entry.delay_usec; + hw_delay.device_hw_delay_minor_version = + AFE_API_VERSION_DEVICE_HW_DELAY; + + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_info, (u8 *) &hw_delay); + if (ret) pr_err("%s: AFE hw delay for port 0x%x failed %d\n", __func__, port_id, ret); - goto fail_cmd; - } fail_cmd: pr_debug("%s: port_id 0x%x rate %u delay_usec %d status %d\n", @@ -1371,10 +1700,11 @@ unlock: static int afe_send_port_topology_id(u16 port_id) { - struct afe_audioif_config_command config; + struct afe_param_id_set_topology_cfg topology = {0}; + struct param_hdr_v3 param_info = {0}; + u32 topology_id = 0; int index = 0; int ret = 0; - u32 topology_id = 0; index = q6audio_get_port_index(port_id); if (index < 0 || index >= 
AFE_MAX_PORTS) { @@ -1390,32 +1720,17 @@ static int afe_send_port_topology_id(u16 port_id) goto done; } - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port_id); - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - config.pdata.param_id = AFE_PARAM_ID_SET_TOPOLOGY; - config.pdata.param_size = sizeof(config.port); - config.port.topology.minor_version = AFE_API_VERSION_TOPOLOGY_V1; - config.port.topology.topology_id = topology_id; - - pr_debug("%s: param PL size=%d iparam_size[%d][%zd %zd %zd %zd] param_id[0x%x]\n", - __func__, config.param.payload_size, config.pdata.param_size, - sizeof(config), sizeof(config.param), sizeof(config.port), - sizeof(struct apr_hdr), config.pdata.param_id); - - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + param_info.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = AFE_PARAM_ID_SET_TOPOLOGY; + param_info.param_size = sizeof(topology); + + topology.minor_version = AFE_API_VERSION_TOPOLOGY_V1; + topology.topology_id = topology_id; + + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_info, (u8 *) &topology); if (ret) { pr_err("%s: AFE set topology id enable for port 0x%x failed %d\n", __func__, port_id, ret); @@ -1568,33 +1883,24 @@ void afe_send_cal(u16 port_id) int afe_turn_onoff_hw_mad(u16 mad_type, u16 enable) { + struct afe_param_hw_mad_ctrl mad_enable_param = {0}; + struct param_hdr_v3 param_info = {0}; int ret; - struct 
afe_cmd_hw_mad_ctrl config; pr_debug("%s: enter\n", __func__); - memset(&config, 0, sizeof(config)); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = IDX_GLOBAL_CFG; - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = SLIMBUS_5_TX; - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_HW_MAD; - config.pdata.param_id = AFE_PARAM_ID_HW_MAD_CTRL; - config.pdata.param_size = sizeof(config.payload); - config.payload.minor_version = 1; - config.payload.mad_type = mad_type; - config.payload.mad_enable = enable; - - ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]); + + param_info.module_id = AFE_MODULE_HW_MAD; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = AFE_PARAM_ID_HW_MAD_CTRL; + param_info.param_size = sizeof(mad_enable_param); + + mad_enable_param.minor_version = 1; + mad_enable_param.mad_type = mad_type; + mad_enable_param.mad_enable = enable; + + ret = q6afe_pack_and_set_param_in_band(SLIMBUS_5_TX, IDX_GLOBAL_CFG, + param_info, + (u8 *) &mad_enable_param); if (ret) pr_err("%s: AFE_PARAM_ID_HW_MAD_CTRL failed %d\n", __func__, ret); @@ -1604,31 +1910,18 @@ int afe_turn_onoff_hw_mad(u16 mad_type, u16 enable) static int afe_send_slimbus_slave_cfg( struct afe_param_cdc_slimbus_slave_cfg *sb_slave_cfg) { + struct param_hdr_v3 param_hdr = {0}; int ret; - struct afe_svc_cmd_sb_slave_cfg config; pr_debug("%s: enter\n", __func__); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = 
IDX_GLOBAL_CFG; - config.hdr.opcode = AFE_SVC_CMD_SET_PARAM; - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG; - config.pdata.param_id = AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG; - config.pdata.param_size = - sizeof(struct afe_param_cdc_slimbus_slave_cfg); - config.sb_slave_cfg = *sb_slave_cfg; - - ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]); + param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG; + param_hdr.param_size = sizeof(struct afe_param_cdc_slimbus_slave_cfg); + + ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr, + (u8 *) sb_slave_cfg); if (ret) pr_err("%s: AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG failed %d\n", __func__, ret); @@ -1640,29 +1933,16 @@ static int afe_send_slimbus_slave_cfg( static int afe_send_codec_reg_page_config( struct afe_param_cdc_reg_page_cfg *cdc_reg_page_cfg) { - struct afe_svc_cmd_cdc_reg_page_cfg config; + struct param_hdr_v3 param_hdr = {0}; int ret; - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = IDX_GLOBAL_CFG; - config.hdr.opcode = AFE_SVC_CMD_SET_PARAM; - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG; - config.pdata.param_id = AFE_PARAM_ID_CDC_REG_PAGE_CFG; - config.pdata.param_size = - sizeof(struct afe_param_cdc_reg_page_cfg); - config.cdc_reg_page_cfg = *cdc_reg_page_cfg; - - ret = 
afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]); + param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_CDC_REG_PAGE_CFG; + param_hdr.param_size = sizeof(struct afe_param_cdc_reg_page_cfg); + + ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr, + (u8 *) cdc_reg_page_cfg); if (ret) pr_err("%s: AFE_PARAM_ID_CDC_REG_PAGE_CFG failed %d\n", __func__, ret); @@ -1673,186 +1953,116 @@ static int afe_send_codec_reg_page_config( static int afe_send_codec_reg_config( struct afe_param_cdc_reg_cfg_data *cdc_reg_cfg) { - int i, j, ret = -EINVAL; - int pkt_size, payload_size, reg_per_pkt, num_pkts, num_regs; - struct afe_svc_cmd_cdc_reg_cfg *config; - struct afe_svc_cmd_set_param *param; + u8 *packed_param_data = NULL; + u32 packed_data_size = 0; + u32 single_param_size = 0; + u32 max_data_size = 0; + u32 max_single_param = 0; + struct param_hdr_v3 param_hdr = {0}; + int idx = 0; + int ret = -EINVAL; - reg_per_pkt = (APR_MAX_BUF - sizeof(*config)) / - sizeof(struct afe_param_cdc_reg_cfg_payload); - if (reg_per_pkt > 0) { - num_pkts = (cdc_reg_cfg->num_registers / reg_per_pkt) + - (cdc_reg_cfg->num_registers % reg_per_pkt == 0 ? 0 : 1); - } else { - pr_err("%s: Failed to build codec reg config APR packet\n", - __func__); - return -EINVAL; - } + max_single_param = sizeof(struct param_hdr_v3) + + sizeof(struct afe_param_cdc_reg_cfg); + max_data_size = APR_MAX_BUF - sizeof(struct afe_svc_cmd_set_param_v2); + packed_param_data = kzalloc(max_data_size, GFP_KERNEL); + if (!packed_param_data) + return -ENOMEM; - for (j = 0; j < num_pkts; ++j) { - /* - * num_regs is set to reg_per_pkt on each pass through the loop - * except the last, when it is set to the number of registers - * remaining from the total - */ - num_regs = (j < (num_pkts - 1) ? 
reg_per_pkt : - cdc_reg_cfg->num_registers - (reg_per_pkt * j)); - payload_size = sizeof(struct afe_param_cdc_reg_cfg_payload) * - num_regs; - pkt_size = sizeof(*config) + payload_size; - pr_debug("%s: pkt_size %d, payload_size %d\n", __func__, - pkt_size, payload_size); - config = kzalloc(pkt_size, GFP_KERNEL); - if (!config) - return -ENOMEM; - - config->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - config->hdr.pkt_size = pkt_size; - config->hdr.src_port = 0; - config->hdr.dest_port = 0; - config->hdr.token = IDX_GLOBAL_CFG; - config->hdr.opcode = AFE_SVC_CMD_SET_PARAM; - - param = &config->param; - param->payload_size = payload_size; - param->payload_address_lsw = 0x00; - param->payload_address_msw = 0x00; - param->mem_map_handle = 0x00; - - for (i = 0; i < num_regs; i++) { - config->reg_data[i].common.module_id = - AFE_MODULE_CDC_DEV_CFG; - config->reg_data[i].common.param_id = - AFE_PARAM_ID_CDC_REG_CFG; - config->reg_data[i].common.param_size = - sizeof(config->reg_data[i].reg_cfg); - config->reg_data[i].reg_cfg = - cdc_reg_cfg->reg_data[i + (j * reg_per_pkt)]; + /* param_hdr is the same for all params sent, set once at top */ + param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_CDC_REG_CFG; + param_hdr.param_size = sizeof(struct afe_param_cdc_reg_cfg); + + while (idx < cdc_reg_cfg->num_registers) { + memset(packed_param_data, 0, max_data_size); + packed_data_size = 0; + single_param_size = 0; + + while (packed_data_size + max_single_param < max_data_size && + idx < cdc_reg_cfg->num_registers) { + ret = q6common_pack_pp_params( + packed_param_data + packed_data_size, + ¶m_hdr, (u8 *) &cdc_reg_cfg->reg_data[idx], + &single_param_size); + if (ret) { + pr_err("%s: Failed to pack parameters with error %d\n", + __func__, ret); + goto done; + } + packed_data_size += single_param_size; + idx++; } - ret = afe_apr_send_pkt(config, 
&this_afe.wait[IDX_GLOBAL_CFG]); + ret = q6afe_svc_set_params(IDX_GLOBAL_CFG, NULL, + packed_param_data, packed_data_size); if (ret) { pr_err("%s: AFE_PARAM_ID_CDC_REG_CFG failed %d\n", __func__, ret); - kfree(config); break; } - kfree(config); } - +done: + kfree(packed_param_data); return ret; } static int afe_init_cdc_reg_config(void) { + struct param_hdr_v3 param_hdr = {0}; int ret; - struct afe_svc_cmd_init_cdc_reg_cfg config; pr_debug("%s: enter\n", __func__); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = IDX_GLOBAL_CFG; - config.hdr.opcode = AFE_SVC_CMD_SET_PARAM; - - config.param.payload_size = sizeof(struct afe_port_param_data_v2); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - - config.init.module_id = AFE_MODULE_CDC_DEV_CFG; - config.init.param_id = AFE_PARAM_ID_CDC_REG_CFG_INIT; - config.init.param_size = 0; - - ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]); - if (ret) { + param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_CDC_REG_CFG_INIT; + + ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr, + NULL); + if (ret) pr_err("%s: AFE_PARAM_ID_CDC_INIT_REG_CFG failed %d\n", __func__, ret); - } return ret; } static int afe_send_slimbus_slave_port_cfg( - struct afe_param_slimbus_slave_port_cfg *port_config, u16 port_id) + struct afe_param_slimbus_slave_port_cfg *slim_slave_config, u16 port_id) { - int ret, index; - struct afe_cmd_hw_mad_slimbus_slave_port_cfg config; + struct param_hdr_v3 param_hdr = {0}; + int ret; pr_debug("%s: enter, port_id = 0x%x\n", __func__, port_id); - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] 
invalid!\n", - __func__, index); - return -EINVAL; - } - ret = q6audio_validate_port(port_id); - if (ret < 0) { - pr_err("%s: port id = 0x%x ret %d\n", __func__, port_id, ret); - return -EINVAL; - } - - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = port_id; - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_HW_MAD; - config.pdata.param_id = AFE_PARAM_ID_SLIMBUS_SLAVE_PORT_CFG; - config.pdata.param_size = sizeof(*port_config); - config.sb_port_cfg = *port_config; - - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); - if (ret) { + param_hdr.module_id = AFE_MODULE_HW_MAD; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.reserved = 0; + param_hdr.param_id = AFE_PARAM_ID_SLIMBUS_SLAVE_PORT_CFG; + param_hdr.param_size = sizeof(struct afe_param_slimbus_slave_port_cfg); + + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, + (u8 *) slim_slave_config); + if (ret) pr_err("%s: AFE_PARAM_ID_SLIMBUS_SLAVE_PORT_CFG failed %d\n", __func__, ret); - } + pr_debug("%s: leave %d\n", __func__, ret); return ret; } static int afe_aanc_port_cfg(void *apr, uint16_t tx_port, uint16_t rx_port) { - struct afe_port_cmd_set_aanc_param cfg; + struct afe_param_aanc_port_cfg aanc_port_cfg = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - int index = 0; pr_debug("%s: tx_port 0x%x, rx_port 0x%x\n", __func__, tx_port, rx_port); - ret = afe_q6_interface_prepare(); - if (ret != 0) { - pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); - return -EINVAL; - } - - index = 
q6audio_get_port_index(tx_port); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - return -EINVAL; - } - ret = q6audio_validate_port(tx_port); - if (ret < 0) { - pr_err("%s: port id: 0x%x ret %d\n", __func__, tx_port, ret); - return -EINVAL; - } - pr_debug("%s: AANC sample rate tx rate: %d rx rate %d\n", - __func__, this_afe.aanc_info.aanc_tx_port_sample_rate, - this_afe.aanc_info.aanc_rx_port_sample_rate); + pr_debug("%s: AANC sample rate tx rate: %d rx rate %d\n", __func__, + this_afe.aanc_info.aanc_tx_port_sample_rate, + this_afe.aanc_info.aanc_rx_port_sample_rate); /* * If aanc tx sample rate or rx sample rate is zero, skip aanc * configuration as AFE resampler will fail for invalid sample @@ -1863,176 +2073,103 @@ static int afe_aanc_port_cfg(void *apr, uint16_t tx_port, uint16_t rx_port) return -EINVAL; } - cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - cfg.hdr.pkt_size = sizeof(cfg); - cfg.hdr.src_port = 0; - cfg.hdr.dest_port = 0; - cfg.hdr.token = index; - cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - - cfg.param.port_id = tx_port; - cfg.param.payload_size = sizeof(struct afe_port_param_data_v2) + - sizeof(struct afe_param_aanc_port_cfg); - cfg.param.payload_address_lsw = 0; - cfg.param.payload_address_msw = 0; - cfg.param.mem_map_handle = 0; + param_hdr.module_id = AFE_MODULE_AANC; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_AANC_PORT_CONFIG; + param_hdr.param_size = sizeof(struct afe_param_aanc_port_cfg); - cfg.pdata.module_id = AFE_MODULE_AANC; - cfg.pdata.param_id = AFE_PARAM_ID_AANC_PORT_CONFIG; - cfg.pdata.param_size = sizeof(struct afe_param_aanc_port_cfg); - cfg.pdata.reserved = 0; - - cfg.data.aanc_port_cfg.aanc_port_cfg_minor_version = + aanc_port_cfg.aanc_port_cfg_minor_version = AFE_API_VERSION_AANC_PORT_CONFIG; - cfg.data.aanc_port_cfg.tx_port_sample_rate = + aanc_port_cfg.tx_port_sample_rate = 
this_afe.aanc_info.aanc_tx_port_sample_rate; - cfg.data.aanc_port_cfg.tx_port_channel_map[0] = AANC_TX_VOICE_MIC; - cfg.data.aanc_port_cfg.tx_port_channel_map[1] = AANC_TX_NOISE_MIC; - cfg.data.aanc_port_cfg.tx_port_channel_map[2] = AANC_TX_ERROR_MIC; - cfg.data.aanc_port_cfg.tx_port_channel_map[3] = AANC_TX_MIC_UNUSED; - cfg.data.aanc_port_cfg.tx_port_channel_map[4] = AANC_TX_MIC_UNUSED; - cfg.data.aanc_port_cfg.tx_port_channel_map[5] = AANC_TX_MIC_UNUSED; - cfg.data.aanc_port_cfg.tx_port_channel_map[6] = AANC_TX_MIC_UNUSED; - cfg.data.aanc_port_cfg.tx_port_channel_map[7] = AANC_TX_MIC_UNUSED; - cfg.data.aanc_port_cfg.tx_port_num_channels = 3; - cfg.data.aanc_port_cfg.rx_path_ref_port_id = rx_port; - cfg.data.aanc_port_cfg.ref_port_sample_rate = - this_afe.aanc_info.aanc_rx_port_sample_rate; - - ret = afe_apr_send_pkt((uint32_t *) &cfg, &this_afe.wait[index]); - if (ret) { + aanc_port_cfg.tx_port_channel_map[0] = AANC_TX_VOICE_MIC; + aanc_port_cfg.tx_port_channel_map[1] = AANC_TX_NOISE_MIC; + aanc_port_cfg.tx_port_channel_map[2] = AANC_TX_ERROR_MIC; + aanc_port_cfg.tx_port_channel_map[3] = AANC_TX_MIC_UNUSED; + aanc_port_cfg.tx_port_channel_map[4] = AANC_TX_MIC_UNUSED; + aanc_port_cfg.tx_port_channel_map[5] = AANC_TX_MIC_UNUSED; + aanc_port_cfg.tx_port_channel_map[6] = AANC_TX_MIC_UNUSED; + aanc_port_cfg.tx_port_channel_map[7] = AANC_TX_MIC_UNUSED; + aanc_port_cfg.tx_port_num_channels = 3; + aanc_port_cfg.rx_path_ref_port_id = rx_port; + aanc_port_cfg.ref_port_sample_rate = + this_afe.aanc_info.aanc_rx_port_sample_rate; + + ret = q6afe_pack_and_set_param_in_band(tx_port, + q6audio_get_port_index(tx_port), + param_hdr, + (u8 *) &aanc_port_cfg); + if (ret) pr_err("%s: AFE AANC port config failed for tx_port 0x%x, rx_port 0x%x ret %d\n", - __func__, tx_port, rx_port, ret); - } + __func__, tx_port, rx_port, ret); return ret; } static int afe_aanc_mod_enable(void *apr, uint16_t tx_port, uint16_t enable) { - struct afe_port_cmd_set_aanc_param cfg; + struct 
afe_mod_enable_param mod_enable = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - int index = 0; - - pr_debug("%s: tx_port 0x%x\n", - __func__, tx_port); - - ret = afe_q6_interface_prepare(); - if (ret != 0) { - pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); - return -EINVAL; - } - - index = q6audio_get_port_index(tx_port); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - return -EINVAL; - } - ret = q6audio_validate_port(tx_port); - if (ret < 0) { - pr_err("%s: port id: 0x%x ret %d\n", __func__, tx_port, ret); - return -EINVAL; - } - - cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - cfg.hdr.pkt_size = sizeof(cfg); - cfg.hdr.src_port = 0; - cfg.hdr.dest_port = 0; - cfg.hdr.token = index; - cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - cfg.param.port_id = tx_port; - cfg.param.payload_size = sizeof(struct afe_port_param_data_v2) + - sizeof(struct afe_mod_enable_param); - cfg.param.payload_address_lsw = 0; - cfg.param.payload_address_lsw = 0; - cfg.param.mem_map_handle = 0; + pr_debug("%s: tx_port 0x%x\n", __func__, tx_port); - cfg.pdata.module_id = AFE_MODULE_AANC; - cfg.pdata.param_id = AFE_PARAM_ID_ENABLE; - cfg.pdata.param_size = sizeof(struct afe_mod_enable_param); - cfg.pdata.reserved = 0; + param_hdr.module_id = AFE_MODULE_AANC; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_ENABLE; + param_hdr.param_size = sizeof(struct afe_mod_enable_param); - cfg.data.mod_enable.enable = enable; - cfg.data.mod_enable.reserved = 0; + mod_enable.enable = enable; + mod_enable.reserved = 0; - ret = afe_apr_send_pkt((uint32_t *) &cfg, &this_afe.wait[index]); - if (ret) { + ret = q6afe_pack_and_set_param_in_band(tx_port, + q6audio_get_port_index(tx_port), + param_hdr, (u8 *) &mod_enable); + if (ret) pr_err("%s: AFE AANC enable failed for tx_port 0x%x ret %d\n", __func__, tx_port, ret); - } return ret; } static 
int afe_send_bank_selection_clip( struct afe_param_id_clip_bank_sel *param) { + struct param_hdr_v3 param_hdr = {0}; int ret; - struct afe_svc_cmd_set_clip_bank_selection config; + if (!param) { pr_err("%s: Invalid params", __func__); return -EINVAL; } - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = IDX_GLOBAL_CFG; - config.hdr.opcode = AFE_SVC_CMD_SET_PARAM; - - config.param.payload_size = sizeof(struct afe_port_param_data_v2) + - sizeof(struct afe_param_id_clip_bank_sel); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - - config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG; - config.pdata.param_id = AFE_PARAM_ID_CLIP_BANK_SEL_CFG; - config.pdata.param_size = - sizeof(struct afe_param_id_clip_bank_sel); - config.bank_sel = *param; - ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]); - if (ret) { + param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_CLIP_BANK_SEL_CFG; + param_hdr.param_size = sizeof(struct afe_param_id_clip_bank_sel); + + ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr, + (u8 *) param); + if (ret) pr_err("%s: AFE_PARAM_ID_CLIP_BANK_SEL_CFG failed %d\n", __func__, ret); - } return ret; } int afe_send_aanc_version( struct afe_param_id_cdc_aanc_version *version_cfg) { + struct param_hdr_v3 param_hdr = {0}; int ret; - struct afe_svc_cmd_cdc_aanc_version config; pr_debug("%s: enter\n", __func__); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = IDX_GLOBAL_CFG; - config.hdr.opcode = AFE_SVC_CMD_SET_PARAM; - - config.param.payload_size = 
sizeof(struct afe_port_param_data_v2) + - sizeof(struct afe_param_id_cdc_aanc_version); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - - config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG; - config.pdata.param_id = AFE_PARAM_ID_CDC_AANC_VERSION; - config.pdata.param_size = - sizeof(struct afe_param_id_cdc_aanc_version); - config.version = *version_cfg; - ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]); - if (ret) { + param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_CDC_AANC_VERSION; + param_hdr.param_size = sizeof(struct afe_param_id_cdc_aanc_version); + + ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr, + (u8 *) version_cfg); + if (ret) pr_err("%s: AFE_PARAM_ID_CDC_AANC_VERSION failed %d\n", __func__, ret); - } return ret; } @@ -2139,166 +2276,54 @@ bool afe_has_config(enum afe_config_type config) int afe_send_spdif_clk_cfg(struct afe_param_id_spdif_clk_cfg *cfg, u16 port_id) { - struct afe_spdif_clk_config_command clk_cfg; + struct afe_param_id_spdif_clk_cfg clk_cfg = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - int index = 0; if (!cfg) { pr_err("%s: Error, no configuration data\n", __func__); - ret = -EINVAL; - return ret; - } - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - return -EINVAL; - } - ret = q6audio_validate_port(port_id); - if (ret < 0) { - pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret); return -EINVAL; } - ret = afe_q6_interface_prepare(); - if (ret) { - pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); - return ret; - } - clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - clk_cfg.hdr.pkt_size = sizeof(clk_cfg); - clk_cfg.hdr.src_port = 0; - clk_cfg.hdr.dest_port = 0; 
- clk_cfg.hdr.token = index; - - clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - clk_cfg.param.port_id = q6audio_get_port_id(port_id); - clk_cfg.param.payload_address_lsw = 0x00; - clk_cfg.param.payload_address_msw = 0x00; - clk_cfg.param.mem_map_handle = 0x00; - clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - clk_cfg.pdata.param_id = AFE_PARAM_ID_SPDIF_CLK_CONFIG; - clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg); - clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr) - - sizeof(clk_cfg.param); - clk_cfg.clk_cfg = *cfg; - - pr_debug("%s: Minor version = 0x%x clk val = %d\n" - "clk root = 0x%x\n port id = 0x%x\n", - __func__, cfg->clk_cfg_minor_version, - cfg->clk_value, cfg->clk_root, - q6audio_get_port_id(port_id)); + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_SPDIF_CLK_CONFIG; + param_hdr.param_size = sizeof(struct afe_param_id_spdif_clk_cfg); - atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg); - if (ret < 0) { + pr_debug("%s: Minor version = 0x%x clk val = %d clk root = 0x%x port id = 0x%x\n", + __func__, clk_cfg.clk_cfg_minor_version, clk_cfg.clk_value, + clk_cfg.clk_root, q6audio_get_port_id(port_id)); + + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &clk_cfg); + if (ret < 0) pr_err("%s: AFE send clock config for port 0x%x failed ret = %d\n", __func__, port_id, ret); - ret = -EINVAL; - goto fail_cmd; - } - - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", - __func__); - ret = -EINVAL; - goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = 
adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); - goto fail_cmd; - } - -fail_cmd: return ret; } int afe_send_spdif_ch_status_cfg(struct afe_param_id_spdif_ch_status_cfg *ch_status_cfg, u16 port_id) { - struct afe_spdif_chstatus_config_command ch_status; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - int index = 0; - if (!ch_status_cfg) { + if (!ch_status_cfg) pr_err("%s: Error, no configuration data\n", __func__); - ret = -EINVAL; - return ret; - } - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - return -EINVAL; - } - ret = q6audio_validate_port(port_id); - if (ret < 0) { - pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret); - return -EINVAL; - } + return -EINVAL; - ret = afe_q6_interface_prepare(); - if (ret != 0) { - pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); - return ret; - } - ch_status.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - ch_status.hdr.pkt_size = sizeof(ch_status_cfg); - ch_status.hdr.src_port = 0; - ch_status.hdr.dest_port = 0; - ch_status.hdr.token = index; - - ch_status.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - ch_status.param.port_id = q6audio_get_port_id(port_id); - ch_status.param.payload_address_lsw = 0x00; - ch_status.param.payload_address_msw = 0x00; - ch_status.param.mem_map_handle = 0x00; - ch_status.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - ch_status.pdata.param_id = AFE_PARAM_ID_SPDIF_CLK_CONFIG; - ch_status.pdata.param_size = sizeof(ch_status.ch_status); - ch_status.param.payload_size = sizeof(ch_status) - - sizeof(struct apr_hdr) - sizeof(ch_status.param); - ch_status.ch_status = *ch_status_cfg; + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_SPDIF_CLK_CONFIG; + param_hdr.param_size = sizeof(struct afe_param_id_spdif_ch_status_cfg); - 
atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &ch_status); - if (ret < 0) { + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) ch_status_cfg); + if (ret < 0) pr_err("%s: AFE send channel status for port 0x%x failed ret = %d\n", __func__, port_id, ret); - ret = -EINVAL; - goto fail_cmd; - } - - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", - __func__); - ret = -EINVAL; - goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); - goto fail_cmd; - } - -fail_cmd: return ret; } @@ -2366,10 +2391,9 @@ fail_cmd: int afe_spdif_port_start(u16 port_id, struct afe_spdif_port_config *spdif_port, u32 rate) { - struct afe_audioif_config_command config; - int ret = 0; - int index = 0; + struct param_hdr_v3 param_hdr = {0}; uint16_t port_index; + int ret = 0; if (!spdif_port) { pr_err("%s: Error, no configuration data\n", __func__); @@ -2379,12 +2403,6 @@ int afe_spdif_port_start(u16 port_id, struct afe_spdif_port_config *spdif_port, pr_debug("%s: port id: 0x%x\n", __func__, port_id); - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - return -EINVAL; - } ret = q6audio_validate_port(port_id); if (ret < 0) { pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret); @@ -2394,24 +2412,14 @@ int afe_spdif_port_start(u16 port_id, struct afe_spdif_port_config *spdif_port, afe_send_cal(port_id); afe_send_hw_delay(port_id, rate); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = 
sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port_id); - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - config.pdata.param_id = AFE_PARAM_ID_SPDIF_CONFIG; - config.pdata.param_size = sizeof(config.port); - config.port.spdif = spdif_port->cfg; - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_SPDIF_CONFIG; + param_hdr.param_size = sizeof(struct afe_spdif_port_config); + + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) spdif_port); if (ret) { pr_err("%s: AFE enable for port 0x%x failed ret = %d\n", __func__, port_id, ret); @@ -2443,9 +2451,8 @@ int afe_send_slot_mapping_cfg( struct afe_param_id_slot_mapping_cfg *slot_mapping_cfg, u16 port_id) { - struct afe_slot_mapping_config_command config; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - int index = 0; if (!slot_mapping_cfg) { pr_err("%s: Error, no configuration data\n", __func__); @@ -2454,67 +2461,18 @@ int afe_send_slot_mapping_cfg( pr_debug("%s: port id: 0x%x\n", __func__, port_id); - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - return -EINVAL; - } - ret = q6audio_validate_port(port_id); - if (ret < 0) { - pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret); - return -EINVAL; - } - - memset(&config, 0, sizeof(config)); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - 
config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port_id); - config.param.payload_size = sizeof(config) - - sizeof(struct apr_hdr) - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_TDM; - config.pdata.param_id = AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG; - config.pdata.param_size = sizeof(config.slot_mapping); - config.slot_mapping = *slot_mapping_cfg; + param_hdr.module_id = AFE_MODULE_TDM; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG; + param_hdr.param_size = sizeof(struct afe_param_id_slot_mapping_cfg); - atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config); - if (ret < 0) { + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, + (u8 *) slot_mapping_cfg); + if (ret < 0) pr_err("%s: AFE send slot mapping for port 0x%x failed ret = %d\n", __func__, port_id, ret); - ret = -EINVAL; - goto fail_cmd; - } - - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", - __func__); - ret = -EINVAL; - goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); - goto fail_cmd; - } - -fail_cmd: return ret; } @@ -2522,9 +2480,8 @@ int afe_send_custom_tdm_header_cfg( struct afe_param_id_custom_tdm_header_cfg *custom_tdm_header_cfg, u16 port_id) { - struct afe_custom_tdm_header_config_command config; + struct param_hdr_v3 
param_hdr = {0}; int ret = 0; - int index = 0; if (!custom_tdm_header_cfg) { pr_err("%s: Error, no configuration data\n", __func__); @@ -2533,78 +2490,30 @@ int afe_send_custom_tdm_header_cfg( pr_debug("%s: port id: 0x%x\n", __func__, port_id); - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - return -EINVAL; - } - ret = q6audio_validate_port(port_id); - if (ret < 0) { - pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret); - return -EINVAL; - } + param_hdr.module_id = AFE_MODULE_TDM; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_CUSTOM_TDM_HEADER_CONFIG; + param_hdr.param_size = + sizeof(struct afe_param_id_custom_tdm_header_cfg); - memset(&config, 0, sizeof(config)); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port_id); - config.param.payload_size = sizeof(config) - - sizeof(struct apr_hdr) - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_TDM; - config.pdata.param_id = AFE_PARAM_ID_CUSTOM_TDM_HEADER_CONFIG; - config.pdata.param_size = sizeof(config.custom_tdm_header); - config.custom_tdm_header = *custom_tdm_header_cfg; - - atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config); - if (ret < 0) { + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, + (u8 *) custom_tdm_header_cfg); + if (ret < 0) pr_err("%s: AFE send custom tdm header for port 0x%x failed ret = %d\n", __func__, port_id, ret); - ret = 
-EINVAL; - goto fail_cmd; - } - - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", - __func__); - ret = -EINVAL; - goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); - goto fail_cmd; - } - -fail_cmd: return ret; } int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port, u32 rate, u16 num_groups) { - struct afe_audioif_config_command config; - int ret = 0; + struct param_hdr_v3 param_hdr = {0}; int index = 0; uint16_t port_index = 0; enum afe_mad_type mad_type = MAD_HW_NONE; + int ret = 0; if (!tdm_port) { pr_err("%s: Error, no configuration data\n", __func__); @@ -2669,26 +2578,15 @@ int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port, } } - memset(&config, 0, sizeof(config)); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port_id); - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - config.pdata.param_id = AFE_PARAM_ID_TDM_CONFIG; - config.pdata.param_size = sizeof(config.port); - config.port.tdm = tdm_port->tdm; - - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_TDM_CONFIG; + 
param_hdr.param_size = sizeof(struct afe_param_id_tdm_cfg); + + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, + (u8 *) &tdm_port->tdm); if (ret) { pr_err("%s: AFE enable for port 0x%x failed ret = %d\n", __func__, port_id, ret); @@ -2743,61 +2641,45 @@ void afe_set_routing_callback(routing_cb cb) int afe_port_send_usb_dev_param(u16 port_id, union afe_port_config *afe_config) { - struct afe_usb_audio_dev_param_command config; - int ret = 0, index = 0; + struct afe_param_id_usb_audio_dev_params usb_dev = {0}; + struct afe_param_id_usb_audio_dev_lpcm_fmt lpcm_fmt = {0}; + struct param_hdr_v3 param_hdr = {0}; + int ret = 0; if (!afe_config) { pr_err("%s: Error, no configuration data\n", __func__); ret = -EINVAL; goto exit; } - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid! for port ID 0x%x\n", - __func__, index, port_id); - ret = -EINVAL; - goto exit; - } - memset(&config, 0, sizeof(config)); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port_id); - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - config.pdata.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS; - config.pdata.param_size = sizeof(config.usb_dev); - config.usb_dev.cfg_minor_version = - AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG; - config.usb_dev.dev_token = afe_config->usb_audio.dev_token; - - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + + param_hdr.module_id = 
AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + + param_hdr.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS; + param_hdr.param_size = sizeof(usb_dev); + usb_dev.cfg_minor_version = AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG; + usb_dev.dev_token = afe_config->usb_audio.dev_token; + + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &usb_dev); if (ret) { pr_err("%s: AFE device param cmd failed %d\n", __func__, ret); - ret = -EINVAL; goto exit; } - config.pdata.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT; - config.pdata.param_size = sizeof(config.lpcm_fmt); - config.lpcm_fmt.cfg_minor_version = - AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG; - config.lpcm_fmt.endian = afe_config->usb_audio.endian; + param_hdr.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT; + param_hdr.param_size = sizeof(lpcm_fmt); + lpcm_fmt.cfg_minor_version = AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG; + lpcm_fmt.endian = afe_config->usb_audio.endian; - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &lpcm_fmt); if (ret) { pr_err("%s: AFE device param cmd LPCM_FMT failed %d\n", __func__, ret); - ret = -EINVAL; goto exit; } @@ -2810,11 +2692,12 @@ static int q6afe_send_enc_config(u16 port_id, union afe_port_config afe_config, u16 afe_in_channels, u16 afe_in_bit_width) { - struct afe_audioif_config_command config; - int index; + u32 enc_fmt; + struct afe_enc_cfg_blk_param_t enc_blk_param = {0}; + struct avs_enc_packetizer_id_param_t enc_pkt_id_param = {0}; + struct afe_port_media_type_t media_type = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret; - int payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param) - sizeof(config.port); pr_debug("%s:update DSP for enc format = %d\n", __func__, format); if (format != ASM_MEDIA_FMT_SBC && format != ASM_MEDIA_FMT_AAC_V2 && @@ -2822,94 +2705,76 @@ 
static int q6afe_send_enc_config(u16 port_id, pr_err("%s:Unsuppported format Ignore AFE config\n", __func__); return 0; } - memset(&config, 0, sizeof(config)); - index = q6audio_get_port_index(port_id); - if (index < 0) { - pr_err("%s: Invalid index number: %d\n", __func__, index); - return -EINVAL; - } - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port_id); - config.param.payload_size = payload_size + sizeof(config.port.enc_fmt); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_ID_ENCODER; - config.pdata.param_id = AFE_ENCODER_PARAM_ID_ENC_FMT_ID; - config.pdata.param_size = sizeof(config.port.enc_fmt); - config.port.enc_fmt.fmt_id = format; - pr_debug("%s:sending AFE_ENCODER_PARAM_ID_ENC_FMT_ID payload: %d\n", - __func__, config.param.payload_size); - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + param_hdr.module_id = AFE_MODULE_ID_ENCODER; + param_hdr.instance_id = INSTANCE_ID_0; + + param_hdr.param_id = AFE_ENCODER_PARAM_ID_ENC_FMT_ID; + param_hdr.param_size = sizeof(enc_fmt); + enc_fmt = format; + pr_debug("%s:sending AFE_ENCODER_PARAM_ID_ENC_FMT_ID payload\n", + __func__); + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &enc_fmt); if (ret) { pr_err("%s:unable to send AFE_ENCODER_PARAM_ID_ENC_FMT_ID", __func__); goto exit; } - config.param.payload_size = payload_size - + sizeof(config.port.enc_blk_param); - pr_debug("%s:send AFE_ENCODER_PARAM_ID_ENC_CFG_BLK to DSP payload:%d\n", - __func__, config.param.payload_size); - config.pdata.param_id = AFE_ENCODER_PARAM_ID_ENC_CFG_BLK; - config.pdata.param_size = 
sizeof(config.port.enc_blk_param); - config.port.enc_blk_param.enc_cfg_blk_size = - sizeof(config.port.enc_blk_param.enc_blk_config); - config.port.enc_blk_param.enc_blk_config = *cfg; - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + pr_debug("%s:send AFE_ENCODER_PARAM_ID_ENC_CFG_BLK to DSP payloadn", + __func__); + param_hdr.param_id = AFE_ENCODER_PARAM_ID_ENC_CFG_BLK; + param_hdr.param_size = sizeof(struct afe_enc_cfg_blk_param_t); + enc_blk_param.enc_cfg_blk_size = sizeof(union afe_enc_config_data); + enc_blk_param.enc_blk_config = *cfg; + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, + (u8 *) &enc_blk_param); if (ret) { pr_err("%s: AFE_ENCODER_PARAM_ID_ENC_CFG_BLK for port 0x%x failed %d\n", __func__, port_id, ret); goto exit; } - config.param.payload_size = - payload_size + sizeof(config.port.enc_pkt_id_param); - pr_debug("%s:sending AFE_ENCODER_PARAM_ID_PACKETIZER to DSP payload = %d", - __func__, config.param.payload_size); - config.pdata.param_id = AFE_ENCODER_PARAM_ID_PACKETIZER_ID; - config.pdata.param_size = sizeof(config.port.enc_pkt_id_param); - config.port.enc_pkt_id_param.enc_packetizer_id = - AFE_MODULE_ID_PACKETIZER_COP; - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + pr_debug("%s:sending AFE_ENCODER_PARAM_ID_PACKETIZER to DSP\n", + __func__); + param_hdr.param_id = AFE_ENCODER_PARAM_ID_PACKETIZER_ID; + param_hdr.param_size = sizeof(struct avs_enc_packetizer_id_param_t); + enc_pkt_id_param.enc_packetizer_id = AFE_MODULE_ID_PACKETIZER_COP; + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, + (u8 *) &enc_pkt_id_param); if (ret) { pr_err("%s: AFE_ENCODER_PARAM_ID_PACKETIZER for port 0x%x failed %d\n", __func__, port_id, ret); goto exit; } - config.param.payload_size = - payload_size + sizeof(config.port.media_type); - config.pdata.param_size = sizeof(config.port.media_type); - pr_debug("%s:Sending AFE_API_VERSION_PORT_MEDIA_TYPE to 
DSP", __func__); - config.pdata.module_id = AFE_MODULE_PORT; - config.pdata.param_id = AFE_PARAM_ID_PORT_MEDIA_TYPE; - config.port.media_type.minor_version = AFE_API_VERSION_PORT_MEDIA_TYPE; - config.port.media_type.sample_rate = afe_config.slim_sch.sample_rate; + param_hdr.module_id = AFE_MODULE_PORT; + param_hdr.param_id = AFE_PARAM_ID_PORT_MEDIA_TYPE; + param_hdr.param_size = sizeof(struct afe_port_media_type_t); + media_type.minor_version = AFE_API_VERSION_PORT_MEDIA_TYPE; + media_type.sample_rate = afe_config.slim_sch.sample_rate; if (afe_in_bit_width) - config.port.media_type.bit_width = afe_in_bit_width; + media_type.bit_width = afe_in_bit_width; else - config.port.media_type.bit_width = - afe_config.slim_sch.bit_width; + media_type.bit_width = afe_config.slim_sch.bit_width; if (afe_in_channels) - config.port.media_type.num_channels = afe_in_channels; + media_type.num_channels = afe_in_channels; else - config.port.media_type.num_channels = - afe_config.slim_sch.num_channels; - config.port.media_type.data_format = AFE_PORT_DATA_FORMAT_PCM; - config.port.media_type.reserved = 0; + media_type.num_channels = afe_config.slim_sch.num_channels; + media_type.data_format = AFE_PORT_DATA_FORMAT_PCM; + media_type.reserved = 0; - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &media_type); if (ret) { pr_err("%s: AFE_API_VERSION_PORT_MEDIA_TYPE for port 0x%x failed %d\n", __func__, port_id, ret); @@ -2924,13 +2789,16 @@ static int __afe_port_start(u16 port_id, union afe_port_config *afe_config, u32 rate, u16 afe_in_channels, u16 afe_in_bit_width, union afe_enc_config_data *cfg, u32 enc_format) { - struct afe_audioif_config_command config; + union afe_port_config port_cfg; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; int cfg_type; int index = 0; enum afe_mad_type mad_type; uint16_t port_index; + memset(&port_cfg, 0, sizeof(port_cfg)); + if (!afe_config) 
{ pr_err("%s: Error, no configuration data\n", __func__); ret = -EINVAL; @@ -3051,13 +2919,6 @@ static int __afe_port_start(u16 port_id, union afe_port_config *afe_config, } } - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; - switch (port_id) { case AFE_PORT_ID_PRIMARY_PCM_RX: case AFE_PORT_ID_PRIMARY_PCM_TX: @@ -3153,24 +3014,21 @@ static int __afe_port_start(u16 port_id, union afe_port_config *afe_config, ret = -EINVAL; goto fail_cmd; } - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port_id); - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - config.pdata.param_id = cfg_type; - config.pdata.param_size = sizeof(config.port); - - config.port = *afe_config; + + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = cfg_type; + param_hdr.param_size = sizeof(union afe_port_config); + + port_cfg = *afe_config; if ((enc_format != ASM_MEDIA_FMT_NONE) && (cfg_type == AFE_PARAM_ID_SLIMBUS_CONFIG)) { - config.port.slim_sch.data_format = - AFE_SB_DATA_FORMAT_GENERIC_COMPRESSED; + port_cfg.slim_sch.data_format = + AFE_SB_DATA_FORMAT_GENERIC_COMPRESSED; } - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &port_cfg); if (ret) { pr_err("%s: AFE enable for port 0x%x failed %d\n", __func__, port_id, ret); @@ -3515,11 +3373,15 @@ int afe_open(u16 port_id, union afe_port_config *afe_config, int rate) { struct afe_port_cmd_device_start start; - struct 
afe_audioif_config_command config; + union afe_port_config port_cfg; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; int cfg_type; int index = 0; + memset(&start, 0, sizeof(start)); + memset(&port_cfg, 0, sizeof(port_cfg)); + if (!afe_config) { pr_err("%s: Error, no configuration data\n", __func__); ret = -EINVAL; @@ -3574,12 +3436,6 @@ int afe_open(u16 port_id, } mutex_lock(&this_afe.afe_cmd_lock); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = index; switch (port_id) { case PRIMARY_I2S_RX: case PRIMARY_I2S_TX: @@ -3641,24 +3497,16 @@ int afe_open(u16 port_id, ret = -EINVAL; goto fail_cmd; } - config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - config.param.port_id = q6audio_get_port_id(port_id); - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - config.pdata.param_id = cfg_type; - config.pdata.param_size = sizeof(config.port); - - config.port = *afe_config; - pr_debug("%s: param PL size=%d iparam_size[%d][%zd %zd %zd %zd] param_id[0x%x]\n", - __func__, config.param.payload_size, config.pdata.param_size, - sizeof(config), sizeof(config.param), sizeof(config.port), - sizeof(struct apr_hdr), config.pdata.param_id); - - ret = afe_apr_send_pkt(&config, &this_afe.wait[index]); + + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = cfg_type; + param_hdr.param_size = sizeof(union afe_port_config); + port_cfg = *afe_config; + + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &port_cfg); if (ret) { pr_err("%s: AFE enable for port 0x%x opcode[0x%x]failed 
%d\n", __func__, port_id, cfg_type, ret); @@ -3689,57 +3537,28 @@ fail_cmd: int afe_loopback(u16 enable, u16 rx_port, u16 tx_port) { - struct afe_loopback_cfg_v1 lb_cmd; + struct afe_loopback_cfg_v1 lb_param = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - int index = 0; if (rx_port == MI2S_RX) rx_port = AFE_PORT_ID_PRIMARY_MI2S_RX; if (tx_port == MI2S_TX) tx_port = AFE_PORT_ID_PRIMARY_MI2S_TX; - ret = afe_q6_interface_prepare(); - if (ret != 0) { - pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); - return ret; - } + param_hdr.module_id = AFE_MODULE_LOOPBACK; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG; + param_hdr.param_size = sizeof(struct afe_loopback_cfg_v1); - index = q6audio_get_port_index(rx_port); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - return -EINVAL; - } - ret = q6audio_validate_port(rx_port); - if (ret < 0) { - pr_err("%s: Invalid port 0x%x ret %d", __func__, rx_port, ret); - return -EINVAL; - } + lb_param.dst_port_id = rx_port; + lb_param.routing_mode = LB_MODE_DEFAULT; + lb_param.enable = (enable ? 
1 : 0); + lb_param.loopback_cfg_minor_version = AFE_API_VERSION_LOOPBACK_CONFIG; - lb_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(20), APR_PKT_VER); - lb_cmd.hdr.pkt_size = sizeof(lb_cmd); - lb_cmd.hdr.src_port = 0; - lb_cmd.hdr.dest_port = 0; - lb_cmd.hdr.token = index; - lb_cmd.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - lb_cmd.param.port_id = tx_port; - lb_cmd.param.payload_size = (sizeof(lb_cmd) - sizeof(struct apr_hdr) - - sizeof(struct afe_port_cmd_set_param_v2)); - lb_cmd.param.payload_address_lsw = 0x00; - lb_cmd.param.payload_address_msw = 0x00; - lb_cmd.param.mem_map_handle = 0x00; - lb_cmd.pdata.module_id = AFE_MODULE_LOOPBACK; - lb_cmd.pdata.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG; - lb_cmd.pdata.param_size = lb_cmd.param.payload_size - - sizeof(struct afe_port_param_data_v2); - - lb_cmd.dst_port_id = rx_port; - lb_cmd.routing_mode = LB_MODE_DEFAULT; - lb_cmd.enable = (enable ? 1 : 0); - lb_cmd.loopback_cfg_minor_version = AFE_API_VERSION_LOOPBACK_CONFIG; - - ret = afe_apr_send_pkt(&lb_cmd, &this_afe.wait[index]); + ret = q6afe_pack_and_set_param_in_band(tx_port, + q6audio_get_port_index(tx_port), + param_hdr, (u8 *) &lb_param); if (ret) pr_err("%s: AFE loopback failed %d\n", __func__, ret); return ret; @@ -3747,9 +3566,9 @@ int afe_loopback(u16 enable, u16 rx_port, u16 tx_port) int afe_loopback_gain(u16 port_id, u16 volume) { - struct afe_loopback_gain_per_path_param set_param; + struct afe_loopback_gain_per_path_param set_param = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - int index = 0; if (this_afe.apr == NULL) { this_afe.apr = apr_register("ADSP", "AFE", afe_callback, @@ -3770,18 +3589,6 @@ int afe_loopback_gain(u16 port_id, u16 volume) ret = -EINVAL; goto fail_cmd; } - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - return -EINVAL; - } - ret = q6audio_validate_port(port_id); - if (ret < 0) { - pr_err("%s: 
Invalid port 0x%x ret %d", - __func__, port_id, ret); - return -EINVAL; - } /* RX ports numbers are even .TX ports numbers are odd. */ if (port_id % 2 == 0) { @@ -3793,36 +3600,19 @@ int afe_loopback_gain(u16 port_id, u16 volume) pr_debug("%s: port 0x%x volume %d\n", __func__, port_id, volume); - set_param.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - set_param.hdr.pkt_size = sizeof(set_param); - set_param.hdr.src_port = 0; - set_param.hdr.dest_port = 0; - set_param.hdr.token = index; - set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - - set_param.param.port_id = port_id; - set_param.param.payload_size = - (sizeof(struct afe_loopback_gain_per_path_param) - - sizeof(struct apr_hdr) - sizeof(struct afe_port_cmd_set_param_v2)); - set_param.param.payload_address_lsw = 0; - set_param.param.payload_address_msw = 0; - set_param.param.mem_map_handle = 0; - - set_param.pdata.module_id = AFE_MODULE_LOOPBACK; - set_param.pdata.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH; - set_param.pdata.param_size = - (set_param.param.payload_size - - sizeof(struct afe_port_param_data_v2)); + param_hdr.module_id = AFE_MODULE_LOOPBACK; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH; + param_hdr.param_size = sizeof(struct afe_loopback_gain_per_path_param); set_param.rx_port_id = port_id; set_param.gain = volume; - ret = afe_apr_send_pkt(&set_param, &this_afe.wait[index]); - if (ret) { + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &set_param); + if (ret) pr_err("%s: AFE param set failed for port 0x%x ret %d\n", __func__, port_id, ret); - goto fail_cmd; - } fail_cmd: return ret; @@ -3950,9 +3740,9 @@ int afe_pseudo_port_stop_nowait(u16 port_id) int afe_port_group_set_param(u16 group_id, union afe_port_group_config *afe_group_config) { - int ret; - struct afe_port_group_create config; + struct param_hdr_v3 param_hdr = {0}; int 
cfg_type; + int ret; if (!afe_group_config) { pr_err("%s: Error, no configuration data\n", __func__); @@ -3983,27 +3773,13 @@ int afe_port_group_set_param(u16 group_id, return -EINVAL; } - memset(&config, 0, sizeof(config)); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - config.hdr.token = IDX_GLOBAL_CFG; - config.hdr.opcode = AFE_SVC_CMD_SET_PARAM; - - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_GROUP_DEVICE; - config.pdata.param_id = cfg_type; - config.pdata.param_size = sizeof(config.data); - config.data = *afe_group_config; - - ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]); + param_hdr.module_id = AFE_MODULE_GROUP_DEVICE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = cfg_type; + param_hdr.param_size = sizeof(union afe_port_group_config); + + ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr, + (u8 *) afe_group_config); if (ret) pr_err("%s: AFE_PARAM_ID_GROUP_DEVICE_CFG failed %d\n", __func__, ret); @@ -4015,8 +3791,9 @@ int afe_port_group_enable(u16 group_id, union afe_port_group_config *afe_group_config, u16 enable) { + struct afe_group_device_enable group_enable = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret; - struct afe_port_group_create config; pr_debug("%s: group id: 0x%x enable: %d\n", __func__, group_id, enable); @@ -4035,28 +3812,15 @@ int afe_port_group_enable(u16 group_id, } } - memset(&config, 0, sizeof(config)); - config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - config.hdr.pkt_size = sizeof(config); - config.hdr.src_port = 0; - config.hdr.dest_port = 0; - 
config.hdr.token = IDX_GLOBAL_CFG; - config.hdr.opcode = AFE_SVC_CMD_SET_PARAM; - - config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) - - sizeof(config.param); - config.param.payload_address_lsw = 0x00; - config.param.payload_address_msw = 0x00; - config.param.mem_map_handle = 0x00; - config.pdata.module_id = AFE_MODULE_GROUP_DEVICE; - config.pdata.param_id = AFE_PARAM_ID_GROUP_DEVICE_ENABLE; - config.pdata.param_size = sizeof(config.data); - config.data.group_enable.group_id = group_id; - config.data.group_enable.enable = enable; - - ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]); + param_hdr.module_id = AFE_MODULE_GROUP_DEVICE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_GROUP_DEVICE_ENABLE; + param_hdr.param_size = sizeof(struct afe_group_device_enable); + group_enable.group_id = group_id; + group_enable.enable = enable; + + ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr, + (u8 *) &group_enable); if (ret) pr_err("%s: AFE_PARAM_ID_GROUP_DEVICE_ENABLE failed %d\n", __func__, ret); @@ -5006,9 +4770,7 @@ fail_cmd: static int afe_sidetone_iir(u16 tx_port_id) { - struct afe_loopback_iir_cfg_v2 iir_sidetone; int ret; - int index = 0; uint16_t size = 0; int cal_index = AFE_SIDETONE_IIR_CAL; int iir_pregain = 0; @@ -5016,20 +4778,13 @@ static int afe_sidetone_iir(u16 tx_port_id) int iir_enable; struct cal_block_data *cal_block; int mid; - - memset(&iir_sidetone, 0, sizeof(iir_sidetone)); - index = q6audio_get_port_index(tx_port_id); - iir_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - iir_sidetone.hdr.pkt_size = sizeof(iir_sidetone); - iir_sidetone.hdr.src_port = 0; - iir_sidetone.hdr.dest_port = 0; - iir_sidetone.hdr.token = index; - iir_sidetone.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - iir_sidetone.param.port_id = tx_port_id; - iir_sidetone.param.payload_address_lsw = 0x00; - iir_sidetone.param.payload_address_msw = 0x00; 
- iir_sidetone.param.mem_map_handle = 0x00; + struct afe_mod_enable_param enable = {0}; + struct afe_sidetone_iir_filter_config_params filter_data = {0}; + struct param_hdr_v3 param_hdr = {0}; + u8 *packed_param_data = NULL; + u32 packed_param_size = 0; + u32 single_param_size = 0; + struct audio_cal_info_sidetone_iir *st_iir_cal_info = NULL; if (this_afe.cal_data[cal_index] == NULL) { pr_err("%s: cal data is NULL\n", __func__); @@ -5045,14 +4800,13 @@ static int afe_sidetone_iir(u16 tx_port_id) goto done; } - iir_pregain = ((struct audio_cal_info_sidetone_iir *) - cal_block->cal_info)->pregain; - iir_enable = ((struct audio_cal_info_sidetone_iir *) - cal_block->cal_info)->iir_enable; - iir_num_biquad_stages = ((struct audio_cal_info_sidetone_iir *) - cal_block->cal_info)->num_biquad_stages; - mid = ((struct audio_cal_info_sidetone_iir *) - cal_block->cal_info)->mid; + /* Cache data from cal block while inside lock to reduce locked time */ + st_iir_cal_info = + (struct audio_cal_info_sidetone_iir *) cal_block->cal_info; + iir_pregain = st_iir_cal_info->pregain; + iir_enable = st_iir_cal_info->iir_enable; + iir_num_biquad_stages = st_iir_cal_info->num_biquad_stages; + mid = st_iir_cal_info->mid; /* * calculate the actual size of payload based on no of stages @@ -5068,75 +4822,85 @@ static int afe_sidetone_iir(u16 tx_port_id) pr_debug("%s: adding 2 to size:%d\n", __func__, size); size = size + 2; } - memcpy(&iir_sidetone.st_iir_filter_config_data.iir_config, - &((struct audio_cal_info_sidetone_iir *) - cal_block->cal_info)->iir_config, - sizeof(iir_sidetone.st_iir_filter_config_data.iir_config)); + memcpy(&filter_data.iir_config, &st_iir_cal_info->iir_config, size); mutex_unlock(&this_afe.cal_data[cal_index]->lock); - /* - * Calculate the payload size for setparams command - */ - iir_sidetone.param.payload_size = (sizeof(iir_sidetone) - - sizeof(struct apr_hdr) - - sizeof(struct afe_port_cmd_set_param_v2) - - (MAX_SIDETONE_IIR_DATA_SIZE - size)); - - pr_debug("%s: 
payload size :%d\n", __func__, - iir_sidetone.param.payload_size); + packed_param_size = + sizeof(param_hdr) * 2 + sizeof(enable) + sizeof(filter_data); + packed_param_data = kzalloc(packed_param_size, GFP_KERNEL); + if (!packed_param_data) + return -ENOMEM; + packed_param_size = 0; /* * Set IIR enable params */ - iir_sidetone.st_iir_enable_pdata.module_id = mid; - iir_sidetone.st_iir_enable_pdata.param_id = - AFE_PARAM_ID_ENABLE; - iir_sidetone.st_iir_enable_pdata.param_size = - sizeof(iir_sidetone.st_iir_mode_enable_data); - iir_sidetone.st_iir_mode_enable_data.enable = iir_enable; + param_hdr.module_id = mid; + param_hdr.param_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_ENABLE; + param_hdr.param_size = sizeof(enable); + enable.enable = iir_enable; + ret = q6common_pack_pp_params(packed_param_data, ¶m_hdr, + (u8 *) &enable, &single_param_size); + if (ret) { + pr_err("%s: Failed to pack param data, error %d\n", __func__, + ret); + goto done; + } + packed_param_size += single_param_size; /* * Set IIR filter config params */ - iir_sidetone.st_iir_filter_config_pdata.module_id = mid; - iir_sidetone.st_iir_filter_config_pdata.param_id = - AFE_PARAM_ID_SIDETONE_IIR_FILTER_CONFIG; - iir_sidetone.st_iir_filter_config_pdata.param_size = - sizeof(iir_sidetone.st_iir_filter_config_data.num_biquad_stages) - + - sizeof(iir_sidetone.st_iir_filter_config_data.pregain) + size; - iir_sidetone.st_iir_filter_config_pdata.reserved = 0; - iir_sidetone.st_iir_filter_config_data.num_biquad_stages = - iir_num_biquad_stages; - iir_sidetone.st_iir_filter_config_data.pregain = iir_pregain; + param_hdr.module_id = mid; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_SIDETONE_IIR_FILTER_CONFIG; + param_hdr.param_size = sizeof(filter_data.num_biquad_stages) + + sizeof(filter_data.pregain) + size; + filter_data.num_biquad_stages = iir_num_biquad_stages; + filter_data.pregain = iir_pregain; + ret = q6common_pack_pp_params(packed_param_data + 
packed_param_size, + ¶m_hdr, (u8 *) &filter_data, + &single_param_size); + if (ret) { + pr_err("%s: Failed to pack param data, error %d\n", __func__, + ret); + goto done; + } + packed_param_size += single_param_size; + pr_debug("%s: tx(0x%x)mid(0x%x)iir_en(%d)stg(%d)gain(0x%x)size(%d)\n", - __func__, tx_port_id, mid, - iir_sidetone.st_iir_mode_enable_data.enable, - iir_sidetone.st_iir_filter_config_data.num_biquad_stages, - iir_sidetone.st_iir_filter_config_data.pregain, - iir_sidetone.st_iir_filter_config_pdata.param_size); - ret = afe_apr_send_pkt(&iir_sidetone, &this_afe.wait[index]); + __func__, tx_port_id, mid, enable.enable, + filter_data.num_biquad_stages, filter_data.pregain, + param_hdr.param_size); + + ret = q6afe_set_params(tx_port_id, q6audio_get_port_index(tx_port_id), + NULL, packed_param_data, packed_param_size); if (ret) pr_err("%s: AFE sidetone failed for tx_port(0x%x)\n", __func__, tx_port_id); done: + kfree(packed_param_data); return ret; - } static int afe_sidetone(u16 tx_port_id, u16 rx_port_id, bool enable) { - struct afe_st_loopback_cfg_v1 cmd_sidetone; int ret; - int index; int cal_index = AFE_SIDETONE_CAL; int sidetone_gain; int sidetone_enable; struct cal_block_data *cal_block; int mid = 0; + struct afe_loopback_sidetone_gain gain_data = {0}; + struct loopback_cfg_data cfg_data = {0}; + struct param_hdr_v3 param_hdr = {0}; + u8 *packed_param_data = NULL; + u32 packed_param_size = 0; + u32 single_param_size = 0; + struct audio_cal_info_sidetone *st_cal_info = NULL; - memset(&cmd_sidetone, 0, sizeof(cmd_sidetone)); if (this_afe.cal_data[cal_index] == NULL) { pr_err("%s: cal data is NULL\n", __func__); ret = -EINVAL; @@ -5150,60 +4914,61 @@ static int afe_sidetone(u16 tx_port_id, u16 rx_port_id, bool enable) ret = -EINVAL; goto done; } - sidetone_gain = ((struct audio_cal_info_sidetone *) - cal_block->cal_info)->gain; - sidetone_enable = ((struct audio_cal_info_sidetone *) - cal_block->cal_info)->enable; - mid = ((struct 
audio_cal_info_sidetone *) - cal_block->cal_info)->mid; - mutex_unlock(&this_afe.cal_data[cal_index]->lock); - index = q6audio_get_port_index(tx_port_id); - cmd_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - cmd_sidetone.hdr.pkt_size = sizeof(cmd_sidetone); - cmd_sidetone.hdr.src_port = 0; - cmd_sidetone.hdr.dest_port = 0; - cmd_sidetone.hdr.token = index; - cmd_sidetone.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - cmd_sidetone.param.port_id = tx_port_id; - cmd_sidetone.param.payload_size = (sizeof(cmd_sidetone) - - sizeof(struct apr_hdr) - - sizeof(struct afe_port_cmd_set_param_v2)); - cmd_sidetone.param.payload_address_lsw = 0x00; - cmd_sidetone.param.payload_address_msw = 0x00; - cmd_sidetone.param.mem_map_handle = 0x00; - cmd_sidetone.gain_pdata.module_id = AFE_MODULE_LOOPBACK; - cmd_sidetone.gain_pdata.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH; - /* - * size of actual payload only - */ - cmd_sidetone.gain_pdata.param_size = sizeof( - struct afe_loopback_sidetone_gain); - cmd_sidetone.gain_data.rx_port_id = rx_port_id; - cmd_sidetone.gain_data.gain = sidetone_gain; + /* Cache data from cal block while inside lock to reduce locked time */ + st_cal_info = (struct audio_cal_info_sidetone *) cal_block->cal_info; + sidetone_gain = st_cal_info->gain; + sidetone_enable = st_cal_info->enable; + mid = st_cal_info->mid; + mutex_unlock(&this_afe.cal_data[cal_index]->lock); - cmd_sidetone.cfg_pdata.module_id = AFE_MODULE_LOOPBACK; - cmd_sidetone.cfg_pdata.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG; - /* - * size of actual payload only - */ - cmd_sidetone.cfg_pdata.param_size = sizeof(struct loopback_cfg_data); - cmd_sidetone.cfg_data.loopback_cfg_minor_version = - AFE_API_VERSION_LOOPBACK_CONFIG; - cmd_sidetone.cfg_data.dst_port_id = rx_port_id; - cmd_sidetone.cfg_data.routing_mode = LB_MODE_SIDETONE; - cmd_sidetone.cfg_data.enable = enable; + /* Set gain data. 
*/ + param_hdr.module_id = AFE_MODULE_LOOPBACK; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH; + param_hdr.param_size = sizeof(struct afe_loopback_sidetone_gain); + gain_data.rx_port_id = rx_port_id; + gain_data.gain = sidetone_gain; + ret = q6common_pack_pp_params(packed_param_data, ¶m_hdr, + (u8 *) &gain_data, &single_param_size); + if (ret) { + pr_err("%s: Failed to pack param data, error %d\n", __func__, + ret); + goto done; + } + packed_param_size += single_param_size; + + /* Set configuration data. */ + param_hdr.module_id = AFE_MODULE_LOOPBACK; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG; + param_hdr.param_size = sizeof(struct loopback_cfg_data); + cfg_data.loopback_cfg_minor_version = AFE_API_VERSION_LOOPBACK_CONFIG; + cfg_data.dst_port_id = rx_port_id; + cfg_data.routing_mode = LB_MODE_SIDETONE; + cfg_data.enable = enable; + ret = q6common_pack_pp_params(packed_param_data + packed_param_size, + ¶m_hdr, (u8 *) &cfg_data, + &single_param_size); + if (ret) { + pr_err("%s: Failed to pack param data, error %d\n", __func__, + ret); + goto done; + } + packed_param_size += single_param_size; pr_debug("%s rx(0x%x) tx(0x%x) enable(%d) mid(0x%x) gain(%d) sidetone_enable(%d)\n", __func__, rx_port_id, tx_port_id, enable, mid, sidetone_gain, sidetone_enable); - ret = afe_apr_send_pkt(&cmd_sidetone, &this_afe.wait[index]); + ret = q6afe_set_params(tx_port_id, q6audio_get_port_index(tx_port_id), + NULL, packed_param_data, packed_param_size); if (ret) pr_err("%s: AFE sidetone send failed for tx_port:%d rx_port:%d ret:%d\n", __func__, tx_port_id, rx_port_id, ret); + done: + kfree(packed_param_data); return ret; } @@ -5588,93 +5353,44 @@ fail_cmd: int afe_set_digital_codec_core_clock(u16 port_id, struct afe_digital_clk_cfg *cfg) { - struct afe_lpass_digital_clk_config_command clk_cfg; - int index = 0; + struct afe_digital_clk_cfg clk_cfg = {0}; + struct param_hdr_v3 
param_hdr = {0}; int ret = 0; if (!cfg) { pr_err("%s: clock cfg is NULL\n", __func__); - ret = -EINVAL; - return ret; - } - - ret = afe_q6_interface_prepare(); - if (ret != 0) { - pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); - return ret; + return -EINVAL; } - clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - clk_cfg.hdr.pkt_size = sizeof(clk_cfg); - clk_cfg.hdr.src_port = 0; - clk_cfg.hdr.dest_port = 0; - clk_cfg.hdr.token = index; - - clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; /*default rx port is taken to enable the codec digital clock*/ - clk_cfg.param.port_id = q6audio_get_port_id(port_id); - clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr) - - sizeof(clk_cfg.param); - clk_cfg.param.payload_address_lsw = 0x00; - clk_cfg.param.payload_address_msw = 0x00; - clk_cfg.param.mem_map_handle = 0x00; - clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - clk_cfg.pdata.param_id = AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG; - clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg); - clk_cfg.clk_cfg = *cfg; + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG; + param_hdr.param_size = sizeof(struct afe_digital_clk_cfg); + clk_cfg = *cfg; pr_debug("%s: Minor version =0x%x clk val = %d\n" "clk root = 0x%x resrv = 0x%x\n", - __func__, cfg->i2s_cfg_minor_version, - cfg->clk_val, cfg->clk_root, cfg->reserved); - - atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg); - if (ret < 0) { - pr_err("%s: AFE enable for port 0x%x ret %d\n", - __func__, port_id, ret); - ret = -EINVAL; - goto fail_cmd; - } - - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; 
- goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); - goto fail_cmd; - } + __func__, cfg->i2s_cfg_minor_version, cfg->clk_val, + cfg->clk_root, cfg->reserved); -fail_cmd: + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &clk_cfg); + if (ret < 0) + pr_err("%s: AFE enable for port 0x%x ret %d\n", __func__, + port_id, ret); return ret; } int afe_set_lpass_clock(u16 port_id, struct afe_clk_cfg *cfg) { - struct afe_lpass_clk_config_command clk_cfg; - int index = 0; + struct afe_clk_cfg clk_cfg = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; if (!cfg) { pr_err("%s: clock cfg is NULL\n", __func__); - ret = -EINVAL; - return ret; - } - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); return -EINVAL; } ret = q6audio_is_digital_pcm_interface(port_id); @@ -5684,31 +5400,12 @@ int afe_set_lpass_clock(u16 port_id, struct afe_clk_cfg *cfg) return -EINVAL; } - ret = afe_q6_interface_prepare(); - if (ret != 0) { - pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); - return ret; - } - mutex_lock(&this_afe.afe_cmd_lock); - clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - clk_cfg.hdr.pkt_size = sizeof(clk_cfg); - clk_cfg.hdr.src_port = 0; - clk_cfg.hdr.dest_port = 0; - clk_cfg.hdr.token = index; - - clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - clk_cfg.param.port_id = q6audio_get_port_id(port_id); - clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr) - - sizeof(clk_cfg.param); - clk_cfg.param.payload_address_lsw = 0x00; - clk_cfg.param.payload_address_msw = 0x00; - clk_cfg.param.mem_map_handle = 0x00; - clk_cfg.pdata.module_id = 
AFE_MODULE_AUDIO_DEV_INTERFACE; - clk_cfg.pdata.param_id = AFE_PARAM_ID_LPAIF_CLK_CONFIG; - clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg); - clk_cfg.clk_cfg = *cfg; + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_LPAIF_CLK_CONFIG; + param_hdr.param_size = sizeof(clk_cfg); + clk_cfg = *cfg; pr_debug("%s: Minor version =0x%x clk val1 = %d\n" "clk val2 = %d, clk src = 0x%x\n" @@ -5719,41 +5416,20 @@ int afe_set_lpass_clock(u16 port_id, struct afe_clk_cfg *cfg) cfg->clk_root, cfg->clk_set_mode, cfg->reserved, q6audio_get_port_id(port_id)); - atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg); - if (ret < 0) { + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &clk_cfg); + if (ret < 0) pr_err("%s: AFE enable for port 0x%x ret %d\n", __func__, port_id, ret); - ret = -EINVAL; - goto fail_cmd; - } - - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); - goto fail_cmd; - } -fail_cmd: mutex_unlock(&this_afe.afe_cmd_lock); return ret; } int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg) { - struct afe_lpass_clk_config_command_v2 clk_cfg; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; if (!cfg) { @@ -5774,24 +5450,10 @@ int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg) } mutex_lock(&this_afe.afe_cmd_lock); - clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - clk_cfg.hdr.pkt_size = sizeof(clk_cfg); - 
clk_cfg.hdr.src_port = 0; - clk_cfg.hdr.dest_port = 0; - clk_cfg.hdr.token = index; - - clk_cfg.hdr.opcode = AFE_SVC_CMD_SET_PARAM; - clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr) - - sizeof(clk_cfg.param); - clk_cfg.param.payload_address_lsw = 0x00; - clk_cfg.param.payload_address_msw = 0x00; - clk_cfg.param.mem_map_handle = 0x00; - clk_cfg.pdata.module_id = AFE_MODULE_CLOCK_SET; - clk_cfg.pdata.param_id = AFE_PARAM_ID_CLOCK_SET; - clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg); - clk_cfg.clk_cfg = *cfg; - + param_hdr.module_id = AFE_MODULE_CLOCK_SET; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_CLOCK_SET; + param_hdr.param_size = sizeof(struct afe_clk_set); pr_debug("%s: Minor version =0x%x clk id = %d\n" "clk freq (Hz) = %d, clk attri = 0x%x\n" @@ -5800,34 +5462,12 @@ int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg) cfg->clk_id, cfg->clk_freq_in_hz, cfg->clk_attri, cfg->clk_root, cfg->enable); - atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg); - if (ret < 0) { + ret = q6afe_svc_pack_and_set_param_in_band(index, param_hdr, + (u8 *) cfg); + if (ret < 0) pr_err("%s: AFE clk cfg failed with ret %d\n", __func__, ret); - ret = -EINVAL; - goto fail_cmd; - } - - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto fail_cmd; - } else { - /* set ret to 0 as no timeout happened */ - ret = 0; - } - if (atomic_read(&this_afe.status) != 0) { - pr_err("%s: config cmd failed\n", __func__); - ret = -EINVAL; - goto fail_cmd; - } -fail_cmd: mutex_unlock(&this_afe.afe_cmd_lock); return ret; } @@ -5861,19 +5501,12 @@ int afe_set_lpass_clock_v2(u16 port_id, struct afe_clk_set *cfg) int afe_set_lpass_internal_digital_codec_clock(u16 port_id, struct afe_digital_clk_cfg *cfg) { - struct 
afe_lpass_digital_clk_config_command clk_cfg; - int index = 0; + struct afe_digital_clk_cfg clk_cfg = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; if (!cfg) { pr_err("%s: clock cfg is NULL\n", __func__); - ret = -EINVAL; - return ret; - } - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); return -EINVAL; } ret = q6audio_is_digital_pcm_interface(port_id); @@ -5883,30 +5516,11 @@ int afe_set_lpass_internal_digital_codec_clock(u16 port_id, return -EINVAL; } - ret = afe_q6_interface_prepare(); - if (ret != 0) { - pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); - return ret; - } - - clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - clk_cfg.hdr.pkt_size = sizeof(clk_cfg); - clk_cfg.hdr.src_port = 0; - clk_cfg.hdr.dest_port = 0; - clk_cfg.hdr.token = index; - - clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - clk_cfg.param.port_id = q6audio_get_port_id(port_id); - clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr) - - sizeof(clk_cfg.param); - clk_cfg.param.payload_address_lsw = 0x00; - clk_cfg.param.payload_address_msw = 0x00; - clk_cfg.param.mem_map_handle = 0x00; - clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - clk_cfg.pdata.param_id = AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG; - clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg); - clk_cfg.clk_cfg = *cfg; + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG; + param_hdr.param_size = sizeof(clk_cfg); + clk_cfg = *cfg; pr_debug("%s: Minor version =0x%x clk val = %d\n" "clk root = 0x%x resrv = 0x%x port id = 0x%x\n", @@ -5914,49 +5528,22 @@ int afe_set_lpass_internal_digital_codec_clock(u16 port_id, cfg->clk_val, cfg->clk_root, cfg->reserved, q6audio_get_port_id(port_id)); - 
atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg); - if (ret < 0) { + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &clk_cfg); + if (ret < 0) pr_err("%s: AFE enable for port 0x0x%x ret %d\n", __func__, port_id, ret); - ret = -EINVAL; - goto fail_cmd; - } - - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); - goto fail_cmd; - } -fail_cmd: return ret; } int afe_enable_lpass_core_shared_clock(u16 port_id, u32 enable) { - struct afe_lpass_core_shared_clk_config_command clk_cfg; - int index = 0; + struct afe_param_id_lpass_core_shared_clk_cfg clk_cfg = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - index = q6audio_get_port_index(port_id); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - return -EINVAL; - } ret = q6audio_is_digital_pcm_interface(port_id); if (ret < 0) { pr_err("%s: q6audio_is_digital_pcm_interface fail %d\n", @@ -5964,65 +5551,25 @@ int afe_enable_lpass_core_shared_clock(u16 port_id, u32 enable) return -EINVAL; } - ret = afe_q6_interface_prepare(); - if (ret != 0) { - pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); - return ret; - } - mutex_lock(&this_afe.afe_cmd_lock); - clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - clk_cfg.hdr.pkt_size = sizeof(clk_cfg); - clk_cfg.hdr.src_port = 0; - clk_cfg.hdr.dest_port = 0; - clk_cfg.hdr.token = index; - - clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2; - 
clk_cfg.param.port_id = q6audio_get_port_id(port_id); - clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr) - - sizeof(clk_cfg.param); - clk_cfg.param.payload_address_lsw = 0x00; - clk_cfg.param.payload_address_msw = 0x00; - clk_cfg.param.mem_map_handle = 0x00; - clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - clk_cfg.pdata.param_id = AFE_PARAM_ID_LPASS_CORE_SHARED_CLOCK_CONFIG; - clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg); - clk_cfg.clk_cfg.lpass_core_shared_clk_cfg_minor_version = - AFE_API_VERSION_LPASS_CORE_SHARED_CLK_CONFIG; - clk_cfg.clk_cfg.enable = enable; + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_LPASS_CORE_SHARED_CLOCK_CONFIG; + param_hdr.param_size = sizeof(clk_cfg); + clk_cfg.lpass_core_shared_clk_cfg_minor_version = + AFE_API_VERSION_LPASS_CORE_SHARED_CLK_CONFIG; + clk_cfg.enable = enable; pr_debug("%s: port id = %d, enable = %d\n", __func__, q6audio_get_port_id(port_id), enable); - atomic_set(&this_afe.state, 1); - atomic_set(&this_afe.status, 0); - ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg); - if (ret < 0) { + ret = q6afe_pack_and_set_param_in_band(port_id, + q6audio_get_port_index(port_id), + param_hdr, (u8 *) &clk_cfg); + if (ret < 0) pr_err("%s: AFE enable for port 0x%x ret %d\n", __func__, port_id, ret); - ret = -EINVAL; - goto fail_cmd; - } - - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); - goto fail_cmd; - } -fail_cmd: mutex_unlock(&this_afe.afe_cmd_lock); return ret; } @@ -6052,8 +5599,9 @@ int 
q6afe_check_osr_clk_freq(u32 freq) int afe_get_sp_th_vi_ftm_data(struct afe_sp_th_vi_get_param *th_vi) { + struct param_hdr_v3 param_hdr = {0}; + int port = SLIMBUS_4_TX; int ret = -EINVAL; - int index = 0, port = SLIMBUS_4_TX; if (!th_vi) { pr_err("%s: Invalid params\n", __func__); @@ -6062,59 +5610,18 @@ int afe_get_sp_th_vi_ftm_data(struct afe_sp_th_vi_get_param *th_vi) if (this_afe.vi_tx_port != -1) port = this_afe.vi_tx_port; - ret = q6audio_validate_port(port); - if (ret < 0) { - pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret); - goto done; - } - index = q6audio_get_port_index(port); - if (index < 0) { - pr_err("%s: invalid port 0x%x, index %d\n", - __func__, port, index); - ret = -EINVAL; - goto done; - } - th_vi->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - th_vi->hdr.pkt_size = sizeof(*th_vi); - th_vi->hdr.src_port = 0; - th_vi->hdr.dest_port = 0; - th_vi->hdr.token = index; - th_vi->hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2; - th_vi->get_param.mem_map_handle = 0; - th_vi->get_param.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI; - th_vi->get_param.param_id = AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS; - th_vi->get_param.payload_address_lsw = 0; - th_vi->get_param.payload_address_msw = 0; - th_vi->get_param.payload_size = sizeof(*th_vi) - - sizeof(th_vi->get_param) - sizeof(th_vi->hdr); - th_vi->get_param.port_id = q6audio_get_port_id(port); - th_vi->pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI; - th_vi->pdata.param_id = AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS; - th_vi->pdata.param_size = sizeof(th_vi->param); - atomic_set(&this_afe.status, 0); - atomic_set(&this_afe.state, 1); - ret = apr_send_pkt(this_afe.apr, (uint32_t *)th_vi); - if (ret < 0) { - pr_err("%s: get param port 0x%x param id[0x%x]failed %d\n", - __func__, port, th_vi->get_param.param_id, ret); - goto done; - } - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - 
msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto done; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code(atomic_read(&this_afe.status)); + param_hdr.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS; + param_hdr.param_size = sizeof(struct afe_sp_th_vi_ftm_params); + + ret = q6afe_get_params(port, NULL, ¶m_hdr); + if (ret) { + pr_err("%s: Failed to get TH VI FTM data\n", __func__); goto done; } + + th_vi->pdata = param_hdr; memcpy(&th_vi->param , &this_afe.th_vi_resp.param, sizeof(this_afe.th_vi_resp.param)); pr_debug("%s: DC resistance %d %d temp %d %d status %d %d\n", @@ -6131,8 +5638,9 @@ done: int afe_get_sp_ex_vi_ftm_data(struct afe_sp_ex_vi_get_param *ex_vi) { + struct param_hdr_v3 param_hdr = {0}; + int port = SLIMBUS_4_TX; int ret = -EINVAL; - int index = 0, port = SLIMBUS_4_TX; if (!ex_vi) { pr_err("%s: Invalid params\n", __func__); @@ -6141,61 +5649,19 @@ int afe_get_sp_ex_vi_ftm_data(struct afe_sp_ex_vi_get_param *ex_vi) if (this_afe.vi_tx_port != -1) port = this_afe.vi_tx_port; - ret = q6audio_validate_port(port); - if (ret < 0) { - pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret); - goto done; - } + param_hdr.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS; + param_hdr.param_size = sizeof(struct afe_sp_ex_vi_ftm_params); - index = q6audio_get_port_index(port); - if (index < 0) { - pr_err("%s: invalid index %d port 0x%x\n", __func__, - index, port); - ret = -EINVAL; - goto done; - } - - ex_vi->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - ex_vi->hdr.pkt_size = sizeof(*ex_vi); - 
ex_vi->hdr.src_port = 0; - ex_vi->hdr.dest_port = 0; - ex_vi->hdr.token = index; - ex_vi->hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2; - ex_vi->get_param.mem_map_handle = 0; - ex_vi->get_param.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI; - ex_vi->get_param.param_id = AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS; - ex_vi->get_param.payload_address_lsw = 0; - ex_vi->get_param.payload_address_msw = 0; - ex_vi->get_param.payload_size = sizeof(*ex_vi) - - sizeof(ex_vi->get_param) - sizeof(ex_vi->hdr); - ex_vi->get_param.port_id = q6audio_get_port_id(port); - ex_vi->pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI; - ex_vi->pdata.param_id = AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS; - ex_vi->pdata.param_size = sizeof(ex_vi->param); - atomic_set(&this_afe.status, 0); - atomic_set(&this_afe.state, 1); - ret = apr_send_pkt(this_afe.apr, (uint32_t *)ex_vi); + ret = q6afe_get_params(port, NULL, ¶m_hdr); if (ret < 0) { pr_err("%s: get param port 0x%x param id[0x%x]failed %d\n", - __func__, port, ex_vi->get_param.param_id, ret); - goto done; - } - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto done; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code(atomic_read(&this_afe.status)); + __func__, port, param_hdr.param_id, ret); goto done; } + + ex_vi->pdata = param_hdr; memcpy(&ex_vi->param , &this_afe.ex_vi_resp.param, sizeof(this_afe.ex_vi_resp.param)); pr_debug("%s: freq %d %d resistance %d %d qfactor %d %d state %d %d\n", @@ -6215,80 +5681,28 @@ done: int afe_get_av_dev_drift(struct afe_param_id_dev_timing_stats *timing_stats, u16 port) { + struct param_hdr_v3 param_hdr = {0}; int ret = -EINVAL; - int index = 0; - struct afe_av_dev_drift_get_param av_dev_drift; if (!timing_stats) { 
pr_err("%s: Invalid params\n", __func__); goto exit; } - ret = q6audio_validate_port(port); - if (ret < 0) { - pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret); - ret = -EINVAL; - goto exit; - } + param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_DEV_TIMING_STATS; + param_hdr.param_size = sizeof(struct afe_param_id_dev_timing_stats); - index = q6audio_get_port_index(port); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: Invalid AFE port index[%d]\n", - __func__, index); - ret = -EINVAL; - goto exit; - } - - memset(&av_dev_drift, 0, sizeof(struct afe_av_dev_drift_get_param)); - - av_dev_drift.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - av_dev_drift.hdr.pkt_size = sizeof(av_dev_drift); - av_dev_drift.hdr.src_port = 0; - av_dev_drift.hdr.dest_port = 0; - av_dev_drift.hdr.token = index; - av_dev_drift.hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2; - av_dev_drift.get_param.mem_map_handle = 0; - av_dev_drift.get_param.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - av_dev_drift.get_param.param_id = AFE_PARAM_ID_DEV_TIMING_STATS; - av_dev_drift.get_param.payload_address_lsw = 0; - av_dev_drift.get_param.payload_address_msw = 0; - av_dev_drift.get_param.payload_size = sizeof(av_dev_drift) - - sizeof(av_dev_drift.get_param) - sizeof(av_dev_drift.hdr); - av_dev_drift.get_param.port_id = q6audio_get_port_id(port); - av_dev_drift.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE; - av_dev_drift.pdata.param_id = AFE_PARAM_ID_DEV_TIMING_STATS; - av_dev_drift.pdata.param_size = sizeof(av_dev_drift.timing_stats); - atomic_set(&this_afe.status, 0); - atomic_set(&this_afe.state, 1); - ret = apr_send_pkt(this_afe.apr, (uint32_t *)&av_dev_drift); + ret = q6afe_get_params(port, NULL, ¶m_hdr); if (ret < 0) { pr_err("%s: get param port 0x%x param id[0x%x] failed %d\n", - __func__, port, av_dev_drift.get_param.param_id, ret); - goto exit; - 
} - - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto exit; - } - - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); + __func__, port, param_hdr.param_id, ret); goto exit; } memcpy(timing_stats, &this_afe.av_dev_drift_resp.timing_stats, - sizeof(this_afe.av_dev_drift_resp.timing_stats)); + param_hdr.param_size); ret = 0; exit: return ret; @@ -6296,8 +5710,9 @@ exit: int afe_spk_prot_get_calib_data(struct afe_spkr_prot_get_vi_calib *calib_resp) { + struct param_hdr_v3 param_hdr = {0}; + int port = SLIMBUS_4_TX; int ret = -EINVAL; - int index = 0, port = SLIMBUS_4_TX; if (!calib_resp) { pr_err("%s: Invalid params\n", __func__); @@ -6306,60 +5721,15 @@ int afe_spk_prot_get_calib_data(struct afe_spkr_prot_get_vi_calib *calib_resp) if (this_afe.vi_tx_port != -1) port = this_afe.vi_tx_port; - ret = q6audio_validate_port(port); - if (ret < 0) { - pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret); - ret = -EINVAL; - goto fail_cmd; - } - index = q6audio_get_port_index(port); - if (index < 0 || index >= AFE_MAX_PORTS) { - pr_err("%s: AFE port index[%d] invalid!\n", - __func__, index); - ret = -EINVAL; - goto fail_cmd; - } - calib_resp->hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); - calib_resp->hdr.pkt_size = sizeof(*calib_resp); - calib_resp->hdr.src_port = 0; - calib_resp->hdr.dest_port = 0; - calib_resp->hdr.token = index; - calib_resp->hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2; - calib_resp->get_param.mem_map_handle = 0; - calib_resp->get_param.module_id = AFE_MODULE_FB_SPKR_PROT_VI_PROC_V2; - calib_resp->get_param.param_id = AFE_PARAM_ID_CALIB_RES_CFG_V2; - 
calib_resp->get_param.payload_address_lsw = 0; - calib_resp->get_param.payload_address_msw = 0; - calib_resp->get_param.payload_size = sizeof(*calib_resp) - - sizeof(calib_resp->get_param) - sizeof(calib_resp->hdr); - calib_resp->get_param.port_id = q6audio_get_port_id(port); - calib_resp->pdata.module_id = AFE_MODULE_FB_SPKR_PROT_VI_PROC_V2; - calib_resp->pdata.param_id = AFE_PARAM_ID_CALIB_RES_CFG_V2; - calib_resp->pdata.param_size = sizeof(calib_resp->res_cfg); - atomic_set(&this_afe.status, 0); - atomic_set(&this_afe.state, 1); - ret = apr_send_pkt(this_afe.apr, (uint32_t *)calib_resp); + param_hdr.module_id = AFE_MODULE_FB_SPKR_PROT_VI_PROC_V2; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AFE_PARAM_ID_CALIB_RES_CFG_V2; + param_hdr.param_size = sizeof(struct afe_spkr_prot_get_vi_calib); + + ret = q6afe_get_params(port, NULL, ¶m_hdr); if (ret < 0) { pr_err("%s: get param port 0x%x param id[0x%x]failed %d\n", - __func__, port, calib_resp->get_param.param_id, ret); - goto fail_cmd; - } - ret = wait_event_timeout(this_afe.wait[index], - (atomic_read(&this_afe.state) == 0), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto fail_cmd; - } - if (atomic_read(&this_afe.status) > 0) { - pr_err("%s: config cmd failed [%s]\n", - __func__, adsp_err_get_err_str( - atomic_read(&this_afe.status))); - ret = adsp_err_get_lnx_err_code( - atomic_read(&this_afe.status)); + __func__, port, param_hdr.param_id, ret); goto fail_cmd; } memcpy(&calib_resp->res_cfg , &this_afe.calib_data.res_cfg, diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c index c3d86e6cced2..201234a25bd9 100644 --- a/sound/soc/msm/qdsp6v2/q6asm.c +++ b/sound/soc/msm/qdsp6v2/q6asm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public @@ -41,6 +41,7 @@ #include <sound/audio_cal_utils.h> #include <sound/adsp_err.h> #include <sound/compress_params.h> +#include <sound/q6common.h> #define TRUE 0x01 #define FALSE 0x00 @@ -92,8 +93,13 @@ struct asm_mmap { }; static struct asm_mmap this_mmap; + +struct audio_session { + struct audio_client *ac; + spinlock_t session_lock; +}; /* session id: 0 reserved */ -static struct audio_client *session[ASM_ACTIVE_STREAMS_ALLOWED + 1]; +static struct audio_session session[ASM_ACTIVE_STREAMS_ALLOWED + 1]; struct asm_buffer_node { struct list_head list; @@ -545,8 +551,8 @@ static int q6asm_session_alloc(struct audio_client *ac) { int n; for (n = 1; n <= ASM_ACTIVE_STREAMS_ALLOWED; n++) { - if (!session[n]) { - session[n] = ac; + if (!(session[n].ac)) { + session[n].ac = ac; return n; } } @@ -554,24 +560,39 @@ static int q6asm_session_alloc(struct audio_client *ac) return -ENOMEM; } -static bool q6asm_is_valid_audio_client(struct audio_client *ac) +static int q6asm_get_session_id_from_audio_client(struct audio_client *ac) { int n; for (n = 1; n <= ASM_ACTIVE_STREAMS_ALLOWED; n++) { - if (session[n] == ac) - return 1; + if (session[n].ac == ac) + return n; } return 0; } +static bool q6asm_is_valid_audio_client(struct audio_client *ac) +{ + return q6asm_get_session_id_from_audio_client(ac) ? 
1 : 0; +} + static void q6asm_session_free(struct audio_client *ac) { + int session_id; + unsigned long flags; + pr_debug("%s: sessionid[%d]\n", __func__, ac->session); + session_id = ac->session; rtac_remove_popp_from_adm_devices(ac->session); - session[ac->session] = 0; + spin_lock_irqsave(&(session[session_id].session_lock), flags); + session[ac->session].ac = NULL; ac->session = 0; ac->perf_mode = LEGACY_PCM_MODE; ac->fptr_cache_ops = NULL; + ac->cb = NULL; + ac->priv = NULL; + kfree(ac); + ac = NULL; + spin_unlock_irqrestore(&(session[session_id].session_lock), flags); return; } @@ -1083,8 +1104,6 @@ void q6asm_audio_client_free(struct audio_client *ac) pr_debug("%s: APR De-Register\n", __func__); /*done:*/ - kfree(ac); - ac = NULL; mutex_unlock(&session_lock); return; @@ -1219,6 +1238,7 @@ struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv) if (n <= 0) { pr_err("%s: ASM Session alloc fail n=%d\n", __func__, n); mutex_unlock(&session_lock); + kfree(ac); goto fail_session; } ac->session = n; @@ -1295,7 +1315,6 @@ fail_apr2: fail_apr1: q6asm_session_free(ac); fail_session: - kfree(ac); return NULL; } @@ -1310,11 +1329,11 @@ struct audio_client *q6asm_get_audio_client(int session_id) goto err; } - if (!session[session_id]) { + if (!(session[session_id].ac)) { pr_err("%s: session not active: %d\n", __func__, session_id); goto err; } - return session[session_id]; + return session[session_id].ac; err: return NULL; } @@ -1518,6 +1537,7 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv) uint32_t i = IN; uint32_t *payload; unsigned long dsp_flags; + unsigned long flags; struct asm_buffer_node *buf_node = NULL; struct list_head *ptr, *next; union asm_token_struct asm_token; @@ -1525,6 +1545,8 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv) struct audio_client *ac = NULL; struct audio_port_data *port; + int session_id; + if (!data) { pr_err("%s: Invalid CB\n", __func__); return 0; @@ -1565,13 
+1587,23 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv) rtac_clear_mapping(ASM_RTAC_CAL); return 0; } + asm_token.token = data->token; - ac = q6asm_get_audio_client(asm_token._token.session_id); + session_id = asm_token._token.session_id; + + if ((session_id > 0 && session_id <= ASM_ACTIVE_STREAMS_ALLOWED)) + spin_lock_irqsave(&(session[session_id].session_lock), flags); + + ac = q6asm_get_audio_client(session_id); dir = q6asm_get_flag_from_token(&asm_token, ASM_DIRECTION_OFFSET); if (!ac) { pr_debug("%s: session[%d] already freed\n", - __func__, asm_token._token.session_id); + __func__, session_id); + if ((session_id > 0 && + session_id <= ASM_ACTIVE_STREAMS_ALLOWED)) + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return 0; } @@ -1622,6 +1654,10 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv) __func__, payload[0]); break; } + if ((session_id > 0 && + session_id <= ASM_ACTIVE_STREAMS_ALLOWED)) + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return 0; } @@ -1656,6 +1692,10 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv) if (ac->cb) ac->cb(data->opcode, data->token, data->payload, ac->priv); + if ((session_id > 0 && session_id <= ASM_ACTIVE_STREAMS_ALLOWED)) + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); + return 0; } @@ -1723,6 +1763,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) uint8_t buf_index; struct msm_adsp_event_data *pp_event_package = NULL; uint32_t payload_size = 0; + unsigned long flags; + int session_id; if (ac == NULL) { pr_err("%s: ac NULL\n", __func__); @@ -1732,15 +1774,21 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) pr_err("%s: data NULL\n", __func__); return -EINVAL; } - if (!q6asm_is_valid_audio_client(ac)) { - pr_err("%s: audio client pointer is invalid, ac = %pK\n", - __func__, ac); + + session_id = 
q6asm_get_session_id_from_audio_client(ac); + if (session_id <= 0 || session_id > ASM_ACTIVE_STREAMS_ALLOWED) { + pr_err("%s: Session ID is invalid, session = %d\n", __func__, + session_id); return -EINVAL; } - if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) { - pr_err("%s: Session ID is invalid, session = %d\n", __func__, - ac->session); + spin_lock_irqsave(&(session[session_id].session_lock), flags); + + if (!q6asm_is_valid_audio_client(ac)) { + pr_err("%s: audio client pointer is invalid, ac = %pK\n", + __func__, ac); + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return -EINVAL; } @@ -1753,7 +1801,6 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) } if (data->opcode == RESET_EVENTS) { - mutex_lock(&ac->cmd_lock); atomic_set(&ac->reset, 1); if (ac->apr == NULL) { ac->apr = ac->apr2; @@ -1774,7 +1821,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) wake_up(&ac->time_wait); wake_up(&ac->cmd_wait); wake_up(&ac->mem_wait); - mutex_unlock(&ac->cmd_lock); + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return 0; } @@ -1788,6 +1836,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) (data->opcode != ASM_SESSION_EVENT_RX_UNDERFLOW)) { if (payload == NULL) { pr_err("%s: payload is null\n", __func__); + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return -EINVAL; } dev_vdbg(ac->dev, "%s: Payload = [0x%x] status[0x%x] opcode 0x%x\n", @@ -1796,6 +1846,7 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) if (data->opcode == APR_BASIC_RSP_RESULT) { switch (payload[0]) { case ASM_STREAM_CMD_SET_PP_PARAMS_V2: + case ASM_STREAM_CMD_SET_PP_PARAMS_V3: if (rtac_make_asm_callback(ac->session, payload, data->payload_size)) break; @@ -1813,6 +1864,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) ret = q6asm_is_valid_session(data, priv); if (ret != 0) { 
pr_err("%s: session invalid %d\n", __func__, ret); + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return ret; } case ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2: @@ -1842,9 +1895,12 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) pr_err("%s: cmd = 0x%x returned error = 0x%x\n", __func__, payload[0], payload[1]); if (wakeup_flag) { - if ((is_adsp_reg_event(payload[0]) >= 0) - || (payload[0] == - ASM_STREAM_CMD_SET_PP_PARAMS_V2)) + if ((is_adsp_reg_event(payload[0]) >= + 0) || + (payload[0] == + ASM_STREAM_CMD_SET_PP_PARAMS_V2) || + (payload[0] == + ASM_STREAM_CMD_SET_PP_PARAMS_V3)) atomic_set(&ac->cmd_state_pp, payload[1]); else @@ -1852,10 +1908,14 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) payload[1]); wake_up(&ac->cmd_wait); } + spin_unlock_irqrestore( + &(session[session_id].session_lock), + flags); return 0; } if ((is_adsp_reg_event(payload[0]) >= 0) || - (payload[0] == ASM_STREAM_CMD_SET_PP_PARAMS_V2)) { + (payload[0] == ASM_STREAM_CMD_SET_PP_PARAMS_V2) || + (payload[0] == ASM_STREAM_CMD_SET_PP_PARAMS_V3)) { if (atomic_read(&ac->cmd_state_pp) && wakeup_flag) { atomic_set(&ac->cmd_state_pp, 0); @@ -1882,6 +1942,9 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) atomic_set(&ac->mem_state, payload[1]); wake_up(&ac->mem_wait); } + spin_unlock_irqrestore( + &(session[session_id].session_lock), + flags); return 0; } if (atomic_read(&ac->mem_state) && wakeup_flag) { @@ -1898,10 +1961,10 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) break; } case ASM_STREAM_CMD_GET_PP_PARAMS_V2: - pr_debug("%s: ASM_STREAM_CMD_GET_PP_PARAMS_V2 session %d opcode 0x%x token 0x%x src %d dest %d\n", - __func__, ac->session, - data->opcode, data->token, - data->src_port, data->dest_port); + case ASM_STREAM_CMD_GET_PP_PARAMS_V3: + pr_debug("%s: ASM_STREAM_CMD_GET_PP_PARAMS session %d opcode 0x%x token 0x%x src %d dest %d\n", + __func__, ac->session, 
data->opcode, + data->token, data->src_port, data->dest_port); /* Should only come here if there is an APR */ /* error or malformed APR packet. Otherwise */ /* response will be returned as */ @@ -1929,6 +1992,9 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) __func__, payload[0]); break; } + + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return 0; } @@ -1942,6 +2008,9 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) if (port->buf == NULL) { pr_err("%s: Unexpected Write Done\n", __func__); + spin_unlock_irqrestore( + &(session[session_id].session_lock), + flags); return -EINVAL; } spin_lock_irqsave(&port->dsp_lock, dsp_flags); @@ -1956,6 +2025,9 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) __func__, payload[0], payload[1]); spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); + spin_unlock_irqrestore( + &(session[session_id].session_lock), + flags); return -EINVAL; } port->buf[buf_index].used = 1; @@ -1971,13 +2043,13 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) break; } case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2: - pr_debug("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 session %d opcode 0x%x token 0x%x src %d dest %d\n", - __func__, ac->session, data->opcode, - data->token, - data->src_port, data->dest_port); + case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V3: + pr_debug("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS session %d opcode 0x%x token 0x%x src %d dest %d\n", + __func__, ac->session, data->opcode, data->token, + data->src_port, data->dest_port); if (payload[0] != 0) { - pr_err("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 returned error = 0x%x\n", - __func__, payload[0]); + pr_err("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS returned error = 0x%x\n", + __func__, payload[0]); } else if (generic_get_data) { generic_get_data->valid = 1; if (generic_get_data->is_inband) { @@ -2026,6 +2098,9 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) 
if (ac->io_mode & SYNC_IO_MODE) { if (port->buf == NULL) { pr_err("%s: Unexpected Write Done\n", __func__); + spin_unlock_irqrestore( + &(session[session_id].session_lock), + flags); return -EINVAL; } spin_lock_irqsave(&port->dsp_lock, dsp_flags); @@ -2100,8 +2175,11 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) pr_debug("%s: ASM_STREAM_EVENT payload[0][0x%x] payload[1][0x%x]", __func__, payload[0], payload[1]); i = is_adsp_raise_event(data->opcode); - if (i < 0) + if (i < 0) { + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return 0; + } /* repack payload for asm_stream_pp_event * package is composed of event type + size + actual payload @@ -2110,8 +2188,11 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) pp_event_package = kzalloc(payload_size + sizeof(struct msm_adsp_event_data), GFP_ATOMIC); - if (!pp_event_package) + if (!pp_event_package) { + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return -ENOMEM; + } pp_event_package->event_type = i; pp_event_package->payload_len = payload_size; @@ -2120,6 +2201,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) ac->cb(data->opcode, data->token, (void *)pp_event_package, ac->priv); kfree(pp_event_package); + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return 0; case ASM_SESSION_CMDRSP_ADJUST_SESSION_CLOCK_V2: pr_debug("%s: ASM_SESSION_CMDRSP_ADJUST_SESSION_CLOCK_V2 sesion %d status 0x%x msw %u lsw %u\n", @@ -2145,7 +2228,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) if (ac->cb) ac->cb(data->opcode, data->token, data->payload, ac->priv); - + spin_unlock_irqrestore( + &(session[session_id].session_lock), flags); return 0; } @@ -2321,11 +2405,16 @@ int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac) static void __q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr, uint32_t pkt_size, uint32_t cmd_flg, uint32_t stream_id) 
{ + unsigned long flags; + dev_vdbg(ac->dev, "%s: pkt_size=%d cmd_flg=%d session=%d stream_id=%d\n", __func__, pkt_size, cmd_flg, ac->session, stream_id); mutex_lock(&ac->cmd_lock); + spin_lock_irqsave(&(session[ac->session].session_lock), flags); if (ac->apr == NULL) { pr_err("%s: AC APR handle NULL", __func__); + spin_unlock_irqrestore( + &(session[ac->session].session_lock), flags); mutex_unlock(&ac->cmd_lock); return; } @@ -2348,6 +2437,8 @@ static void __q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr, WAIT_CMD); hdr->pkt_size = pkt_size; + spin_unlock_irqrestore( + &(session[ac->session].session_lock), flags); mutex_unlock(&ac->cmd_lock); return; } @@ -2466,6 +2557,136 @@ static void q6asm_add_mmaphdr(struct audio_client *ac, struct apr_hdr *hdr, return; } +int q6asm_set_pp_params(struct audio_client *ac, + struct mem_mapping_hdr *mem_hdr, u8 *param_data, + u32 param_size) +{ + struct asm_stream_cmd_set_pp_params *asm_set_param = NULL; + int pkt_size = 0; + int ret = 0; + + if (ac == NULL) { + pr_err("%s: Audio Client is NULL\n", __func__); + return -EINVAL; + } else if (ac->apr == NULL) { + pr_err("%s: APR pointer is NULL\n", __func__); + return -EINVAL; + } + + pkt_size = sizeof(struct asm_stream_cmd_set_pp_params); + /* Add param size to packet size when sending in-band only */ + if (param_data != NULL) + pkt_size += param_size; + asm_set_param = kzalloc(pkt_size, GFP_KERNEL); + if (!asm_set_param) + return -ENOMEM; + + q6asm_add_hdr_async(ac, &asm_set_param->apr_hdr, pkt_size, TRUE); + + /* With pre-packed data, only the opcode differes from V2 and V3. */ + if (q6common_is_instance_id_supported()) + asm_set_param->apr_hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V3; + else + asm_set_param->apr_hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; + + asm_set_param->payload_size = param_size; + + if (mem_hdr != NULL) { + /* Out of band case */ + asm_set_param->mem_hdr = *mem_hdr; + } else if (param_data != NULL) { + /* In band case. 
Parameter data must be pre-packed with its + * header before calling this function. Use + * q6common_pack_pp_params to pack parameter data and header + * correctly. + */ + memcpy(&asm_set_param->param_data, param_data, param_size); + } else { + pr_err("%s: Received NULL pointers for both mem header and param data\n", + __func__); + ret = -EINVAL; + goto done; + } + + atomic_set(&ac->cmd_state_pp, -1); + ret = apr_send_pkt(ac->apr, (uint32_t *) asm_set_param); + if (ret < 0) { + pr_err("%s: apr send failed rc %d\n", __func__, ret); + ret = -EINVAL; + goto done; + } + + ret = wait_event_timeout(ac->cmd_wait, + atomic_read(&ac->cmd_state_pp) >= 0, 5 * HZ); + if (!ret) { + pr_err("%s: timeout sending apr pkt\n", __func__); + ret = -ETIMEDOUT; + goto done; + } + + if (atomic_read(&ac->cmd_state_pp) > 0) { + pr_err("%s: DSP returned error[%s]\n", __func__, + adsp_err_get_err_str(atomic_read(&ac->cmd_state_pp))); + ret = adsp_err_get_lnx_err_code(atomic_read(&ac->cmd_state_pp)); + goto done; + } + ret = 0; +done: + kfree(asm_set_param); + return ret; +} +EXPORT_SYMBOL(q6asm_set_pp_params); + +int q6asm_pack_and_set_pp_param_in_band(struct audio_client *ac, + struct param_hdr_v3 param_hdr, + u8 *param_data) +{ + u8 *packed_data = NULL; + u32 packed_size = sizeof(union param_hdrs) + param_hdr.param_size; + int ret = 0; + + packed_data = kzalloc(packed_size, GFP_KERNEL); + if (packed_data == NULL) + return -ENOMEM; + + ret = q6common_pack_pp_params(packed_data, ¶m_hdr, param_data, + &packed_size); + if (ret) { + pr_err("%s: Failed to pack params, error %d\n", __func__, ret); + goto done; + } + + ret = q6asm_set_pp_params(ac, NULL, packed_data, packed_size); +done: + kfree(packed_data); + return ret; +} +EXPORT_SYMBOL(q6asm_pack_and_set_pp_param_in_band); + +int q6asm_set_soft_volume_module_instance_ids(int instance, + struct param_hdr_v3 *param_hdr) +{ + if (param_hdr == NULL) { + pr_err("%s: Param header is NULL\n", __func__); + return -EINVAL; + } + + switch (instance) { + 
case SOFT_VOLUME_INSTANCE_2: + param_hdr->module_id = ASM_MODULE_ID_VOL_CTRL2; + param_hdr->instance_id = INSTANCE_ID_0; + return 0; + case SOFT_VOLUME_INSTANCE_1: + param_hdr->module_id = ASM_MODULE_ID_VOL_CTRL; + param_hdr->instance_id = INSTANCE_ID_0; + return 0; + default: + pr_err("%s: Invalid instance %d\n", __func__, instance); + return -EINVAL; + } +} +EXPORT_SYMBOL(q6asm_set_soft_volume_module_instance_ids); + static int __q6asm_open_read(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample, uint32_t pcm_format_block_ver, @@ -6741,67 +6962,27 @@ fail_cmd: int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain) { struct asm_volume_ctrl_multichannel_gain multi_ch_gain; - int sz = 0; + struct param_hdr_v3 param_info = {0}; int rc = 0; - if (ac == NULL) { - pr_err("%s: APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; - } - if (ac->apr == NULL) { - pr_err("%s: AC APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; - } - memset(&multi_ch_gain, 0, sizeof(multi_ch_gain)); - sz = sizeof(struct asm_volume_ctrl_multichannel_gain); - q6asm_add_hdr_async(ac, &multi_ch_gain.hdr, sz, TRUE); - atomic_set(&ac->cmd_state_pp, -1); - multi_ch_gain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - multi_ch_gain.param.data_payload_addr_lsw = 0; - multi_ch_gain.param.data_payload_addr_msw = 0; - multi_ch_gain.param.mem_map_handle = 0; - multi_ch_gain.param.data_payload_size = sizeof(multi_ch_gain) - - sizeof(multi_ch_gain.hdr) - sizeof(multi_ch_gain.param); - multi_ch_gain.data.module_id = ASM_MODULE_ID_VOL_CTRL; - multi_ch_gain.data.param_id = ASM_PARAM_ID_MULTICHANNEL_GAIN; - multi_ch_gain.data.param_size = multi_ch_gain.param.data_payload_size - - sizeof(multi_ch_gain.data); - multi_ch_gain.data.reserved = 0; + + param_info.module_id = ASM_MODULE_ID_VOL_CTRL; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = ASM_PARAM_ID_MULTICHANNEL_GAIN; + param_info.param_size = sizeof(multi_ch_gain); + 
multi_ch_gain.gain_data[0].channeltype = PCM_CHANNEL_FL; multi_ch_gain.gain_data[0].gain = left_gain << 15; multi_ch_gain.gain_data[1].channeltype = PCM_CHANNEL_FR; multi_ch_gain.gain_data[1].gain = right_gain << 15; multi_ch_gain.num_channels = 2; - rc = apr_send_pkt(ac->apr, (uint32_t *) &multi_ch_gain); - if (rc < 0) { + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info, + (u8 *) &multi_ch_gain); + if (rc < 0) pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", - __func__, multi_ch_gain.data.param_id, rc); - rc = -EINVAL; - goto fail_cmd; - } + __func__, param_info.param_id, rc); - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); - if (!rc) { - pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, - multi_ch_gain.data.param_id); - rc = -ETIMEDOUT; - goto fail_cmd; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%s] , set-params paramid[0x%x]\n", - __func__, adsp_err_get_err_str( - atomic_read(&ac->cmd_state_pp)), - multi_ch_gain.data.param_id); - rc = adsp_err_get_lnx_err_code( - atomic_read(&ac->cmd_state_pp)); - goto fail_cmd; - } - rc = 0; -fail_cmd: return rc; } @@ -6817,20 +6998,14 @@ int q6asm_set_multich_gain(struct audio_client *ac, uint32_t channels, uint32_t *gains, uint8_t *ch_map, bool use_default) { struct asm_volume_ctrl_multichannel_gain multich_gain; - int sz = 0; + struct param_hdr_v3 param_info = {0}; int rc = 0; int i; u8 default_chmap[VOLUME_CONTROL_MAX_CHANNELS]; if (ac == NULL) { - pr_err("%s: ac is NULL\n", __func__); - rc = -EINVAL; - goto done; - } - if (ac->apr == NULL) { - dev_err(ac->dev, "%s: AC APR handle NULL\n", __func__); - rc = -EINVAL; - goto done; + pr_err("%s: Audio client is NULL\n", __func__); + return -EINVAL; } if (gains == NULL) { dev_err(ac->dev, "%s: gain_list is NULL\n", __func__); @@ -6850,20 +7025,10 @@ int q6asm_set_multich_gain(struct audio_client *ac, uint32_t channels, } memset(&multich_gain, 0, sizeof(multich_gain)); - sz = 
sizeof(struct asm_volume_ctrl_multichannel_gain); - q6asm_add_hdr_async(ac, &multich_gain.hdr, sz, TRUE); - atomic_set(&ac->cmd_state_pp, -1); - multich_gain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - multich_gain.param.data_payload_addr_lsw = 0; - multich_gain.param.data_payload_addr_msw = 0; - multich_gain.param.mem_map_handle = 0; - multich_gain.param.data_payload_size = sizeof(multich_gain) - - sizeof(multich_gain.hdr) - sizeof(multich_gain.param); - multich_gain.data.module_id = ASM_MODULE_ID_VOL_CTRL; - multich_gain.data.param_id = ASM_PARAM_ID_MULTICHANNEL_GAIN; - multich_gain.data.param_size = multich_gain.param.data_payload_size - - sizeof(multich_gain.data); - multich_gain.data.reserved = 0; + param_info.module_id = ASM_MODULE_ID_VOL_CTRL; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = ASM_PARAM_ID_MULTICHANNEL_GAIN; + param_info.param_size = sizeof(multich_gain); if (use_default) { rc = q6asm_map_channels(default_chmap, channels, false); @@ -6882,166 +7047,56 @@ int q6asm_set_multich_gain(struct audio_client *ac, uint32_t channels, } multich_gain.num_channels = channels; - rc = apr_send_pkt(ac->apr, (uint32_t *) &multich_gain); - if (rc < 0) { + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info, + (u8 *) &multich_gain); + if (rc) pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", - __func__, multich_gain.data.param_id, rc); - goto done; - } - - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); - if (!rc) { - pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, - multich_gain.data.param_id); - rc = -EINVAL; - goto done; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%d] , set-params paramid[0x%x]\n", - __func__, atomic_read(&ac->cmd_state_pp), - multich_gain.data.param_id); - rc = -EINVAL; - goto done; - } - rc = 0; + __func__, param_info.param_id, rc); done: return rc; } int q6asm_set_mute(struct audio_client *ac, int muteflag) { - struct 
asm_volume_ctrl_mute_config mute; - int sz = 0; + struct asm_volume_ctrl_mute_config mute = {0}; + struct param_hdr_v3 param_info = {0}; int rc = 0; - if (ac == NULL) { - pr_err("%s: APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; - } - if (ac->apr == NULL) { - pr_err("%s: AC APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; - } - - sz = sizeof(struct asm_volume_ctrl_mute_config); - q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE); - atomic_set(&ac->cmd_state_pp, -1); - mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - mute.param.data_payload_addr_lsw = 0; - mute.param.data_payload_addr_msw = 0; - mute.param.mem_map_handle = 0; - mute.param.data_payload_size = sizeof(mute) - - sizeof(mute.hdr) - sizeof(mute.param); - mute.data.module_id = ASM_MODULE_ID_VOL_CTRL; - mute.data.param_id = ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG; - mute.data.param_size = mute.param.data_payload_size - sizeof(mute.data); - mute.data.reserved = 0; + param_info.module_id = ASM_MODULE_ID_VOL_CTRL; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG; + param_info.param_size = sizeof(mute); mute.mute_flag = muteflag; - rc = apr_send_pkt(ac->apr, (uint32_t *) &mute); - if (rc < 0) { + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info, (u8 *) &mute); + if (rc) pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", - __func__, mute.data.param_id, rc); - rc = -EINVAL; - goto fail_cmd; - } - - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); - if (!rc) { - pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, - mute.data.param_id); - rc = -ETIMEDOUT; - goto fail_cmd; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n", - __func__, adsp_err_get_err_str( - atomic_read(&ac->cmd_state_pp)), - mute.data.param_id); - rc = adsp_err_get_lnx_err_code( - atomic_read(&ac->cmd_state_pp)); - goto fail_cmd; - } - rc = 0; 
-fail_cmd: + __func__, param_info.param_id, rc); return rc; } static int __q6asm_set_volume(struct audio_client *ac, int volume, int instance) { - struct asm_volume_ctrl_master_gain vol; - int sz = 0; - int rc = 0; - int module_id; - - if (ac == NULL) { - pr_err("%s: APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; - } - if (ac->apr == NULL) { - pr_err("%s: AC APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; - } + struct asm_volume_ctrl_master_gain vol = {0}; + struct param_hdr_v3 param_info = {0}; + int rc = 0; - switch (instance) { - case SOFT_VOLUME_INSTANCE_2: - module_id = ASM_MODULE_ID_VOL_CTRL2; - break; - case SOFT_VOLUME_INSTANCE_1: - default: - module_id = ASM_MODULE_ID_VOL_CTRL; - break; + rc = q6asm_set_soft_volume_module_instance_ids(instance, ¶m_info); + if (rc) { + pr_err("%s: Failed to pack soft volume module and instance IDs, error %d\n", + __func__, rc); + return rc; } - sz = sizeof(struct asm_volume_ctrl_master_gain); - q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE); - atomic_set(&ac->cmd_state_pp, -1); - vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - vol.param.data_payload_addr_lsw = 0; - vol.param.data_payload_addr_msw = 0; - vol.param.mem_map_handle = 0; - vol.param.data_payload_size = sizeof(vol) - - sizeof(vol.hdr) - sizeof(vol.param); - vol.data.module_id = module_id; - vol.data.param_id = ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN; - vol.data.param_size = vol.param.data_payload_size - sizeof(vol.data); - vol.data.reserved = 0; + param_info.param_id = ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN; + param_info.param_size = sizeof(vol); vol.master_gain = volume; - rc = apr_send_pkt(ac->apr, (uint32_t *) &vol); - if (rc < 0) { + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info, (u8 *) &vol); + if (rc) pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", - __func__, vol.data.param_id, rc); - rc = -EINVAL; - goto fail_cmd; - } - - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); - if 
(!rc) { - pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, - vol.data.param_id); - rc = -ETIMEDOUT; - goto fail_cmd; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n", - __func__, adsp_err_get_err_str( - atomic_read(&ac->cmd_state_pp)), - vol.data.param_id); - rc = adsp_err_get_lnx_err_code( - atomic_read(&ac->cmd_state_pp)); - goto fail_cmd; - } + __func__, param_info.param_id, rc); - rc = 0; -fail_cmd: return rc; } @@ -7240,68 +7295,26 @@ done: int q6asm_set_softpause(struct audio_client *ac, struct asm_softpause_params *pause_param) { - struct asm_soft_pause_params softpause; - int sz = 0; + struct asm_soft_pause_params softpause = {0}; + struct param_hdr_v3 param_info = {0}; int rc = 0; - if (ac == NULL) { - pr_err("%s: APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; - } - if (ac->apr == NULL) { - pr_err("%s: AC APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; - } + param_info.module_id = ASM_MODULE_ID_VOL_CTRL; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS; + param_info.param_size = sizeof(softpause); - sz = sizeof(struct asm_soft_pause_params); - q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE); - atomic_set(&ac->cmd_state_pp, -1); - softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - - softpause.param.data_payload_addr_lsw = 0; - softpause.param.data_payload_addr_msw = 0; - softpause.param.mem_map_handle = 0; - softpause.param.data_payload_size = sizeof(softpause) - - sizeof(softpause.hdr) - sizeof(softpause.param); - softpause.data.module_id = ASM_MODULE_ID_VOL_CTRL; - softpause.data.param_id = ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS; - softpause.data.param_size = softpause.param.data_payload_size - - sizeof(softpause.data); - softpause.data.reserved = 0; softpause.enable_flag = pause_param->enable; softpause.period = pause_param->period; softpause.step = pause_param->step; softpause.ramping_curve 
= pause_param->rampingcurve; - rc = apr_send_pkt(ac->apr, (uint32_t *) &softpause); - if (rc < 0) { + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info, + (u8 *) &softpause); + if (rc) pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", - __func__, softpause.data.param_id, rc); - rc = -EINVAL; - goto fail_cmd; - } + __func__, param_info.param_id, rc); - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); - if (!rc) { - pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, - softpause.data.param_id); - rc = -ETIMEDOUT; - goto fail_cmd; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n", - __func__, adsp_err_get_err_str( - atomic_read(&ac->cmd_state_pp)), - softpause.data.param_id); - rc = adsp_err_get_lnx_err_code( - atomic_read(&ac->cmd_state_pp)); - goto fail_cmd; - } - rc = 0; -fail_cmd: return rc; } @@ -7309,77 +7322,30 @@ static int __q6asm_set_softvolume(struct audio_client *ac, struct asm_softvolume_params *softvol_param, int instance) { - struct asm_soft_step_volume_params softvol; - int sz = 0; - int rc = 0; - int module_id; + struct asm_soft_step_volume_params softvol = {0}; + struct param_hdr_v3 param_info = {0}; + int rc = 0; - if (ac == NULL) { - pr_err("%s: APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; - } - if (ac->apr == NULL) { - pr_err("%s: AC APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; + rc = q6asm_set_soft_volume_module_instance_ids(instance, ¶m_info); + if (rc) { + pr_err("%s: Failed to pack soft volume module and instance IDs, error %d\n", + __func__, rc); + return rc; } - switch (instance) { - case SOFT_VOLUME_INSTANCE_2: - module_id = ASM_MODULE_ID_VOL_CTRL2; - break; - case SOFT_VOLUME_INSTANCE_1: - default: - module_id = ASM_MODULE_ID_VOL_CTRL; - break; - } + param_info.param_id = ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS; + param_info.param_size = sizeof(softvol); - sz = sizeof(struct 
asm_soft_step_volume_params); - q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE); - atomic_set(&ac->cmd_state_pp, -1); - softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - softvol.param.data_payload_addr_lsw = 0; - softvol.param.data_payload_addr_msw = 0; - softvol.param.mem_map_handle = 0; - softvol.param.data_payload_size = sizeof(softvol) - - sizeof(softvol.hdr) - sizeof(softvol.param); - softvol.data.module_id = module_id; - softvol.data.param_id = ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS; - softvol.data.param_size = softvol.param.data_payload_size - - sizeof(softvol.data); - softvol.data.reserved = 0; softvol.period = softvol_param->period; softvol.step = softvol_param->step; softvol.ramping_curve = softvol_param->rampingcurve; - rc = apr_send_pkt(ac->apr, (uint32_t *) &softvol); - if (rc < 0) { + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info, + (u8 *) &softvol); + if (rc) pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", - __func__, softvol.data.param_id, rc); - rc = -EINVAL; - goto fail_cmd; - } + __func__, param_info.param_id, rc); - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); - if (!rc) { - pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, - softvol.data.param_id); - rc = -ETIMEDOUT; - goto fail_cmd; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n", - __func__, adsp_err_get_err_str( - atomic_read(&ac->cmd_state_pp)), - softvol.data.param_id); - rc = adsp_err_get_lnx_err_code( - atomic_read(&ac->cmd_state_pp)); - goto fail_cmd; - } - rc = 0; -fail_cmd: return rc; } @@ -7400,334 +7366,156 @@ int q6asm_set_softvolume_v2(struct audio_client *ac, int q6asm_set_vol_ctrl_gain_pair(struct audio_client *ac, struct asm_stream_pan_ctrl_params *pan_param) { - int sz = 0; - int rc = 0; + struct audproc_volume_ctrl_multichannel_gain gain_data; + struct param_hdr_v3 param_hdr = {0}; + int num_out_ch = 0; int i = 0; - int32_t ch = 0; 
- struct apr_hdr hdr; - struct audproc_volume_ctrl_channel_type_gain_pair - gain_data[ASM_MAX_CHANNELS]; - struct asm_stream_cmd_set_pp_params_v2 payload_params; - struct asm_stream_param_data_v2 data; - uint16_t *asm_params = NULL; - - if (ac == NULL) { - pr_err("%s: ac is NULL\n", __func__); - rc = -EINVAL; - goto fail; - } - if (ac->apr == NULL) { - dev_err(ac->dev, "%s: ac apr handle NULL\n", __func__); - rc = -EINVAL; - goto fail; - } + int rc = 0; - sz = sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2) + - sizeof(struct asm_stream_param_data_v2) + - sizeof(uint32_t) + - (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) * - ASM_MAX_CHANNELS); - asm_params = kzalloc(sz, GFP_KERNEL); - if (!asm_params) { - rc = -ENOMEM; - goto fail; + if (pan_param == NULL) { + pr_err("%s: Pan parameter is NULL\n", __func__); + return -EINVAL; } - q6asm_add_hdr_async(ac, &hdr, sz, TRUE); - atomic_set(&ac->cmd_state_pp, -1); + memset(&gain_data, 0, sizeof(gain_data)); - hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr)); - - payload_params.data_payload_addr_lsw = 0; - payload_params.data_payload_addr_msw = 0; - payload_params.mem_map_handle = 0; - payload_params.data_payload_size = - sizeof(struct asm_stream_param_data_v2) + - sizeof(uint32_t) + - (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) * - ASM_MAX_CHANNELS); - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), - &payload_params, - sizeof(struct asm_stream_cmd_set_pp_params_v2)); - - data.module_id = AUDPROC_MODULE_ID_VOL_CTRL; - data.param_id = AUDPROC_PARAM_ID_MULTICHANNEL_GAIN; - data.param_size = sizeof(uint32_t) + - (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) * - ASM_MAX_CHANNELS); - data.reserved = 0; - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2)), - &data, sizeof(struct asm_stream_param_data_v2)); - - ch = pan_param->num_output_channels; - 
memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2) + - sizeof(struct asm_stream_param_data_v2)), - &ch, - sizeof(uint32_t)); - - memset(gain_data, 0, - ASM_MAX_CHANNELS * - sizeof(struct audproc_volume_ctrl_channel_type_gain_pair)); - for (i = 0; i < pan_param->num_output_channels; i++) { - gain_data[i].channel_type = - pan_param->output_channel_map[i]; - gain_data[i].gain = pan_param->gain[i]; - } - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2) + - sizeof(struct asm_stream_param_data_v2) + - sizeof(uint32_t)), - gain_data, - ASM_MAX_CHANNELS * - sizeof(struct audproc_volume_ctrl_channel_type_gain_pair)); + param_hdr.module_id = AUDPROC_MODULE_ID_VOL_CTRL; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_MULTICHANNEL_GAIN; + param_hdr.param_size = sizeof(gain_data); - rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params); - if (rc < 0) { - pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", - __func__, data.param_id, rc); - goto done; + num_out_ch = pan_param->num_output_channels; + if (num_out_ch > ASM_MAX_CHANNELS) { + pr_err("%s: Invalid number of output channels %d\n", __func__, + num_out_ch); + return -EINVAL; } + gain_data.num_channels = num_out_ch; - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5 * HZ); - if (!rc) { - pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, - data.param_id); - rc = -EINVAL; - goto done; + for (i = 0; i < num_out_ch; i++) { + gain_data.gain_data[i].channel_type = + pan_param->output_channel_map[i]; + gain_data.gain_data[i].gain = pan_param->gain[i]; } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%d], set-params paramid[0x%x]\n", - __func__, atomic_read(&ac->cmd_state_pp), - data.param_id); - rc = -EINVAL; - goto done; - } - rc = 0; -done: - kfree(asm_params); -fail: + + rc = q6asm_pack_and_set_pp_param_in_band(ac, 
param_hdr, + (uint8_t *) &gain_data); + if (rc < 0) + pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", + __func__, param_hdr.param_id, rc); return rc; } int q6asm_set_mfc_panning_params(struct audio_client *ac, struct asm_stream_pan_ctrl_params *pan_param) { - int sz, rc, i; - struct audproc_mfc_output_media_fmt mfc_cfg; - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 payload_params; - struct asm_stream_param_data_v2 data; - struct audproc_chmixer_param_coeff pan_cfg; - uint16_t variable_payload = 0; - char *asm_params = NULL; - uint16_t ii; - uint16_t *dst_gain_ptr = NULL; - sz = rc = i = 0; - if (ac == NULL) { - pr_err("%s: ac handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd1; - } - if (ac->apr == NULL) { - pr_err("%s: ac apr handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd1; - } + struct audproc_mfc_param_media_fmt mfc_cfg = {0}; + struct audproc_chmixer_param_coeff *chmixer_cfg; + struct param_hdr_v3 param_hdr = {0}; + u16 *dst_gain_ptr = NULL; + int num_out_ch; + int num_in_ch; + int chmixer_cfg_size = 0; + int packed_data_size = 0; + int out_ch_map_size; + int in_ch_map_size; + int gain_size; + int i = 0; + int rc = 0; + + if (!pan_param) + return -EINVAL; + + num_out_ch = pan_param->num_output_channels; + num_in_ch = pan_param->num_input_channels; + + param_hdr.module_id = AUDPROC_MODULE_ID_MFC; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT; + param_hdr.param_size = sizeof(mfc_cfg); - sz = sizeof(struct audproc_mfc_output_media_fmt); - q6asm_add_hdr_async(ac, &mfc_cfg.params.hdr, sz, TRUE); - atomic_set(&ac->cmd_state_pp, -1); - mfc_cfg.params.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - mfc_cfg.params.payload_addr_lsw = 0; - mfc_cfg.params.payload_addr_msw = 0; - mfc_cfg.params.mem_map_handle = 0; - mfc_cfg.params.payload_size = sizeof(mfc_cfg) - sizeof(mfc_cfg.params); - mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC; - mfc_cfg.data.param_id = 
AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT; - mfc_cfg.data.param_size = mfc_cfg.params.payload_size - - sizeof(mfc_cfg.data); - mfc_cfg.data.reserved = 0; mfc_cfg.sampling_rate = 0; mfc_cfg.bits_per_sample = 0; - mfc_cfg.num_channels = pan_param->num_output_channels; - for (i = 0; i < mfc_cfg.num_channels; i++) + mfc_cfg.num_channels = num_out_ch; + for (i = 0; i < num_out_ch; i++) mfc_cfg.channel_type[i] = pan_param->output_channel_map[i]; - rc = apr_send_pkt(ac->apr, (uint32_t *) &mfc_cfg); + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr, + (uint8_t *) &mfc_cfg); if (rc < 0) { pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", - __func__, mfc_cfg.data.param_id, rc); - rc = -EINVAL; - goto fail_cmd1; + __func__, param_hdr.param_id, rc); + return rc; } - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); - if (!rc) { - pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, - mfc_cfg.data.param_id); - rc = -ETIMEDOUT; - goto fail_cmd1; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n", - __func__, adsp_err_get_err_str( - atomic_read(&ac->cmd_state_pp)), - mfc_cfg.data.param_id); - rc = adsp_err_get_lnx_err_code( - atomic_read(&ac->cmd_state_pp)); - goto fail_cmd1; - } + out_ch_map_size = num_out_ch * sizeof(uint16_t); + in_ch_map_size = num_in_ch * sizeof(uint16_t); + gain_size = num_out_ch * num_in_ch * sizeof(uint16_t); - variable_payload = pan_param->num_output_channels * sizeof(uint16_t)+ - pan_param->num_input_channels * sizeof(uint16_t) + - pan_param->num_output_channels * - pan_param->num_input_channels * sizeof(uint16_t); - i = (variable_payload % sizeof(uint32_t)); - variable_payload += (i == 0) ? 
0 : sizeof(uint32_t) - i; - sz = sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2) + - sizeof(struct asm_stream_param_data_v2) + - sizeof(struct audproc_chmixer_param_coeff) + - variable_payload; + chmixer_cfg_size = sizeof(struct audproc_chmixer_param_coeff) + + out_ch_map_size + in_ch_map_size + gain_size; + chmixer_cfg = kzalloc(chmixer_cfg_size, GFP_KERNEL); + if (!chmixer_cfg) + return -ENOMEM; - asm_params = kzalloc(sz, GFP_KERNEL); - if (!asm_params) { - rc = -ENOMEM; - goto fail_cmd1; - } + param_hdr.module_id = AUDPROC_MODULE_ID_MFC; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = AUDPROC_CHMIXER_PARAM_ID_COEFF; + param_hdr.param_size = chmixer_cfg_size; - q6asm_add_hdr_async(ac, &hdr, sz, TRUE); - atomic_set(&ac->cmd_state_pp, -1); - hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr)); - - payload_params.data_payload_addr_lsw = 0; - payload_params.data_payload_addr_msw = 0; - payload_params.mem_map_handle = 0; - payload_params.data_payload_size = - sizeof(struct audproc_chmixer_param_coeff) + - variable_payload + sizeof(struct asm_stream_param_data_v2); - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), - &payload_params, - sizeof(struct asm_stream_cmd_set_pp_params_v2)); - - data.module_id = AUDPROC_MODULE_ID_MFC; - data.param_id = AUDPROC_CHMIXER_PARAM_ID_COEFF; - data.param_size = sizeof(struct audproc_chmixer_param_coeff) + - variable_payload; - data.reserved = 0; - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2)), - &data, sizeof(struct asm_stream_param_data_v2)); - - pan_cfg.index = 0; - pan_cfg.num_output_channels = pan_param->num_output_channels; - pan_cfg.num_input_channels = pan_param->num_input_channels; - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2) + - sizeof(struct asm_stream_param_data_v2)), - &pan_cfg, sizeof(struct 
audproc_chmixer_param_coeff)); - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2) + - sizeof(struct asm_stream_param_data_v2) + - sizeof(struct audproc_chmixer_param_coeff)), - pan_param->output_channel_map, - pan_param->num_output_channels * sizeof(uint16_t)); - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2) + - sizeof(struct asm_stream_param_data_v2) + - sizeof(struct audproc_chmixer_param_coeff) + - pan_param->num_output_channels * sizeof(uint16_t)), - pan_param->input_channel_map, - pan_param->num_input_channels * sizeof(uint16_t)); - - dst_gain_ptr = (uint16_t *) ((u8 *)asm_params + sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2) + - sizeof(struct asm_stream_param_data_v2) + - sizeof(struct audproc_chmixer_param_coeff) + - (pan_param->num_output_channels * sizeof(uint16_t)) + - (pan_param->num_input_channels * sizeof(uint16_t))); - for (ii = 0; ii < pan_param->num_output_channels * - pan_param->num_input_channels; ii++) - dst_gain_ptr[ii] = (uint16_t) pan_param->gain[ii]; + chmixer_cfg->index = 0; + chmixer_cfg->num_output_channels = num_out_ch; + chmixer_cfg->num_input_channels = num_in_ch; - rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params); + /* Repack channel data as max channels may not be used */ + memcpy(chmixer_cfg->payload, pan_param->output_channel_map, + out_ch_map_size); + packed_data_size += out_ch_map_size; + + memcpy(chmixer_cfg->payload + packed_data_size, + pan_param->input_channel_map, in_ch_map_size); + packed_data_size += in_ch_map_size; + + dst_gain_ptr = (uint16_t *) chmixer_cfg->payload + packed_data_size; + for (i = 0; i < num_out_ch * num_in_ch; i++) + dst_gain_ptr[i] = (uint16_t) pan_param->gain[i]; + + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr, + (uint8_t *) chmixer_cfg); if (rc < 0) { pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", - __func__, data.param_id, rc); + __func__, 
param_hdr.param_id, rc); rc = -EINVAL; - goto fail_cmd2; } + kfree(chmixer_cfg); - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); - if (!rc) { - pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, - data.param_id); - rc = -ETIMEDOUT; - goto fail_cmd2; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n", - __func__, adsp_err_get_err_str( - atomic_read(&ac->cmd_state_pp)), - data.param_id); - rc = adsp_err_get_lnx_err_code( - atomic_read(&ac->cmd_state_pp)); - goto fail_cmd2; - } - rc = 0; -fail_cmd2: - kfree(asm_params); -fail_cmd1: return rc; } int q6asm_equalizer(struct audio_client *ac, void *eq_p) { - struct asm_eq_params eq; + struct asm_eq_params eq = {0}; struct msm_audio_eq_stream_config *eq_params = NULL; + struct param_hdr_v3 param_info = {0}; int i = 0; - int sz = 0; int rc = 0; if (ac == NULL) { - pr_err("%s: APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; - } - if (ac->apr == NULL) { - pr_err("%s: AC APR handle NULL\n", __func__); - rc = -EINVAL; - goto fail_cmd; + pr_err("%s: Audio client is NULL\n", __func__); + return -EINVAL; } - if (eq_p == NULL) { pr_err("%s: [%d]: Invalid Eq param\n", __func__, ac->session); rc = -EINVAL; goto fail_cmd; } - sz = sizeof(struct asm_eq_params); - eq_params = (struct msm_audio_eq_stream_config *) eq_p; - q6asm_add_hdr(ac, &eq.hdr, sz, TRUE); - atomic_set(&ac->cmd_state_pp, -1); - eq.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - eq.param.data_payload_addr_lsw = 0; - eq.param.data_payload_addr_msw = 0; - eq.param.mem_map_handle = 0; - eq.param.data_payload_size = sizeof(eq) - - sizeof(eq.hdr) - sizeof(eq.param); - eq.data.module_id = ASM_MODULE_ID_EQUALIZER; - eq.data.param_id = ASM_PARAM_ID_EQUALIZER_PARAMETERS; - eq.data.param_size = eq.param.data_payload_size - sizeof(eq.data); + eq_params = (struct msm_audio_eq_stream_config *) eq_p; + param_info.module_id = ASM_MODULE_ID_EQUALIZER; + 
param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = ASM_PARAM_ID_EQUALIZER_PARAMETERS; + param_info.param_size = sizeof(eq); eq.enable_flag = eq_params->enable; eq.num_bands = eq_params->num_bands; @@ -7753,32 +7541,11 @@ int q6asm_equalizer(struct audio_client *ac, void *eq_p) pr_debug("%s: q_factor:%d bandnum:%d\n", __func__, eq_params->eq_bands[i].q_factor, i); } - rc = apr_send_pkt(ac->apr, (uint32_t *)&eq); - if (rc < 0) { + rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info, (u8 *) &eq); + if (rc) pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", - __func__, eq.data.param_id, rc); - rc = -EINVAL; - goto fail_cmd; - } + __func__, param_info.param_id, rc); - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); - if (!rc) { - pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, - eq.data.param_id); - rc = -ETIMEDOUT; - goto fail_cmd; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n", - __func__, adsp_err_get_err_str( - atomic_read(&ac->cmd_state_pp)), - eq.data.param_id); - rc = adsp_err_get_lnx_err_code( - atomic_read(&ac->cmd_state_pp)); - goto fail_cmd; - } - rc = 0; fail_cmd: return rc; } @@ -8293,7 +8060,7 @@ int q6asm_get_session_time(struct audio_client *ac, uint64_t *tstamp) mtmx_params.param_info.param_id = ASM_SESSION_MTMX_STRTR_PARAM_SESSION_TIME_V3; mtmx_params.param_info.param_max_size = - sizeof(struct asm_stream_param_data_v2) + + sizeof(struct param_hdr_v1) + sizeof(struct asm_session_mtmx_strtr_param_session_time_v3_t); atomic_set(&ac->time_flag, 1); @@ -8366,79 +8133,6 @@ fail_cmd: return -EINVAL; } - -int q6asm_send_audio_effects_params(struct audio_client *ac, char *params, - uint32_t params_length) -{ - char *asm_params = NULL; - struct apr_hdr hdr; - struct asm_stream_cmd_set_pp_params_v2 payload_params; - int sz, rc; - - pr_debug("%s:\n", __func__); - if (!ac) { - pr_err("%s: APR handle NULL\n", __func__); - 
return -EINVAL; - } - if (ac->apr == NULL) { - pr_err("%s: AC APR handle NULL\n", __func__); - return -EINVAL; - } - if (params == NULL) { - pr_err("%s: params NULL\n", __func__); - return -EINVAL; - } - sz = sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2) + - params_length; - asm_params = kzalloc(sz, GFP_KERNEL); - if (!asm_params) { - pr_err("%s, asm params memory alloc failed", __func__); - return -ENOMEM; - } - q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2) + - params_length), TRUE); - atomic_set(&ac->cmd_state_pp, -1); - hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - payload_params.data_payload_addr_lsw = 0; - payload_params.data_payload_addr_msw = 0; - payload_params.mem_map_handle = 0; - payload_params.data_payload_size = params_length; - memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr)); - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), &payload_params, - sizeof(struct asm_stream_cmd_set_pp_params_v2)); - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2)), - params, params_length); - rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params); - if (rc < 0) { - pr_err("%s: audio effects set-params send failed\n", __func__); - rc = -EINVAL; - goto fail_send_param; - } - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 1*HZ); - if (!rc) { - pr_err("%s: timeout, audio effects set-params\n", __func__); - rc = -ETIMEDOUT; - goto fail_send_param; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%s] set-params\n", - __func__, adsp_err_get_err_str( - atomic_read(&ac->cmd_state_pp))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&ac->cmd_state_pp)); - goto fail_send_param; - } - - rc = 0; -fail_send_param: - kfree(asm_params); - return rc; -} - int q6asm_send_mtmx_strtr_window(struct audio_client *ac, struct asm_session_mtmx_strtr_param_window_v2_t *window_param, 
uint32_t param_id) @@ -8471,7 +8165,7 @@ int q6asm_send_mtmx_strtr_window(struct audio_client *ac, matrix.param.data_payload_addr_msw = 0; matrix.param.mem_map_handle = 0; matrix.param.data_payload_size = - sizeof(struct asm_stream_param_data_v2) + + sizeof(struct param_hdr_v1) + sizeof(struct asm_session_mtmx_strtr_param_window_v2_t); matrix.param.direction = 0; /* RX */ matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC; @@ -8556,7 +8250,7 @@ int q6asm_send_mtmx_strtr_render_mode(struct audio_client *ac, matrix.param.data_payload_addr_msw = 0; matrix.param.mem_map_handle = 0; matrix.param.data_payload_size = - sizeof(struct asm_stream_param_data_v2) + + sizeof(struct param_hdr_v1) + sizeof(struct asm_session_mtmx_strtr_param_render_mode_t); matrix.param.direction = 0; /* RX */ matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC; @@ -8641,7 +8335,7 @@ int q6asm_send_mtmx_strtr_clk_rec_mode(struct audio_client *ac, matrix.param.data_payload_addr_msw = 0; matrix.param.mem_map_handle = 0; matrix.param.data_payload_size = - sizeof(struct asm_stream_param_data_v2) + + sizeof(struct param_hdr_v1) + sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t); matrix.param.direction = 0; /* RX */ matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC; @@ -8716,7 +8410,7 @@ int q6asm_send_mtmx_strtr_enable_adjust_session_clock(struct audio_client *ac, matrix.param.data_payload_addr_msw = 0; matrix.param.mem_map_handle = 0; matrix.param.data_payload_size = - sizeof(struct asm_stream_param_data_v2) + + sizeof(struct param_hdr_v1) + sizeof(struct asm_session_mtmx_param_adjust_session_time_ctl_t); matrix.param.direction = 0; /* RX */ matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC; @@ -9266,7 +8960,7 @@ int q6asm_get_apr_service_id(int session_id) return -EINVAL; } - return ((struct apr_svc *)session[session_id]->apr)->id; + return ((struct apr_svc *)(session[session_id].ac)->apr)->id; } int q6asm_get_asm_topology(int session_id) @@ 
-9277,12 +8971,12 @@ int q6asm_get_asm_topology(int session_id) pr_err("%s: invalid session_id = %d\n", __func__, session_id); goto done; } - if (session[session_id] == NULL) { + if (session[session_id].ac == NULL) { pr_err("%s: session not created for session id = %d\n", __func__, session_id); goto done; } - topology = session[session_id]->topology; + topology = (session[session_id].ac)->topology; done: return topology; } @@ -9295,12 +8989,12 @@ int q6asm_get_asm_app_type(int session_id) pr_err("%s: invalid session_id = %d\n", __func__, session_id); goto done; } - if (session[session_id] == NULL) { + if (session[session_id].ac == NULL) { pr_err("%s: session not created for session id = %d\n", __func__, session_id); goto done; } - app_type = session[session_id]->app_type; + app_type = (session[session_id].ac)->app_type; done: return app_type; } @@ -9355,19 +9049,14 @@ done: int q6asm_send_cal(struct audio_client *ac) { struct cal_block_data *cal_block = NULL; - struct apr_hdr hdr; - char *asm_params = NULL; - struct asm_stream_cmd_set_pp_params_v2 payload_params; - int sz, rc = -EINVAL; + struct mem_mapping_hdr mem_hdr = {0}; + u32 payload_size = 0; + int rc = -EINVAL; pr_debug("%s:\n", __func__); if (!ac) { - pr_err("%s: APR handle NULL\n", __func__); - goto done; - } - if (ac->apr == NULL) { - pr_err("%s: AC APR handle NULL\n", __func__); - goto done; + pr_err("%s: Audio client is NULL\n", __func__); + return -EINVAL; } if (ac->io_mode & NT_MODE) { pr_debug("%s: called for NT MODE, exiting\n", __func__); @@ -9404,62 +9093,26 @@ int q6asm_send_cal(struct audio_client *ac) goto unlock; } - sz = sizeof(struct apr_hdr) + - sizeof(struct asm_stream_cmd_set_pp_params_v2); - asm_params = kzalloc(sz, GFP_KERNEL); - if (!asm_params) { - pr_err("%s, asm params memory alloc failed", __func__); - rc = -ENOMEM; - goto unlock; - } - - /* asm_stream_cmd_set_pp_params_v2 has no APR header in it */ - q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) + - sizeof(struct 
asm_stream_cmd_set_pp_params_v2)), TRUE); - - atomic_set(&ac->cmd_state_pp, -1); - hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; - payload_params.data_payload_addr_lsw = - lower_32_bits(cal_block->cal_data.paddr); - payload_params.data_payload_addr_msw = - msm_audio_populate_upper_32_bits( - cal_block->cal_data.paddr); - payload_params.mem_map_handle = cal_block->map_data.q6map_handle; - payload_params.data_payload_size = cal_block->cal_data.size; - memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr)); - memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), &payload_params, - sizeof(struct asm_stream_cmd_set_pp_params_v2)); + mem_hdr.data_payload_addr_lsw = + lower_32_bits(cal_block->cal_data.paddr); + mem_hdr.data_payload_addr_msw = + msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr); + mem_hdr.mem_map_handle = cal_block->map_data.q6map_handle; + payload_size = cal_block->cal_data.size; pr_debug("%s: phyaddr lsw = %x msw = %x, maphdl = %x calsize = %d\n", - __func__, payload_params.data_payload_addr_lsw, - payload_params.data_payload_addr_msw, - payload_params.mem_map_handle, - payload_params.data_payload_size); + __func__, mem_hdr.data_payload_addr_lsw, + mem_hdr.data_payload_addr_msw, mem_hdr.mem_map_handle, + payload_size); - rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params); - if (rc < 0) { + rc = q6asm_set_pp_params(ac, &mem_hdr, NULL, payload_size); + if (rc) { pr_err("%s: audio audstrm cal send failed\n", __func__); - rc = -EINVAL; - goto free; - } - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state_pp) >= 0), 5 * HZ); - if (!rc) { - pr_err("%s: timeout, audio audstrm cal send\n", __func__); - rc = -ETIMEDOUT; - goto free; - } - if (atomic_read(&ac->cmd_state_pp) > 0) { - pr_err("%s: DSP returned error[%d] audio audstrm cal send\n", - __func__, atomic_read(&ac->cmd_state_pp)); - rc = -EINVAL; - goto free; + goto unlock; } rc = 0; -free: - kfree(asm_params); unlock: mutex_unlock(&cal_data[ASM_AUDSTRM_CAL]->lock); done: @@ 
-9643,7 +9296,10 @@ static int __init q6asm_init(void) int lcnt, ret; pr_debug("%s:\n", __func__); - memset(session, 0, sizeof(session)); + memset(session, 0, sizeof(struct audio_session) * + (ASM_ACTIVE_STREAMS_ALLOWED + 1)); + for (lcnt = 0; lcnt <= ASM_ACTIVE_STREAMS_ALLOWED; lcnt++) + spin_lock_init(&(session[lcnt].session_lock)); set_custom_topology = 1; /*setup common client used for cal mem map */ diff --git a/sound/soc/msm/qdsp6v2/q6lsm.c b/sound/soc/msm/qdsp6v2/q6lsm.c index 11574a874a5a..1161bb31c434 100644 --- a/sound/soc/msm/qdsp6v2/q6lsm.c +++ b/sound/soc/msm/qdsp6v2/q6lsm.c @@ -26,6 +26,7 @@ #include <sound/apr_audio-v2.h> #include <sound/lsm_params.h> #include <sound/q6core.h> +#include <sound/q6common.h> #include <sound/q6lsm.h> #include <asm/ioctls.h> #include <linux/memory.h> @@ -73,11 +74,6 @@ struct lsm_common { struct mutex apr_lock; }; -struct lsm_module_param_ids { - uint32_t module_id; - uint32_t param_id; -}; - static struct lsm_common lsm_common; /* * mmap_handle_p can point either client->sound_model.mem_map_handle or @@ -98,38 +94,6 @@ static int q6lsm_memory_map_regions(struct lsm_client *client, static int q6lsm_memory_unmap_regions(struct lsm_client *client, uint32_t handle); -static void q6lsm_set_param_hdr_info( - struct lsm_set_params_hdr *param_hdr, - u32 payload_size, u32 addr_lsw, u32 addr_msw, - u32 mmap_handle) -{ - param_hdr->data_payload_size = payload_size; - param_hdr->data_payload_addr_lsw = addr_lsw; - param_hdr->data_payload_addr_msw = addr_msw; - param_hdr->mem_map_handle = mmap_handle; -} - -static void q6lsm_set_param_common( - struct lsm_param_payload_common *common, - struct lsm_module_param_ids *ids, - u32 param_size, u32 set_param_version) -{ - common->module_id = ids->module_id; - common->param_id = ids->param_id; - - switch (set_param_version) { - case LSM_SESSION_CMD_SET_PARAMS_V2: - common->p_size.param_size = param_size; - break; - case LSM_SESSION_CMD_SET_PARAMS: - default: - common->p_size.sr.param_size = 
- (u16) param_size; - common->p_size.sr.reserved = 0; - break; - } -} - static int q6lsm_callback(struct apr_client_data *data, void *priv) { struct lsm_client *client = (struct lsm_client *)priv; @@ -199,6 +163,7 @@ static int q6lsm_callback(struct apr_client_data *data, void *priv) case LSM_SESSION_CMD_OPEN_TX_V2: case LSM_CMD_ADD_TOPOLOGIES: case LSM_SESSION_CMD_SET_PARAMS_V2: + case LSM_SESSION_CMD_SET_PARAMS_V3: if (token != client->session && payload[0] != LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL) { @@ -433,6 +398,189 @@ static void q6lsm_add_hdr(struct lsm_client *client, struct apr_hdr *hdr, hdr->token = client->session; } +/* + * LSM still supports 3 versions of commands so it cannot use the common + * Q6Common packing function. No need to check parameter pointers as it + * is static and should only be called internally. + */ +static int q6lsm_pack_params(u8 *dest, struct param_hdr_v3 *param_info, + u8 *param_data, size_t *final_length, + u32 set_param_opcode) +{ + bool iid_supported = q6common_is_instance_id_supported(); + union param_hdrs *param_hdr = NULL; + u32 param_size = param_info->param_size; + size_t hdr_size; + size_t provided_size = *final_length; + + hdr_size = iid_supported ? 
sizeof(struct param_hdr_v3) : + sizeof(struct param_hdr_v2); + if (provided_size < hdr_size) { + pr_err("%s: Provided size %zu is not large enough, need %zu\n", + __func__, provided_size, hdr_size); + return -EINVAL; + } + + if (iid_supported) { + memcpy(dest, param_info, hdr_size); + } else { + /* MID, PID and structure size are the same in V1 and V2 */ + param_hdr = (union param_hdrs *) dest; + param_hdr->v2.module_id = param_info->module_id; + param_hdr->v2.param_id = param_info->param_id; + + switch (set_param_opcode) { + case LSM_SESSION_CMD_SET_PARAMS_V2: + param_hdr->v2.param_size = param_size; + break; + case LSM_SESSION_CMD_SET_PARAMS: + default: + if (param_size > U16_MAX) { + pr_err("%s: Invalid param size %d\n", __func__, + param_size); + return -EINVAL; + } + + param_hdr->v1.param_size = param_size; + param_hdr->v1.reserved = 0; + break; + } + } + + *final_length = hdr_size; + + if (param_data != NULL) { + if (provided_size < hdr_size + param_size) { + pr_err("%s: Provided size %zu is not large enough, need %zu\n", + __func__, provided_size, hdr_size + param_size); + return -EINVAL; + } + memcpy(dest + hdr_size, param_data, param_size); + *final_length += param_size; + } + return 0; +} + +static int q6lsm_set_params_v2(struct lsm_client *client, + struct mem_mapping_hdr *mem_hdr, + uint8_t *param_data, uint32_t param_size, + uint32_t set_param_opcode) +{ + struct lsm_session_cmd_set_params_v2 *lsm_set_param = NULL; + uint32_t pkt_size = 0; + int ret; + + pkt_size = sizeof(struct lsm_session_cmd_set_params_v2); + /* Only include param size in packet size when inband */ + if (param_data != NULL) + pkt_size += param_size; + + lsm_set_param = kzalloc(pkt_size, GFP_KERNEL); + if (!lsm_set_param) + return -ENOMEM; + + q6lsm_add_hdr(client, &lsm_set_param->apr_hdr, pkt_size, true); + lsm_set_param->apr_hdr.opcode = set_param_opcode; + lsm_set_param->payload_size = param_size; + + if (mem_hdr != NULL) { + lsm_set_param->mem_hdr = *mem_hdr; + } else if 
(param_data != NULL) { + memcpy(lsm_set_param->param_data, param_data, param_size); + } else { + pr_err("%s: Received NULL pointers for both memory header and data\n", + __func__); + ret = -EINVAL; + goto done; + } + + ret = q6lsm_apr_send_pkt(client, client->apr, lsm_set_param, true, + NULL); +done: + kfree(lsm_set_param); + return ret; +} + +static int q6lsm_set_params_v3(struct lsm_client *client, + struct mem_mapping_hdr *mem_hdr, + uint8_t *param_data, uint32_t param_size) +{ + struct lsm_session_cmd_set_params_v3 *lsm_set_param = NULL; + uint16_t pkt_size = 0; + int ret = 0; + + pkt_size = sizeof(struct lsm_session_cmd_set_params_v3); + /* Only include param size in packet size when inband */ + if (param_data != NULL) + pkt_size += param_size; + + lsm_set_param = kzalloc(pkt_size, GFP_KERNEL); + if (!lsm_set_param) + return -ENOMEM; + + q6lsm_add_hdr(client, &lsm_set_param->apr_hdr, pkt_size, true); + lsm_set_param->apr_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS_V3; + lsm_set_param->payload_size = param_size; + + if (mem_hdr != NULL) { + lsm_set_param->mem_hdr = *mem_hdr; + } else if (param_data != NULL) { + memcpy(lsm_set_param->param_data, param_data, param_size); + } else { + pr_err("%s: Received NULL pointers for both memory header and data\n", + __func__); + ret = -EINVAL; + goto done; + } + + ret = q6lsm_apr_send_pkt(client, client->apr, lsm_set_param, true, + NULL); +done: + kfree(lsm_set_param); + return ret; +} + +static int q6lsm_set_params(struct lsm_client *client, + struct mem_mapping_hdr *mem_hdr, + uint8_t *param_data, uint32_t param_size, + uint32_t set_param_opcode) + +{ + if (q6common_is_instance_id_supported()) + return q6lsm_set_params_v3(client, mem_hdr, param_data, + param_size); + else + return q6lsm_set_params_v2(client, mem_hdr, param_data, + param_size, set_param_opcode); +} + +static int q6lsm_pack_and_set_params(struct lsm_client *client, + struct param_hdr_v3 *param_info, + uint8_t *param_data, + uint32_t set_param_opcode) + +{ + u8 
*packed_data = NULL; + size_t total_size = 0; + int ret = 0; + + total_size = sizeof(union param_hdrs) + param_info->param_size; + packed_data = kzalloc(total_size, GFP_KERNEL); + if (!packed_data) + return -ENOMEM; + + ret = q6lsm_pack_params(packed_data, param_info, param_data, + &total_size, set_param_opcode); + if (ret) + goto done; + + ret = q6lsm_set_params(client, NULL, packed_data, total_size, + set_param_opcode); + +done: + kfree(packed_data); + return ret; +} static int q6lsm_send_custom_topologies(struct lsm_client *client) { @@ -586,14 +734,18 @@ void q6lsm_sm_set_param_data(struct lsm_client *client, struct lsm_params_info *p_info, size_t *offset) { - struct lsm_param_payload_common *param; - - param = (struct lsm_param_payload_common *) - client->sound_model.data; - param->module_id = p_info->module_id; - param->param_id = p_info->param_id; - param->p_size.param_size = client->sound_model.size; - *offset = sizeof(*param); + struct param_hdr_v3 param_hdr = {0}; + int ret = 0; + + param_hdr.module_id = p_info->module_id; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = p_info->param_id; + param_hdr.param_size = client->sound_model.size; + + ret = q6lsm_pack_params((u8 *) client->sound_model.data, ¶m_hdr, + NULL, offset, LSM_SESSION_CMD_SET_PARAMS_V2); + if (ret) + pr_err("%s: Failed to pack params, error %d\n", __func__, ret); } int q6lsm_open(struct lsm_client *client, uint16_t app_id) @@ -644,109 +796,64 @@ done: return rc; } -static int q6lsm_send_confidence_levels( - struct lsm_client *client, - struct lsm_module_param_ids *ids, - u32 set_param_opcode) +static int q6lsm_send_confidence_levels(struct lsm_client *client, + struct param_hdr_v3 *param_info, + uint32_t set_param_opcode) { - u8 *packet; - size_t pkt_size; - struct lsm_cmd_set_params_conf *conf_params; - struct apr_hdr *msg_hdr; - struct lsm_param_min_confidence_levels *cfl; + struct lsm_param_confidence_levels *conf_levels = NULL; + uint32_t num_conf_levels = 
client->num_confidence_levels; uint8_t i = 0; uint8_t padd_size = 0; - u8 *conf_levels; - int rc; - u32 payload_size, param_size; + uint32_t param_size = 0; + int rc = 0; - padd_size = (4 - (client->num_confidence_levels % 4)) - 1; - pkt_size = sizeof(*conf_params) + padd_size + - client->num_confidence_levels; + /* Data must be 4 byte alligned so add any necessary padding. */ + padd_size = (4 - (num_conf_levels % 4)) - 1; + param_size = (sizeof(uint8_t) + num_conf_levels + padd_size) * + sizeof(uint8_t); + param_info->param_size = param_size; + pr_debug("%s: Set Conf Levels PARAM SIZE = %d\n", __func__, param_size); - packet = kzalloc(pkt_size, GFP_KERNEL); - if (!packet) { - pr_err("%s: no memory for confidence level, size = %zd\n", - __func__, pkt_size); + conf_levels = kzalloc(param_size, GFP_KERNEL); + if (!conf_levels) return -ENOMEM; - } - conf_params = (struct lsm_cmd_set_params_conf *) packet; - conf_levels = (u8 *) (packet + sizeof(*conf_params)); - msg_hdr = &conf_params->msg_hdr; - q6lsm_add_hdr(client, msg_hdr, - pkt_size, true); - msg_hdr->opcode = set_param_opcode; - payload_size = pkt_size - sizeof(*msg_hdr) - - sizeof(conf_params->params_hdr); - q6lsm_set_param_hdr_info(&conf_params->params_hdr, - payload_size, 0, 0, 0); - cfl = &conf_params->conf_payload; - param_size = ((sizeof(uint8_t) + padd_size + - client->num_confidence_levels)) * - sizeof(uint8_t); - q6lsm_set_param_common(&cfl->common, ids, - param_size, set_param_opcode); - cfl->num_confidence_levels = client->num_confidence_levels; - - pr_debug("%s: CMD PARAM SIZE = %d\n", - __func__, param_size); - pr_debug("%s: Num conf_level = %d\n", - __func__, client->num_confidence_levels); - - memcpy(conf_levels, client->confidence_levels, - client->num_confidence_levels); - for (i = 0; i < client->num_confidence_levels; i++) - pr_debug("%s: Confidence_level[%d] = %d\n", - __func__, i, conf_levels[i]); + conf_levels->num_confidence_levels = num_conf_levels; + pr_debug("%s: Num conf_level = %d\n", 
__func__, num_conf_levels); - rc = q6lsm_apr_send_pkt(client, client->apr, - packet, true, NULL); + memcpy(conf_levels->confidence_levels, client->confidence_levels, + num_conf_levels); + for (i = 0; i < num_conf_levels; i++) + pr_debug("%s: Confidence_level[%d] = %d\n", __func__, i, + conf_levels->confidence_levels[i]); + + rc = q6lsm_pack_and_set_params(client, param_info, + (uint8_t *) conf_levels, + set_param_opcode); if (rc) - pr_err("%s: confidence_levels cmd failed, err = %d\n", - __func__, rc); - kfree(packet); + pr_err("%s: Send confidence_levels cmd failed, err = %d\n", + __func__, rc); + kfree(conf_levels); return rc; } static int q6lsm_send_param_opmode(struct lsm_client *client, - struct lsm_module_param_ids *opmode_ids, - u32 set_param_opcode) + struct param_hdr_v3 *param_info, + u32 set_param_opcode) { - int rc; - struct lsm_cmd_set_params_opmode opmode_params; - struct apr_hdr *msg_hdr; - - struct lsm_param_op_mode *op_mode; - u32 data_payload_size, param_size; - - msg_hdr = &opmode_params.msg_hdr; - q6lsm_add_hdr(client, msg_hdr, - sizeof(opmode_params), true); - msg_hdr->opcode = set_param_opcode; - data_payload_size = sizeof(opmode_params) - - sizeof(*msg_hdr) - - sizeof(opmode_params.params_hdr); - q6lsm_set_param_hdr_info(&opmode_params.params_hdr, - data_payload_size, 0, 0, 0); - op_mode = &opmode_params.op_mode; - - - param_size = sizeof(struct lsm_param_op_mode) - - sizeof(op_mode->common); - q6lsm_set_param_common(&op_mode->common, - opmode_ids, param_size, - set_param_opcode); - op_mode->minor_version = QLSM_PARAM_ID_MINOR_VERSION; - op_mode->mode = client->mode; - op_mode->reserved = 0; - pr_debug("%s: mode = 0x%x", __func__, op_mode->mode); + struct lsm_param_op_mode op_mode = {0}; + int rc = 0; - rc = q6lsm_apr_send_pkt(client, client->apr, - &opmode_params, true, NULL); + param_info->param_size = sizeof(op_mode); + + op_mode.minor_version = QLSM_PARAM_ID_MINOR_VERSION; + op_mode.mode = client->mode; + pr_debug("%s: mode = 0x%x", 
__func__, op_mode.mode); + + rc = q6lsm_pack_and_set_params(client, param_info, (uint8_t *) &op_mode, + set_param_opcode); if (rc) - pr_err("%s: Failed set_params opcode 0x%x, rc %d\n", - __func__, msg_hdr->opcode, rc); + pr_err("%s: Failed set_params, rc %d\n", __func__, rc); pr_debug("%s: leave %d\n", __func__, rc); return rc; @@ -764,138 +871,81 @@ int get_lsm_port(void) int q6lsm_set_port_connected(struct lsm_client *client) { - int rc; - struct lsm_cmd_set_connectport connectport; - struct lsm_module_param_ids connectport_ids; - struct apr_hdr *msg_hdr; - struct lsm_param_connect_to_port *connect_to_port; - u32 data_payload_size, param_size, set_param_opcode; + struct lsm_param_connect_to_port connect_port = {0}; + struct param_hdr_v3 connectport_hdr = {0}; + u32 set_param_opcode = 0; + int rc = 0; if (client->use_topology) { set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2; - connectport_ids.module_id = LSM_MODULE_ID_FRAMEWORK; - connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT; + connectport_hdr.module_id = LSM_MODULE_ID_FRAMEWORK; } else { set_param_opcode = LSM_SESSION_CMD_SET_PARAMS; - connectport_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP; - connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT; + connectport_hdr.module_id = LSM_MODULE_ID_VOICE_WAKEUP; } - client->connect_to_port = get_lsm_port(); + connectport_hdr.instance_id = INSTANCE_ID_0; + connectport_hdr.param_id = LSM_PARAM_ID_CONNECT_TO_PORT; + connectport_hdr.param_size = sizeof(connect_port); - msg_hdr = &connectport.msg_hdr; - q6lsm_add_hdr(client, msg_hdr, - sizeof(connectport), true); - msg_hdr->opcode = set_param_opcode; - data_payload_size = sizeof(connectport) - - sizeof(*msg_hdr) - - sizeof(connectport.params_hdr); - q6lsm_set_param_hdr_info(&connectport.params_hdr, - data_payload_size, 0, 0, 0); - connect_to_port = &connectport.connect_to_port; - - param_size = (sizeof(struct lsm_param_connect_to_port) - - sizeof(connect_to_port->common)); - 
q6lsm_set_param_common(&connect_to_port->common, - &connectport_ids, param_size, - set_param_opcode); - connect_to_port->minor_version = QLSM_PARAM_ID_MINOR_VERSION; - connect_to_port->port_id = client->connect_to_port; - connect_to_port->reserved = 0; - pr_debug("%s: port= %d", __func__, connect_to_port->port_id); + client->connect_to_port = get_lsm_port(); + connect_port.minor_version = QLSM_PARAM_ID_MINOR_VERSION; + connect_port.port_id = client->connect_to_port; - rc = q6lsm_apr_send_pkt(client, client->apr, - &connectport, true, NULL); + rc = q6lsm_pack_and_set_params(client, &connectport_hdr, + (uint8_t *) &connect_port, + set_param_opcode); if (rc) - pr_err("%s: Failed set_params opcode 0x%x, rc %d\n", - __func__, msg_hdr->opcode, rc); - + pr_err("%s: Failed set_params, rc %d\n", __func__, rc); return rc; } + static int q6lsm_send_param_polling_enable(struct lsm_client *client, - bool poll_en, - struct lsm_module_param_ids *poll_enable_ids, - u32 set_param_opcode) + bool poll_en, + struct param_hdr_v3 *param_info, + u32 set_param_opcode) { + struct lsm_param_poll_enable polling_enable = {0}; int rc = 0; - struct lsm_cmd_poll_enable cmd; - struct apr_hdr *msg_hdr; - struct lsm_param_poll_enable *poll_enable; - u32 data_payload_size, param_size; - - msg_hdr = &cmd.msg_hdr; - q6lsm_add_hdr(client, msg_hdr, - sizeof(struct lsm_cmd_poll_enable), true); - msg_hdr->opcode = set_param_opcode; - data_payload_size = sizeof(struct lsm_cmd_poll_enable) - - sizeof(struct apr_hdr) - - sizeof(struct lsm_set_params_hdr); - q6lsm_set_param_hdr_info(&cmd.params_hdr, - data_payload_size, 0, 0, 0); - poll_enable = &cmd.poll_enable; - - param_size = (sizeof(struct lsm_param_poll_enable) - - sizeof(poll_enable->common)); - q6lsm_set_param_common(&poll_enable->common, - poll_enable_ids, param_size, - set_param_opcode); - poll_enable->minor_version = QLSM_PARAM_ID_MINOR_VERSION; - poll_enable->polling_enable = (poll_en) ? 
1 : 0; - pr_debug("%s: poll enable= %d", __func__, poll_enable->polling_enable); - rc = q6lsm_apr_send_pkt(client, client->apr, - &cmd, true, NULL); - if (rc) - pr_err("%s: Failed set_params opcode 0x%x, rc %d\n", - __func__, msg_hdr->opcode, rc); + param_info->param_size = sizeof(polling_enable); + + polling_enable.minor_version = QLSM_PARAM_ID_MINOR_VERSION; + polling_enable.polling_enable = (poll_en) ? 1 : 0; + rc = q6lsm_pack_and_set_params(client, param_info, + (uint8_t *) &polling_enable, + set_param_opcode); + if (rc) + pr_err("%s: Failed set_params, rc %d\n", __func__, rc); return rc; } int q6lsm_set_fwk_mode_cfg(struct lsm_client *client, uint32_t event_mode) { + struct lsm_param_fwk_mode_cfg fwk_mode_cfg = {0}; + struct param_hdr_v3 fwk_mode_cfg_hdr = {0}; int rc = 0; - struct lsm_cmd_set_fwk_mode_cfg cmd; - struct lsm_module_param_ids fwk_mode_cfg_ids; - struct apr_hdr *msg_hdr; - struct lsm_param_fwk_mode_cfg *fwk_mode_cfg; - u32 data_payload_size, param_size, set_param_opcode; - if (client->use_topology) { - set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2; - fwk_mode_cfg_ids.module_id = LSM_MODULE_ID_FRAMEWORK; - fwk_mode_cfg_ids.param_id = LSM_PARAM_ID_FWK_MODE_CONFIG; - } else { + if (!client->use_topology) { pr_debug("%s: Ignore sending event mode\n", __func__); return rc; } - msg_hdr = &cmd.msg_hdr; - q6lsm_add_hdr(client, msg_hdr, - sizeof(struct lsm_cmd_set_fwk_mode_cfg), true); - msg_hdr->opcode = set_param_opcode; - data_payload_size = sizeof(struct lsm_cmd_set_fwk_mode_cfg) - - sizeof(struct apr_hdr) - - sizeof(struct lsm_set_params_hdr); - q6lsm_set_param_hdr_info(&cmd.params_hdr, - data_payload_size, 0, 0, 0); - fwk_mode_cfg = &cmd.fwk_mode_cfg; - - param_size = (sizeof(struct lsm_param_fwk_mode_cfg) - - sizeof(fwk_mode_cfg->common)); - q6lsm_set_param_common(&fwk_mode_cfg->common, - &fwk_mode_cfg_ids, param_size, - set_param_opcode); + fwk_mode_cfg_hdr.module_id = LSM_MODULE_ID_FRAMEWORK; + fwk_mode_cfg_hdr.instance_id = INSTANCE_ID_0; + 
fwk_mode_cfg_hdr.param_id = LSM_PARAM_ID_FWK_MODE_CONFIG; + fwk_mode_cfg_hdr.param_size = sizeof(fwk_mode_cfg); - fwk_mode_cfg->minor_version = QLSM_PARAM_ID_MINOR_VERSION; - fwk_mode_cfg->mode = event_mode; - pr_debug("%s: mode = %d\n", __func__, fwk_mode_cfg->mode); + fwk_mode_cfg.minor_version = QLSM_PARAM_ID_MINOR_VERSION; + fwk_mode_cfg.mode = event_mode; + pr_debug("%s: mode = %d\n", __func__, fwk_mode_cfg.mode); - rc = q6lsm_apr_send_pkt(client, client->apr, - &cmd, true, NULL); + rc = q6lsm_pack_and_set_params(client, &fwk_mode_cfg_hdr, + (uint8_t *) &fwk_mode_cfg, + LSM_SESSION_CMD_SET_PARAMS_V2); if (rc) - pr_err("%s: Failed set_params opcode 0x%x, rc %d\n", - __func__, msg_hdr->opcode, rc); + pr_err("%s: Failed set_params, rc %d\n", __func__, rc); return rc; } @@ -935,58 +985,38 @@ static int q6lsm_arrange_mch_map(struct lsm_param_media_fmt *media_fmt, int q6lsm_set_media_fmt_params(struct lsm_client *client) { - int rc = 0; - struct lsm_cmd_set_media_fmt cmd; - struct lsm_module_param_ids media_fmt_ids; - struct apr_hdr *msg_hdr; - struct lsm_param_media_fmt *media_fmt; - u32 data_payload_size, param_size, set_param_opcode; + struct lsm_param_media_fmt media_fmt = {0}; struct lsm_hw_params param = client->hw_params; + struct param_hdr_v3 media_fmt_hdr = {0}; + int rc = 0; - if (client->use_topology) { - set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2; - media_fmt_ids.module_id = LSM_MODULE_ID_FRAMEWORK; - media_fmt_ids.param_id = LSM_PARAM_ID_MEDIA_FMT; - } else { + if (!client->use_topology) { pr_debug("%s: Ignore sending media format\n", __func__); goto err_ret; } - msg_hdr = &cmd.msg_hdr; - q6lsm_add_hdr(client, msg_hdr, - sizeof(struct lsm_cmd_set_media_fmt), true); - msg_hdr->opcode = set_param_opcode; - data_payload_size = sizeof(struct lsm_cmd_set_media_fmt) - - sizeof(struct apr_hdr) - - sizeof(struct lsm_set_params_hdr); - q6lsm_set_param_hdr_info(&cmd.params_hdr, - data_payload_size, 0, 0, 0); - media_fmt = &cmd.media_fmt; - - param_size = 
(sizeof(struct lsm_param_media_fmt) - - sizeof(media_fmt->common)); - q6lsm_set_param_common(&media_fmt->common, - &media_fmt_ids, param_size, - set_param_opcode); + media_fmt_hdr.module_id = LSM_MODULE_ID_FRAMEWORK; + media_fmt_hdr.instance_id = INSTANCE_ID_0; + media_fmt_hdr.param_id = LSM_PARAM_ID_MEDIA_FMT; + media_fmt_hdr.param_size = sizeof(media_fmt); - media_fmt->minor_version = QLSM_PARAM_ID_MINOR_VERSION_2; - media_fmt->sample_rate = param.sample_rate; - media_fmt->num_channels = param.num_chs; - media_fmt->bit_width = param.sample_size; - - rc = q6lsm_arrange_mch_map(media_fmt, media_fmt->num_channels); + media_fmt.minor_version = QLSM_PARAM_ID_MINOR_VERSION_2; + media_fmt.sample_rate = param.sample_rate; + media_fmt.num_channels = param.num_chs; + media_fmt.bit_width = param.sample_size; + rc = q6lsm_arrange_mch_map(&media_fmt, media_fmt.num_channels); if (rc) goto err_ret; - pr_debug("%s: sample rate= %d, channels %d bit width %d\n", - __func__, media_fmt->sample_rate, media_fmt->num_channels, - media_fmt->bit_width); + pr_debug("%s: sample rate= %d, channels %d bit width %d\n", __func__, + media_fmt.sample_rate, media_fmt.num_channels, + media_fmt.bit_width); - rc = q6lsm_apr_send_pkt(client, client->apr, - &cmd, true, NULL); + rc = q6lsm_pack_and_set_params(client, &media_fmt_hdr, + (uint8_t *) &media_fmt, + LSM_SESSION_CMD_SET_PARAMS_V2); if (rc) - pr_err("%s: Failed set_params opcode 0x%x, rc %d\n", - __func__, msg_hdr->opcode, rc); + pr_err("%s: Failed set_params, rc %d\n", __func__, rc); err_ret: return rc; } @@ -995,9 +1025,8 @@ int q6lsm_set_data(struct lsm_client *client, enum lsm_detection_mode mode, bool detectfailure) { + struct param_hdr_v3 param_hdr = {0}; int rc = 0; - struct lsm_module_param_ids opmode_ids; - struct lsm_module_param_ids conf_levels_ids; if (!client->confidence_levels) { /* @@ -1021,22 +1050,20 @@ int q6lsm_set_data(struct lsm_client *client, } client->mode |= detectfailure << 2; - opmode_ids.module_id = 
LSM_MODULE_ID_VOICE_WAKEUP; - opmode_ids.param_id = LSM_PARAM_ID_OPERATION_MODE; - - rc = q6lsm_send_param_opmode(client, &opmode_ids, - LSM_SESSION_CMD_SET_PARAMS); + param_hdr.module_id = LSM_MODULE_ID_VOICE_WAKEUP; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_id = LSM_PARAM_ID_OPERATION_MODE; + rc = q6lsm_send_param_opmode(client, ¶m_hdr, + LSM_SESSION_CMD_SET_PARAMS); if (rc) { pr_err("%s: Failed to set lsm config params %d\n", __func__, rc); goto err_ret; } - conf_levels_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP; - conf_levels_ids.param_id = LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS; - - rc = q6lsm_send_confidence_levels(client, &conf_levels_ids, - LSM_SESSION_CMD_SET_PARAMS); + param_hdr.param_id = LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS; + rc = q6lsm_send_confidence_levels(client, ¶m_hdr, + LSM_SESSION_CMD_SET_PARAMS); if (rc) { pr_err("%s: Failed to send conf_levels, err = %d\n", __func__, rc); @@ -1226,9 +1253,7 @@ static int q6lsm_send_cal(struct lsm_client *client, u32 set_params_opcode) { int rc = 0; - struct lsm_cmd_set_params params; - struct lsm_set_params_hdr *params_hdr = ¶ms.param_hdr; - struct apr_hdr *msg_hdr = ¶ms.msg_hdr; + struct mem_mapping_hdr mem_hdr = {0}; struct cal_block_data *cal_block = NULL; pr_debug("%s: Session id %d\n", __func__, client->session); @@ -1258,21 +1283,16 @@ static int q6lsm_send_cal(struct lsm_client *client, } /* Cache mmap address, only map once or if new addr */ lsm_common.common_client[client->session].session = client->session; - q6lsm_add_hdr(client, msg_hdr, sizeof(params), true); - msg_hdr->opcode = set_params_opcode; - q6lsm_set_param_hdr_info(params_hdr, - cal_block->cal_data.size, - lower_32_bits(client->lsm_cal_phy_addr), - msm_audio_populate_upper_32_bits( - client->lsm_cal_phy_addr), - client->sound_model.mem_map_handle); - - pr_debug("%s: Cal Size = %zd", __func__, - cal_block->cal_data.size); - rc = q6lsm_apr_send_pkt(client, client->apr, ¶ms, true, NULL); + mem_hdr.data_payload_addr_lsw = 
lower_32_bits(client->lsm_cal_phy_addr); + mem_hdr.data_payload_addr_msw = + msm_audio_populate_upper_32_bits(client->lsm_cal_phy_addr); + mem_hdr.mem_map_handle = client->sound_model.mem_map_handle; + + pr_debug("%s: Cal Size = %zd", __func__, cal_block->cal_data.size); + rc = q6lsm_set_params(client, &mem_hdr, NULL, cal_block->cal_data.size, + set_params_opcode); if (rc) - pr_err("%s: Failed set_params opcode 0x%x, rc %d\n", - __func__, msg_hdr->opcode, rc); + pr_err("%s: Failed set_params, rc %d\n", __func__, rc); unlock: mutex_unlock(&lsm_common.cal_data[LSM_CAL_IDX]->lock); done: @@ -1444,7 +1464,7 @@ int q6lsm_snd_model_buf_alloc(struct lsm_client *client, size_t len, * set_param payload as well. */ if (allocate_module_data) - len += sizeof(struct lsm_param_payload_common); + len += sizeof(union param_hdrs); client->sound_model.size = len; pad_zero = (LSM_ALIGN_BOUNDARY - @@ -1539,66 +1559,44 @@ static int q6lsm_cmd(struct lsm_client *client, int opcode, bool wait) return rc; } -static int q6lsm_send_param_epd_thres( - struct lsm_client *client, - void *data, struct lsm_module_param_ids *ids) +static int q6lsm_send_param_epd_thres(struct lsm_client *client, void *data, + struct param_hdr_v3 *param_info) { - struct snd_lsm_ep_det_thres *ep_det_data; - struct lsm_cmd_set_epd_threshold epd_cmd; - struct apr_hdr *msg_hdr = &epd_cmd.msg_hdr; - struct lsm_set_params_hdr *param_hdr = - &epd_cmd.param_hdr; - struct lsm_param_epd_thres *epd_thres = - &epd_cmd.epd_thres; - int rc; + struct snd_lsm_ep_det_thres *ep_det_data = NULL; + struct lsm_param_epd_thres epd_thres = {0}; + int rc = 0; + + param_info->param_size = sizeof(epd_thres); ep_det_data = (struct snd_lsm_ep_det_thres *) data; - q6lsm_add_hdr(client, msg_hdr, - sizeof(epd_cmd), true); - msg_hdr->opcode = LSM_SESSION_CMD_SET_PARAMS_V2; - q6lsm_set_param_hdr_info(param_hdr, - sizeof(*epd_thres), 0, 0, 0); - q6lsm_set_param_common(&epd_thres->common, ids, - sizeof(*epd_thres) - sizeof(epd_thres->common), - 
LSM_SESSION_CMD_SET_PARAMS_V2); - epd_thres->minor_version = QLSM_PARAM_ID_MINOR_VERSION; - epd_thres->epd_begin = ep_det_data->epd_begin; - epd_thres->epd_end = ep_det_data->epd_end; + epd_thres.minor_version = QLSM_PARAM_ID_MINOR_VERSION; + epd_thres.epd_begin = ep_det_data->epd_begin; + epd_thres.epd_end = ep_det_data->epd_end; - rc = q6lsm_apr_send_pkt(client, client->apr, - &epd_cmd, true, NULL); + rc = q6lsm_pack_and_set_params(client, param_info, + (uint8_t *) &epd_thres, + LSM_SESSION_CMD_SET_PARAMS_V2); if (unlikely(rc)) - pr_err("%s: EPD_THRESHOLD failed, rc %d\n", - __func__, rc); + pr_err("%s: EPD_THRESHOLD failed, rc %d\n", __func__, rc); return rc; } -static int q6lsm_send_param_gain( - struct lsm_client *client, - u16 gain, struct lsm_module_param_ids *ids) +static int q6lsm_send_param_gain(struct lsm_client *client, u16 gain, + struct param_hdr_v3 *param_info) { - struct lsm_cmd_set_gain lsm_cmd_gain; - struct apr_hdr *msg_hdr = &lsm_cmd_gain.msg_hdr; - struct lsm_param_gain *lsm_gain = &lsm_cmd_gain.lsm_gain; - int rc; + struct lsm_param_gain lsm_gain = {0}; + int rc = 0; - q6lsm_add_hdr(client, msg_hdr, - sizeof(lsm_cmd_gain), true); - msg_hdr->opcode = LSM_SESSION_CMD_SET_PARAMS_V2; - q6lsm_set_param_hdr_info(&lsm_cmd_gain.param_hdr, - sizeof(*lsm_gain), 0, 0, 0); - q6lsm_set_param_common(&lsm_gain->common, ids, - sizeof(*lsm_gain) - sizeof(lsm_gain->common), - LSM_SESSION_CMD_SET_PARAMS_V2); - lsm_gain->minor_version = QLSM_PARAM_ID_MINOR_VERSION; - lsm_gain->gain = gain; - lsm_gain->reserved = 0; + param_info->param_size = sizeof(lsm_gain); - rc = q6lsm_apr_send_pkt(client, client->apr, - &lsm_cmd_gain, true, NULL); + lsm_gain.minor_version = QLSM_PARAM_ID_MINOR_VERSION; + lsm_gain.gain = gain; + + rc = q6lsm_pack_and_set_params(client, param_info, + (uint8_t *) &lsm_gain, + LSM_SESSION_CMD_SET_PARAMS_V2); if (unlikely(rc)) - pr_err("%s: LSM_GAIN CMD send failed, rc %d\n", - __func__, rc); + pr_err("%s: LSM_GAIN CMD send failed, rc %d\n", 
__func__, rc); return rc; } @@ -1606,23 +1604,23 @@ int q6lsm_set_one_param(struct lsm_client *client, struct lsm_params_info *p_info, void *data, uint32_t param_type) { - int rc = 0, pkt_sz; - struct lsm_module_param_ids ids; - u8 *packet; + struct param_hdr_v3 param_info = {0}; + int rc = 0; - memset(&ids, 0, sizeof(ids)); switch (param_type) { case LSM_ENDPOINT_DETECT_THRESHOLD: { - ids.module_id = p_info->module_id; - ids.param_id = p_info->param_id; - rc = q6lsm_send_param_epd_thres(client, data, - &ids); + param_info.module_id = p_info->module_id; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = p_info->param_id; + rc = q6lsm_send_param_epd_thres(client, data, ¶m_info); + if (rc) + pr_err("%s: LSM_ENDPOINT_DETECT_THRESHOLD failed, rc %d\n", + __func__, rc); break; } case LSM_OPERATION_MODE: { struct snd_lsm_detect_mode *det_mode = data; - struct lsm_module_param_ids opmode_ids; if (det_mode->mode == LSM_MODE_KEYWORD_ONLY_DETECTION) { client->mode = 0x01; @@ -1636,11 +1634,12 @@ int q6lsm_set_one_param(struct lsm_client *client, client->mode |= det_mode->detect_failure << 2; - opmode_ids.module_id = p_info->module_id; - opmode_ids.param_id = p_info->param_id; + param_info.module_id = p_info->module_id; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = p_info->param_id; - rc = q6lsm_send_param_opmode(client, &opmode_ids, - LSM_SESSION_CMD_SET_PARAMS_V2); + rc = q6lsm_send_param_opmode(client, ¶m_info, + LSM_SESSION_CMD_SET_PARAMS_V2); if (rc) pr_err("%s: OPERATION_MODE failed, rc %d\n", __func__, rc); @@ -1649,9 +1648,10 @@ int q6lsm_set_one_param(struct lsm_client *client, case LSM_GAIN: { struct snd_lsm_gain *lsm_gain = (struct snd_lsm_gain *) data; - ids.module_id = p_info->module_id; - ids.param_id = p_info->param_id; - rc = q6lsm_send_param_gain(client, lsm_gain->gain, &ids); + param_info.module_id = p_info->module_id; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = p_info->param_id; + rc = 
q6lsm_send_param_gain(client, lsm_gain->gain, ¶m_info); if (rc) pr_err("%s: LSM_GAIN command failed, rc %d\n", __func__, rc); @@ -1659,10 +1659,11 @@ int q6lsm_set_one_param(struct lsm_client *client, } case LSM_MIN_CONFIDENCE_LEVELS: - ids.module_id = p_info->module_id; - ids.param_id = p_info->param_id; - rc = q6lsm_send_confidence_levels(client, &ids, - LSM_SESSION_CMD_SET_PARAMS_V2); + param_info.module_id = p_info->module_id; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = p_info->param_id; + rc = q6lsm_send_confidence_levels( + client, ¶m_info, LSM_SESSION_CMD_SET_PARAMS_V2); if (rc) pr_err("%s: CONFIDENCE_LEVELS cmd failed, rc %d\n", __func__, rc); @@ -1670,11 +1671,12 @@ int q6lsm_set_one_param(struct lsm_client *client, case LSM_POLLING_ENABLE: { struct snd_lsm_poll_enable *lsm_poll_enable = (struct snd_lsm_poll_enable *) data; - ids.module_id = p_info->module_id; - ids.param_id = p_info->param_id; - rc = q6lsm_send_param_polling_enable(client, - lsm_poll_enable->poll_en, &ids, - LSM_SESSION_CMD_SET_PARAMS_V2); + param_info.module_id = p_info->module_id; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = p_info->param_id; + rc = q6lsm_send_param_polling_enable( + client, lsm_poll_enable->poll_en, ¶m_info, + LSM_SESSION_CMD_SET_PARAMS_V2); if (rc) pr_err("%s: POLLING ENABLE cmd failed, rc %d\n", __func__, rc); @@ -1682,24 +1684,25 @@ int q6lsm_set_one_param(struct lsm_client *client, } case LSM_REG_SND_MODEL: { - struct lsm_cmd_set_params model_param; + struct mem_mapping_hdr mem_hdr = {0}; u32 payload_size; - memset(&model_param, 0, sizeof(model_param)); - q6lsm_add_hdr(client, &model_param.msg_hdr, - sizeof(model_param), true); - model_param.msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS_V2; - payload_size = p_info->param_size + - sizeof(struct lsm_param_payload_common); - q6lsm_set_param_hdr_info(&model_param.param_hdr, - payload_size, - lower_32_bits(client->sound_model.phys), - msm_audio_populate_upper_32_bits( - 
client->sound_model.phys), - client->sound_model.mem_map_handle); - - rc = q6lsm_apr_send_pkt(client, client->apr, - &model_param, true, NULL); + if (q6common_is_instance_id_supported()) + payload_size = p_info->param_size + + sizeof(struct param_hdr_v3); + else + payload_size = p_info->param_size + + sizeof(struct param_hdr_v2); + + mem_hdr.data_payload_addr_lsw = + lower_32_bits(client->sound_model.phys); + mem_hdr.data_payload_addr_msw = + msm_audio_populate_upper_32_bits( + client->sound_model.phys), + mem_hdr.mem_map_handle = client->sound_model.mem_map_handle; + + rc = q6lsm_set_params(client, &mem_hdr, NULL, payload_size, + LSM_SESSION_CMD_SET_PARAMS_V2); if (rc) { pr_err("%s: REG_SND_MODEL failed, rc %d\n", __func__, rc); @@ -1714,69 +1717,33 @@ int q6lsm_set_one_param(struct lsm_client *client, } case LSM_DEREG_SND_MODEL: { - struct lsm_param_payload_common *common; - struct lsm_cmd_set_params *param; - - pkt_sz = sizeof(*param) + sizeof(*common); - packet = kzalloc(pkt_sz, GFP_KERNEL); - if (!packet) { - pr_err("%s: No memory for DEREG_SND_MODEL pkt, size = %d\n", - __func__, pkt_sz); - return -ENOMEM; - } - - param = (struct lsm_cmd_set_params *) packet; - common = (struct lsm_param_payload_common *) - (packet + sizeof(*param)); - q6lsm_add_hdr(client, &param->msg_hdr, pkt_sz, true); - param->msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS_V2; - q6lsm_set_param_hdr_info(&param->param_hdr, - sizeof(*common), - 0, 0, 0); - ids.module_id = p_info->module_id; - ids.param_id = p_info->param_id; - q6lsm_set_param_common(common, &ids, 0, - LSM_SESSION_CMD_SET_PARAMS_V2); - rc = q6lsm_apr_send_pkt(client, client->apr, - packet, true, NULL); + param_info.module_id = p_info->module_id; + param_info.instance_id = INSTANCE_ID_0; + param_info.param_id = p_info->param_id; + param_info.param_size = 0; + rc = q6lsm_pack_and_set_params(client, &param_info, NULL, + LSM_SESSION_CMD_SET_PARAMS_V2); if (rc) pr_err("%s: DEREG_SND_MODEL failed, rc %d\n", __func__, rc); - kfree(packet); break; 
} case LSM_CUSTOM_PARAMS: { - struct apr_hdr *hdr; - u8 *custom_data; + u32 param_size = p_info->param_size; - if (p_info->param_size < - sizeof(struct lsm_param_payload_common)) { - pr_err("%s: Invalid param_size %d\n", - __func__, p_info->param_size); + /* Check minimum size, V2 structure is smaller than V3 */ + if (param_size < sizeof(struct param_hdr_v2)) { + pr_err("%s: Invalid param_size %d\n", __func__, + param_size); return -EINVAL; } - pkt_sz = p_info->param_size + sizeof(*hdr); - packet = kzalloc(pkt_sz, GFP_KERNEL); - if (!packet) { - pr_err("%s: no memory for CUSTOM_PARAMS, size = %d\n", - __func__, pkt_sz); - return -ENOMEM; - } - - hdr = (struct apr_hdr *) packet; - custom_data = (u8 *) (packet + sizeof(*hdr)); - q6lsm_add_hdr(client, hdr, pkt_sz, true); - hdr->opcode = LSM_SESSION_CMD_SET_PARAMS_V2; - memcpy(custom_data, data, p_info->param_size); - - rc = q6lsm_apr_send_pkt(client, client->apr, - packet, true, NULL); + rc = q6lsm_set_params(client, NULL, data, param_size, + LSM_SESSION_CMD_SET_PARAMS_V2); if (rc) pr_err("%s: CUSTOM_PARAMS failed, rc %d\n", __func__, rc); - kfree(packet); break; } default: @@ -1805,60 +1772,51 @@ int q6lsm_close(struct lsm_client *client) int q6lsm_lab_control(struct lsm_client *client, u32 enable) { + struct lsm_param_lab_enable lab_enable = {0}; + struct param_hdr_v3 lab_enable_hdr = {0}; + struct lsm_param_lab_config lab_config = {0}; + struct param_hdr_v3 lab_config_hdr = {0}; int rc = 0; - struct lsm_params_lab_enable lab_enable; - struct lsm_params_lab_config lab_config; - struct lsm_module_param_ids lab_ids; - u32 param_size; if (!client) { pr_err("%s: invalid param client %pK\n", __func__, client); return -EINVAL; } + /* enable/disable lab on dsp */ - q6lsm_add_hdr(client, &lab_enable.msg_hdr, sizeof(lab_enable), true); - lab_enable.msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS; - q6lsm_set_param_hdr_info(&lab_enable.params_hdr, - sizeof(struct lsm_lab_enable), - 0, 0, 0); - param_size = (sizeof(struct 
lsm_lab_enable) - - sizeof(struct lsm_param_payload_common)); - lab_ids.module_id = LSM_MODULE_ID_LAB; - lab_ids.param_id = LSM_PARAM_ID_LAB_ENABLE; - q6lsm_set_param_common(&lab_enable.lab_enable.common, - &lab_ids, param_size, - LSM_SESSION_CMD_SET_PARAMS); - lab_enable.lab_enable.enable = (enable) ? 1 : 0; - rc = q6lsm_apr_send_pkt(client, client->apr, &lab_enable, true, NULL); + lab_enable_hdr.module_id = LSM_MODULE_ID_LAB; + lab_enable_hdr.instance_id = INSTANCE_ID_0; + lab_enable_hdr.param_id = LSM_PARAM_ID_LAB_ENABLE; + lab_enable_hdr.param_size = sizeof(lab_enable); + lab_enable.enable = (enable) ? 1 : 0; + rc = q6lsm_pack_and_set_params(client, &lab_enable_hdr, + (uint8_t *) &lab_enable, + LSM_SESSION_CMD_SET_PARAMS); if (rc) { pr_err("%s: Lab enable failed rc %d\n", __func__, rc); return rc; } if (!enable) goto exit; + /* lab session is being enabled set the config values */ - q6lsm_add_hdr(client, &lab_config.msg_hdr, sizeof(lab_config), true); - lab_config.msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS; - q6lsm_set_param_hdr_info(&lab_config.params_hdr, - sizeof(struct lsm_lab_config), - 0, 0, 0); - lab_ids.module_id = LSM_MODULE_ID_LAB; - lab_ids.param_id = LSM_PARAM_ID_LAB_CONFIG; - param_size = (sizeof(struct lsm_lab_config) - - sizeof(struct lsm_param_payload_common)); - q6lsm_set_param_common(&lab_config.lab_config.common, - &lab_ids, param_size, - LSM_SESSION_CMD_SET_PARAMS); - lab_config.lab_config.minor_version = 1; - lab_config.lab_config.wake_up_latency_ms = 250; - rc = q6lsm_apr_send_pkt(client, client->apr, &lab_config, true, NULL); + lab_config_hdr.module_id = LSM_MODULE_ID_LAB; + lab_config_hdr.instance_id = INSTANCE_ID_0; + lab_config_hdr.param_id = LSM_PARAM_ID_LAB_CONFIG; + lab_config_hdr.param_size = sizeof(lab_config); + lab_config.minor_version = 1; + lab_config.wake_up_latency_ms = 250; + rc = q6lsm_pack_and_set_params(client, &lab_config_hdr, + (uint8_t *) &lab_config, + LSM_SESSION_CMD_SET_PARAMS); if (rc) { pr_err("%s: Lab config 
failed rc %d disable lab\n", __func__, rc); /* Lab config failed disable lab */ - lab_enable.lab_enable.enable = 0; - if (q6lsm_apr_send_pkt(client, client->apr, - &lab_enable, true, NULL)) + lab_enable.enable = 0; + if (q6lsm_pack_and_set_params(client, &lab_enable_hdr, + (uint8_t *) &lab_enable, + LSM_SESSION_CMD_SET_PARAMS)) pr_err("%s: Lab disable failed\n", __func__); } exit: @@ -2142,6 +2100,8 @@ static int __init q6lsm_init(void) { int i = 0; pr_debug("%s:\n", __func__); + + memset(&lsm_common, 0, sizeof(struct lsm_common)); spin_lock_init(&lsm_session_lock); spin_lock_init(&mmap_lock); mutex_init(&lsm_common.apr_lock); diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c index 01e31578f107..a0f30a32f8e6 100644 --- a/sound/soc/msm/qdsp6v2/q6voice.c +++ b/sound/soc/msm/qdsp6v2/q6voice.c @@ -24,6 +24,7 @@ #include "sound/q6audio-v2.h" #include "sound/apr_audio-v2.h" #include "sound/q6afe-v2.h" +#include <sound/q6common.h> #include <sound/audio_cal_utils.h> #include "q6voice.h" #include <sound/adsp_err.h> @@ -93,8 +94,9 @@ static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv); static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv); static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv); -static int voice_send_set_pp_enable_cmd(struct voice_data *v, - uint32_t module_id, int enable); +static int voice_send_set_pp_enable_cmd( + struct voice_data *v, struct module_instance_info mod_inst_info, + int enable); static int is_cal_memory_allocated(void); static bool is_cvd_version_queried(void); static int is_voip_memory_allocated(void); @@ -126,6 +128,12 @@ static int voice_send_get_sound_focus_cmd(struct voice_data *v, struct sound_focus_param *soundFocusData); static int voice_send_get_source_tracking_cmd(struct voice_data *v, struct source_tracking_param *sourceTrackingData); +static int voice_pack_and_set_cvp_param(struct voice_data *v, + struct param_hdr_v3 param_hdr, + u8 
*param_data); +static int voice_pack_and_set_cvs_ui_property(struct voice_data *v, + struct param_hdr_v3 param_hdr, + u8 *param_data); static void voice_itr_init(struct voice_session_itr *itr, u32 session_id) @@ -1451,70 +1459,29 @@ fail: return ret; } -static int voice_send_set_pp_enable_cmd(struct voice_data *v, - uint32_t module_id, int enable) +static int voice_send_set_pp_enable_cmd( + struct voice_data *v, struct module_instance_info mod_inst_info, + int enable) { - struct cvs_set_pp_enable_cmd cvs_set_pp_cmd; + struct enable_param enable_param = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - void *apr_cvs; - u16 cvs_handle; - if (v == NULL) { - pr_err("%s: v is NULL\n", __func__); - return -EINVAL; - } - apr_cvs = common.apr_q6_cvs; + param_hdr.module_id = mod_inst_info.module_id; + param_hdr.instance_id = mod_inst_info.instance_id; + param_hdr.param_id = VOICE_PARAM_MOD_ENABLE; + param_hdr.param_size = sizeof(enable_param); + enable_param.enable = enable ? 1 : 0; - if (!apr_cvs) { - pr_err("%s: apr_cvs is NULL.\n", __func__); - return -EINVAL; - } - cvs_handle = voice_get_cvs_handle(v); + pr_debug("%s: voice_send_set_pp_enable_cmd, module_id=%d, instance_id=%d, enable=%d\n", + __func__, mod_inst_info.module_id, mod_inst_info.instance_id, + enable); - cvs_set_pp_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - cvs_set_pp_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, - sizeof(cvs_set_pp_cmd) - - APR_HDR_SIZE); - cvs_set_pp_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id); - cvs_set_pp_cmd.hdr.dest_port = cvs_handle; - cvs_set_pp_cmd.hdr.token = 0; - cvs_set_pp_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_UI_PROPERTY; - - cvs_set_pp_cmd.vss_set_pp.module_id = module_id; - cvs_set_pp_cmd.vss_set_pp.param_id = VOICE_PARAM_MOD_ENABLE; - cvs_set_pp_cmd.vss_set_pp.param_size = MOD_ENABLE_PARAM_LEN; - cvs_set_pp_cmd.vss_set_pp.reserved = 0; - cvs_set_pp_cmd.vss_set_pp.enable = enable; - 
cvs_set_pp_cmd.vss_set_pp.reserved_field = 0; - pr_debug("voice_send_set_pp_enable_cmd, module_id=%d, enable=%d\n", - module_id, enable); + ret = voice_pack_and_set_cvs_ui_property(v, param_hdr, + (uint8_t *) &enable_param); + if (ret < 0) + pr_err("Fail: sending cvs set pp enable\n"); - v->cvs_state = CMD_STATUS_FAIL; - v->async_err = 0; - ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_pp_cmd); - if (ret < 0) { - pr_err("Fail: sending cvs set pp enable,\n"); - goto fail; - } - ret = wait_event_timeout(v->cvs_wait, - (v->cvs_state == CMD_STATUS_SUCCESS), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - goto fail; - } - if (v->async_err > 0) { - pr_err("%s: DSP returned error[%s]\n", - __func__, adsp_err_get_err_str( - v->async_err)); - ret = adsp_err_get_lnx_err_code( - v->async_err); - goto fail; - } - return 0; -fail: return ret; } @@ -3823,6 +3790,7 @@ done: static int voice_setup_vocproc(struct voice_data *v) { + struct module_instance_info mod_inst_info = {0}; int ret = 0; ret = voice_send_cvp_create_cmd(v); @@ -3845,6 +3813,9 @@ static int voice_setup_vocproc(struct voice_data *v) goto fail; } + mod_inst_info.module_id = MODULE_ID_VOICE_MODULE_ST; + mod_inst_info.instance_id = INSTANCE_ID_0; + voice_send_cvs_register_cal_cmd(v); voice_send_cvp_register_dev_cfg_cmd(v); voice_send_cvp_register_cal_cmd(v); @@ -3878,9 +3849,7 @@ static int voice_setup_vocproc(struct voice_data *v) } if (v->st_enable && !v->tty_mode) - voice_send_set_pp_enable_cmd(v, - MODULE_ID_VOICE_MODULE_ST, - v->st_enable); + voice_send_set_pp_enable_cmd(v, mod_inst_info, v->st_enable); /* Start in-call music delivery if this feature is enabled */ if (v->music_info.play_enable) voice_cvs_start_playback(v); @@ -4017,14 +3986,9 @@ done: static int voice_send_cvp_media_format_cmd(struct voice_data *v, uint32_t param_type) { + struct vss_param_endpoint_media_format_info media_fmt_info = {0}; + struct param_hdr_v3 param_hdr = {0}; int ret = 0; - 
struct cvp_set_media_format_cmd cvp_set_media_format_cmd; - void *apr_cvp; - u16 cvp_handle; - struct vss_icommon_param_data_t *media_fmt_param_data = - &cvp_set_media_format_cmd.cvp_set_param_v2.param_data; - struct vss_param_endpoint_media_format_info_t *media_fmt_info = - &media_fmt_param_data->media_format_info; if (v == NULL) { pr_err("%s: v is NULL\n", __func__); @@ -4032,75 +3996,41 @@ static int voice_send_cvp_media_format_cmd(struct voice_data *v, goto done; } - apr_cvp = common.apr_q6_cvp; - if (!apr_cvp) { - pr_err("%s: apr_cvp is NULL.\n", __func__); - ret = -EINVAL; - goto done; - } - - cvp_handle = voice_get_cvp_handle(v); - memset(&cvp_set_media_format_cmd, 0, sizeof(cvp_set_media_format_cmd)); + param_hdr.module_id = VSS_MODULE_CVD_GENERIC; + param_hdr.instance_id = INSTANCE_ID_0; + param_hdr.param_size = sizeof(media_fmt_info); - /* Fill header data */ - cvp_set_media_format_cmd.hdr.hdr_field = - APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - cvp_set_media_format_cmd.hdr.pkt_size = - APR_PKT_SIZE(APR_HDR_SIZE, - sizeof(cvp_set_media_format_cmd) - APR_HDR_SIZE); - cvp_set_media_format_cmd.hdr.src_svc = 0; - cvp_set_media_format_cmd.hdr.src_domain = APR_DOMAIN_APPS; - cvp_set_media_format_cmd.hdr.src_port = - voice_get_idx_for_session(v->session_id); - cvp_set_media_format_cmd.hdr.dest_svc = 0; - cvp_set_media_format_cmd.hdr.dest_domain = APR_DOMAIN_ADSP; - cvp_set_media_format_cmd.hdr.dest_port = cvp_handle; - cvp_set_media_format_cmd.hdr.token = VOC_SET_MEDIA_FORMAT_PARAM_TOKEN; - cvp_set_media_format_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_PARAM_V2; - - /* Fill param data */ - cvp_set_media_format_cmd.cvp_set_param_v2.mem_size = - sizeof(struct vss_icommon_param_data_t); - media_fmt_param_data->module_id = VSS_MODULE_CVD_GENERIC; - media_fmt_param_data->param_size = - sizeof(struct vss_param_endpoint_media_format_info_t); - - /* Fill device specific data */ switch (param_type) { case RX_PATH: - 
media_fmt_param_data->param_id = - VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO; - media_fmt_info->port_id = v->dev_rx.port_id; - media_fmt_info->num_channels = v->dev_rx.no_of_channels; - media_fmt_info->bits_per_sample = v->dev_rx.bits_per_sample; - media_fmt_info->sample_rate = v->dev_rx.sample_rate; - memcpy(&media_fmt_info->channel_mapping, + param_hdr.param_id = VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO; + media_fmt_info.port_id = v->dev_rx.port_id; + media_fmt_info.num_channels = v->dev_rx.no_of_channels; + media_fmt_info.bits_per_sample = v->dev_rx.bits_per_sample; + media_fmt_info.sample_rate = v->dev_rx.sample_rate; + memcpy(&media_fmt_info.channel_mapping, &v->dev_rx.channel_mapping, VSS_CHANNEL_MAPPING_SIZE); break; case TX_PATH: - media_fmt_param_data->param_id = - VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO; - media_fmt_info->port_id = v->dev_tx.port_id; - media_fmt_info->num_channels = v->dev_tx.no_of_channels; - media_fmt_info->bits_per_sample = v->dev_tx.bits_per_sample; - media_fmt_info->sample_rate = v->dev_tx.sample_rate; - memcpy(&media_fmt_info->channel_mapping, + param_hdr.param_id = VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO; + media_fmt_info.port_id = v->dev_tx.port_id; + media_fmt_info.num_channels = v->dev_tx.no_of_channels; + media_fmt_info.bits_per_sample = v->dev_tx.bits_per_sample; + media_fmt_info.sample_rate = v->dev_tx.sample_rate; + memcpy(&media_fmt_info.channel_mapping, &v->dev_tx.channel_mapping, VSS_CHANNEL_MAPPING_SIZE); break; case EC_REF_PATH: - media_fmt_param_data->param_id = - VSS_PARAM_EC_REF_PORT_ENDPOINT_MEDIA_INFO; - media_fmt_info->port_id = common.ec_media_fmt_info.port_id; - media_fmt_info->num_channels = + param_hdr.param_id = VSS_PARAM_EC_REF_PORT_ENDPOINT_MEDIA_INFO; + media_fmt_info.port_id = common.ec_media_fmt_info.port_id; + media_fmt_info.num_channels = common.ec_media_fmt_info.num_channels; - media_fmt_info->bits_per_sample = + media_fmt_info.bits_per_sample = common.ec_media_fmt_info.bits_per_sample; - 
media_fmt_info->sample_rate = + media_fmt_info.sample_rate = common.ec_media_fmt_info.sample_rate; - memcpy(&media_fmt_info->channel_mapping, + memcpy(&media_fmt_info.channel_mapping, &common.ec_media_fmt_info.channel_mapping, VSS_CHANNEL_MAPPING_SIZE); break; @@ -4111,32 +4041,11 @@ static int voice_send_cvp_media_format_cmd(struct voice_data *v, goto done; } - /* Send command */ - v->cvp_state = CMD_STATUS_FAIL; - v->async_err = 0; - ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_set_media_format_cmd); - if (ret < 0) { - pr_err("%s: Fail in sending VSS_ICOMMON_CMD_SET_PARAM_V2\n", - __func__); - ret = -EINVAL; - goto done; - } - - ret = wait_event_timeout(v->cvp_wait, - (v->cvp_state == CMD_STATUS_SUCCESS), - msecs_to_jiffies(TIMEOUT_MS)); - if (!ret) { - pr_err("%s: wait_event timeout\n", __func__); - ret = -EINVAL; - goto done; - } - - if (v->async_err > 0) { - pr_err("%s: DSP returned error[%s] handle = %d\n", __func__, - adsp_err_get_err_str(v->async_err), cvp_handle); - ret = adsp_err_get_lnx_err_code(v->async_err); - goto done; - } + ret = voice_pack_and_set_cvp_param(v, param_hdr, + (u8 *) &media_fmt_info); + if (ret) + pr_err("%s: Failed to set media format params on CVP, err %d\n", + __func__, ret); done: return ret; @@ -4532,6 +4441,7 @@ static int voice_destroy_vocproc(struct voice_data *v) { struct mvm_detach_vocproc_cmd mvm_d_vocproc_cmd; struct apr_hdr cvp_destroy_session_cmd; + struct module_instance_info mod_inst_info = {0}; int ret = 0; void *apr_mvm, *apr_cvp; u16 mvm_handle, cvp_handle; @@ -4550,9 +4460,12 @@ static int voice_destroy_vocproc(struct voice_data *v) mvm_handle = voice_get_mvm_handle(v); cvp_handle = voice_get_cvp_handle(v); + mod_inst_info.module_id = MODULE_ID_VOICE_MODULE_ST; + mod_inst_info.instance_id = INSTANCE_ID_0; + /* disable slowtalk if st_enable is set */ if (v->st_enable) - voice_send_set_pp_enable_cmd(v, MODULE_ID_VOICE_MODULE_ST, 0); + voice_send_set_pp_enable_cmd(v, mod_inst_info, 0); /* Disable HD Voice if hd_enable 
is set */ if (v->hd_enable) @@ -5789,11 +5702,15 @@ uint8_t voc_get_tty_mode(uint32_t session_id) return ret; } -int voc_set_pp_enable(uint32_t session_id, uint32_t module_id, uint32_t enable) +int voc_set_pp_enable(uint32_t session_id, + struct module_instance_info mod_inst_info, + uint32_t enable) { struct voice_data *v = NULL; int ret = 0; struct voice_session_itr itr; + int mid = mod_inst_info.module_id; + int iid = mod_inst_info.instance_id; voice_itr_init(&itr, session_id); while (voice_itr_get_next_session(&itr, &v)) { @@ -5802,15 +5719,15 @@ int voc_set_pp_enable(uint32_t session_id, uint32_t module_id, uint32_t enable) continue; mutex_lock(&v->lock); - if (module_id == MODULE_ID_VOICE_MODULE_ST) + if (mid == MODULE_ID_VOICE_MODULE_ST && + iid == INSTANCE_ID_0) v->st_enable = enable; if (v->voc_state == VOC_RUN) { - if ((module_id == MODULE_ID_VOICE_MODULE_ST) && - (!v->tty_mode)) - ret = voice_send_set_pp_enable_cmd(v, - MODULE_ID_VOICE_MODULE_ST, - enable); + if ((mid == MODULE_ID_VOICE_MODULE_ST) && + iid == INSTANCE_ID_0 && (!v->tty_mode)) + ret = voice_send_set_pp_enable_cmd( + v, mod_inst_info, enable); } mutex_unlock(&v->lock); } else { @@ -5893,7 +5810,8 @@ bool voc_get_afe_sidetone(void) return ret; } -int voc_get_pp_enable(uint32_t session_id, uint32_t module_id) +int voc_get_pp_enable(uint32_t session_id, + struct module_instance_info mod_inst_info) { struct voice_data *v = voice_get_session(session_id); int ret = 0; @@ -5905,7 +5823,8 @@ int voc_get_pp_enable(uint32_t session_id, uint32_t module_id) } mutex_lock(&v->lock); - if (module_id == MODULE_ID_VOICE_MODULE_ST) + if (mod_inst_info.module_id == MODULE_ID_VOICE_MODULE_ST && + mod_inst_info.instance_id == INSTANCE_ID_0) ret = v->st_enable; mutex_unlock(&v->lock); @@ -6180,6 +6099,7 @@ done: int voc_enable_device(uint32_t session_id) { struct voice_data *v = voice_get_session(session_id); + struct module_instance_info mod_inst_info = {0}; int ret = 0; if (v == NULL) { @@ -6197,15 +6117,15 @@ 
int voc_enable_device(uint32_t session_id) /* Not a critical error, allow voice call to continue */ } + mod_inst_info.module_id = MODULE_ID_VOICE_MODULE_ST; + mod_inst_info.instance_id = INSTANCE_ID_0; + if (v->tty_mode) { /* disable slowtalk */ - voice_send_set_pp_enable_cmd(v, - MODULE_ID_VOICE_MODULE_ST, - 0); + voice_send_set_pp_enable_cmd(v, mod_inst_info, 0); } else { /* restore slowtalk */ - voice_send_set_pp_enable_cmd(v, - MODULE_ID_VOICE_MODULE_ST, + voice_send_set_pp_enable_cmd(v, mod_inst_info, v->st_enable); } @@ -6787,6 +6707,7 @@ static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv) case VSS_ICOMMON_CMD_MAP_MEMORY: case VSS_ICOMMON_CMD_UNMAP_MEMORY: case VSS_ICOMMON_CMD_SET_UI_PROPERTY: + case VSS_ICOMMON_CMD_SET_UI_PROPERTY_V2: case VSS_IPLAYBACK_CMD_START: case VSS_IPLAYBACK_CMD_STOP: case VSS_IRECORD_CMD_START: @@ -6800,12 +6721,14 @@ static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv) wake_up(&v->cvs_wait); break; case VSS_ICOMMON_CMD_SET_PARAM_V2: - pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2\n", + case VSS_ICOMMON_CMD_SET_PARAM_V3: + pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM\n", __func__); rtac_make_voice_callback(RTAC_CVS, ptr, data->payload_size); break; case VSS_ICOMMON_CMD_GET_PARAM_V2: + case VSS_ICOMMON_CMD_GET_PARAM_V3: pr_debug("%s: VSS_ICOMMON_CMD_GET_PARAM_V2\n", __func__); /* Should only come here if there is an APR */ @@ -6938,7 +6861,8 @@ static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv) pr_debug("Recd VSS_ISTREAM_EVT_NOT_READY\n"); } else if (data->opcode == VSS_ISTREAM_EVT_READY) { pr_debug("Recd VSS_ISTREAM_EVT_READY\n"); - } else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM) { + } else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM || + VSS_ICOMMON_RSP_GET_PARAM_V3) { pr_debug("%s: VSS_ICOMMON_RSP_GET_PARAM\n", __func__); ptr = data->payload; if (ptr[0] != 0) { @@ -7081,28 +7005,30 @@ static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv) case 
VSS_IVPCM_EVT_PUSH_BUFFER_V2: break; case VSS_ICOMMON_CMD_SET_PARAM_V2: + case VSS_ICOMMON_CMD_SET_PARAM_V3: switch (data->token) { case VOC_SET_MEDIA_FORMAT_PARAM_TOKEN: - pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by voice_send_cvp_media_format_cmd\n", + pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM called by voice_send_cvp_media_format_cmd\n", __func__); v->cvp_state = CMD_STATUS_SUCCESS; v->async_err = ptr[1]; wake_up(&v->cvp_wait); break; case VOC_RTAC_SET_PARAM_TOKEN: - pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by rtac\n", + pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM called by rtac\n", __func__); rtac_make_voice_callback( RTAC_CVP, ptr, data->payload_size); break; default: - pr_debug("%s: invalid token for command VSS_ICOMMON_CMD_SET_PARAM_V2: %d\n", + pr_debug("%s: invalid token for command VSS_ICOMMON_CMD_SET_PARAM: %d\n", __func__, data->token); break; } break; case VSS_ICOMMON_CMD_GET_PARAM_V2: + case VSS_ICOMMON_CMD_GET_PARAM_V3: pr_debug("%s: VSS_ICOMMON_CMD_GET_PARAM_V2\n", __func__); /* Should only come here if there is an APR */ @@ -7169,7 +7095,8 @@ static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv) break; } } - } else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM) { + } else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM || + VSS_ICOMMON_RSP_GET_PARAM_V3) { pr_debug("%s: VSS_ICOMMON_RSP_GET_PARAM\n", __func__); ptr = data->payload; if (ptr[0] != 0) { @@ -8578,6 +8505,199 @@ int voc_get_source_tracking(struct source_tracking_param *sourceTrackingData) return ret; } +static int voice_set_cvp_param(struct voice_data *v, + struct vss_icommon_mem_mapping_hdr *mem_hdr, + u32 *param_data, u32 param_size) +{ + struct vss_icommon_cmd_set_param *set_param = NULL; + uint32_t pkt_size = sizeof(struct vss_icommon_cmd_set_param); + void *apr_cvp; + int ret = 0; + + apr_cvp = common.apr_q6_cvp; + if (!apr_cvp) { + pr_err("%s: apr_cvp is NULL\n", __func__); + return -EINVAL; + } + + if (param_data != NULL) + pkt_size += param_size; + 
set_param = kzalloc(pkt_size, GFP_KERNEL); + if (!set_param) + return -ENOMEM; + + set_param->apr_hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + set_param->apr_hdr.pkt_size = + APR_PKT_SIZE(APR_HDR_SIZE, pkt_size - APR_HDR_SIZE); + set_param->apr_hdr.src_svc = 0; + set_param->apr_hdr.src_domain = APR_DOMAIN_APPS; + set_param->apr_hdr.src_port = voice_get_idx_for_session(v->session_id); + set_param->apr_hdr.dest_svc = 0; + set_param->apr_hdr.dest_domain = APR_DOMAIN_ADSP; + set_param->apr_hdr.dest_port = voice_get_cvp_handle(v); + set_param->apr_hdr.token = VOC_SET_MEDIA_FORMAT_PARAM_TOKEN; + set_param->apr_hdr.opcode = q6common_is_instance_id_supported() ? + VSS_ICOMMON_CMD_SET_PARAM_V3 : + VSS_ICOMMON_CMD_SET_PARAM_V2; + + set_param->payload_size = param_size; + + if (mem_hdr != NULL) { + set_param->mem_hdr = *mem_hdr; + } else if (param_data != NULL) { + memcpy(set_param->param_data, param_data, param_size); + } else { + pr_err("%s: Both memory header and param data are NULL\n", + __func__); + ret = -EINVAL; + goto done; + } + + v->cvp_state = CMD_STATUS_FAIL; + v->async_err = 0; + ret = apr_send_pkt(apr_cvp, (u32 *) set_param); + if (ret < 0) { + pr_err("%s: Failed to send apr packet, error %d\n", __func__, + ret); + goto done; + } + + ret = wait_event_timeout(v->cvp_wait, + v->cvp_state == CMD_STATUS_SUCCESS, + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_err("%s: wait_event timeout\n", __func__); + ret = -ETIMEDOUT; + goto done; + } + + if (v->async_err > 0) { + pr_err("%s: DSP returned error[%s]\n", __func__, + adsp_err_get_err_str(v->async_err)); + ret = adsp_err_get_lnx_err_code(v->async_err); + goto done; + } + ret = 0; + +done: + kfree(set_param); + return ret; +} + +static int voice_pack_and_set_cvp_param(struct voice_data *v, + struct param_hdr_v3 param_hdr, + u8 *param_data) +{ + u8 *packed_data = NULL; + u32 total_size = 0; + int ret = 0; + + total_size = sizeof(union param_hdrs) + 
param_hdr.param_size; + packed_data = kzalloc(total_size, GFP_KERNEL); + if (!packed_data) + return -ENOMEM; + + ret = q6common_pack_pp_params(packed_data, &param_hdr, param_data, + &total_size); + if (ret) { + pr_err("%s: Failed to pack params, error %d", __func__, ret); + goto done; + } + + ret = voice_set_cvp_param(v, NULL, (u32 *) packed_data, total_size); + +done: + kfree(packed_data); + return ret; +} + +/* + * Out of band is not supported and there are currently no pre-packed cases, + * so pack and set in the same function. When needed, split up. + */ +static int voice_pack_and_set_cvs_ui_property(struct voice_data *v, + struct param_hdr_v3 param_hdr, + u8 *param_data) +{ + struct vss_icommon_cmd_set_ui_property *set_ui_property = NULL; + u32 total_size = 0; + bool iid_supported = q6common_is_instance_id_supported(); + void *apr_cvs; + int ret = 0; + + apr_cvs = common.apr_q6_cvs; + if (!apr_cvs) { + pr_err("%s: apr_cvs is NULL\n", __func__); + return -EINVAL; + } + + total_size = sizeof(struct vss_icommon_cmd_set_ui_property) + + sizeof(union param_hdrs) + param_hdr.param_size; + set_ui_property = kzalloc(total_size, GFP_KERNEL); + if (!set_ui_property) + return -ENOMEM; + + ret = q6common_pack_pp_params(set_ui_property->param_data, &param_hdr, + param_data, &total_size); + if (ret) { + pr_err("%s: Failed to pack params, error %d", __func__, ret); + goto done; + } + + /* + * Pack the APR header after packing the data so we have the actual + * total size of the payload + */ + set_ui_property->apr_hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + set_ui_property->apr_hdr.pkt_size = + APR_PKT_SIZE(APR_HDR_SIZE, total_size - APR_HDR_SIZE); + set_ui_property->apr_hdr.src_svc = 0; + set_ui_property->apr_hdr.src_domain = APR_DOMAIN_APPS; + set_ui_property->apr_hdr.src_port = + voice_get_idx_for_session(v->session_id); + set_ui_property->apr_hdr.dest_svc = 0; + set_ui_property->apr_hdr.dest_domain = APR_DOMAIN_ADSP; + 
set_ui_property->apr_hdr.dest_port = voice_get_cvs_handle(v); + set_ui_property->apr_hdr.token = 0; + + set_ui_property->apr_hdr.opcode = + iid_supported ? VSS_ICOMMON_CMD_SET_UI_PROPERTY_V2 : + VSS_ICOMMON_CMD_SET_UI_PROPERTY; + + v->cvs_state = CMD_STATUS_FAIL; + v->async_err = 0; + ret = apr_send_pkt(apr_cvs, (u32 *) set_ui_property); + if (ret < 0) { + pr_err("%s: Failed to send apr packet, error %d\n", __func__, + ret); + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + v->cvs_state == CMD_STATUS_SUCCESS, + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_err("%s: wait_event timeout\n", __func__); + ret = -ETIMEDOUT; + goto done; + } + + if (v->async_err > 0) { + pr_err("%s: DSP returned error[%s]\n", __func__, + adsp_err_get_err_str(v->async_err)); + ret = adsp_err_get_lnx_err_code(v->async_err); + goto done; + } + ret = 0; +done: + kfree(set_ui_property); + return ret; +} + int is_voc_initialized(void) { return module_initialized; diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h index f7ea650dfda9..f448e701d564 100644 --- a/sound/soc/msm/qdsp6v2/q6voice.h +++ b/sound/soc/msm/qdsp6v2/q6voice.h @@ -172,6 +172,7 @@ struct mem_map_table { /* Common */ #define VSS_ICOMMON_CMD_SET_UI_PROPERTY 0x00011103 +#define VSS_ICOMMON_CMD_SET_UI_PROPERTY_V2 0x00013248 /* Set a UI property */ #define VSS_ICOMMON_CMD_MAP_MEMORY 0x00011025 #define VSS_ICOMMON_CMD_UNMAP_MEMORY 0x00011026 @@ -213,7 +214,7 @@ struct vss_unmap_memory_cmd { struct vss_icommon_cmd_unmap_memory_t vss_unmap_mem; } __packed; -struct vss_param_endpoint_media_format_info_t { +struct vss_param_endpoint_media_format_info { /* AFE port ID to which this media format corresponds to. */ uint32_t port_id; /* @@ -240,29 +241,7 @@ struct vss_param_endpoint_media_format_info_t { uint8_t channel_mapping[VSS_NUM_CHANNELS_MAX]; } __packed; -struct vss_icommon_param_data_t { - /* Valid ID of the module. */ - uint32_t module_id; - /* Valid ID of the parameter. 
*/ - uint32_t param_id; - /* - * Data size of the structure relating to the param_id/module_id - * combination in uint8_t bytes. - */ - uint16_t param_size; - /* This field must be set to zero. */ - uint16_t reserved; - /* - * Parameter data payload when inband. Should have size param_size. - * Bit size of payload must be a multiple of 4. - */ - union { - struct vss_param_endpoint_media_format_info_t media_format_info; - }; -} __packed; - -/* Payload structure for the VSS_ICOMMON_CMD_SET_PARAM_V2 command. */ -struct vss_icommon_cmd_set_param_v2_t { +struct vss_icommon_mem_mapping_hdr { /* * Pointer to the unique identifier for an address (physical/virtual). * @@ -275,6 +254,7 @@ struct vss_icommon_cmd_set_param_v2_t { * data. */ uint32_t mem_handle; + /* * Location of the parameter data payload. * @@ -282,12 +262,25 @@ struct vss_icommon_cmd_set_param_v2_t { * mem_handle is 0, this field is ignored. */ uint64_t mem_address; - /* Size of the parameter data payload in bytes. */ - uint32_t mem_size; - /* Parameter data payload when the data is inband. */ - struct vss_icommon_param_data_t param_data; + } __packed; +struct vss_icommon_cmd_set_param { + /* APR Header */ + struct apr_hdr apr_hdr; + + /* The memory mapping header to be used when sending outband */ + struct vss_icommon_mem_mapping_hdr mem_hdr; + + /* Size of the parameter data payload in bytes. */ + uint32_t payload_size; + + /* + * Parameter data payload when inband. Should have size param_size. + * Bit size of payload must be a multiple of 4. + */ + uint8_t param_data[0]; +} __packed; /* TO MVM commands */ #define VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x000110FF /**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ @@ -638,7 +631,6 @@ struct vss_imemory_cmd_unmap_t { #define MODULE_ID_VOICE_MODULE_ST 0x00010EE3 #define VOICE_PARAM_MOD_ENABLE 0x00010E00 -#define MOD_ENABLE_PARAM_LEN 4 #define VSS_IPLAYBACK_CMD_START 0x000112BD /* Start in-call music delivery on the Tx voice path. 
*/ @@ -907,20 +899,20 @@ struct vss_istream_cmd_register_calibration_data_v2_t { */ } __packed; -struct vss_icommon_cmd_set_ui_property_enable_t { - uint32_t module_id; - /* Unique ID of the module. */ - uint32_t param_id; - /* Unique ID of the parameter. */ - uint16_t param_size; - /* Size of the parameter in bytes: MOD_ENABLE_PARAM_LEN */ - uint16_t reserved; - /* Reserved; set to 0. */ +struct enable_param { uint16_t enable; uint16_t reserved_field; /* Reserved, set to 0. */ }; +struct vss_icommon_cmd_set_ui_property { + /* APR Header */ + struct apr_hdr apr_hdr; + + /* The parameter data to be filled when sent inband */ + u8 param_data[0]; +} __packed; + /* * Event sent by the stream to the client that enables Rx DTMF * detection whenever DTMF is detected in the Rx path. @@ -1029,10 +1021,6 @@ struct cvs_deregister_cal_data_cmd { struct apr_hdr hdr; } __packed; -struct cvs_set_pp_enable_cmd { - struct apr_hdr hdr; - struct vss_icommon_cmd_set_ui_property_enable_t vss_set_pp; -} __packed; struct cvs_start_record_cmd { struct apr_hdr hdr; struct vss_irecord_cmd_start_t rec_mode; @@ -1105,6 +1093,8 @@ struct vss_istream_cmd_set_packet_exchange_mode_t { */ #define VSS_IVOCPROC_CMD_DEREGISTER_DEVICE_CONFIG 0x00011372 +#define CVD_CAL_DATA_FORMAT_MINOR_VERSION_V0 0x00000000 +#define CVD_CAL_DATA_FORMAT_MINOR_VERSION_V1 0x00000001 #define VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA_V2 0x00011373 #define VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA 0x00011276 @@ -1484,11 +1474,6 @@ struct cvp_set_dev_channels_cmd { struct vss_ivocproc_cmd_topology_set_dev_channels_t cvp_set_channels; } __packed; -struct cvp_set_media_format_cmd { - struct apr_hdr hdr; - struct vss_icommon_cmd_set_param_v2_t cvp_set_param_v2; -} __packed; - struct cvp_set_vp3_data_cmd { struct apr_hdr hdr; } __packed; @@ -1836,9 +1821,11 @@ enum { #define VSID_MAX ALL_SESSION_VSID /* called by alsa driver */ -int voc_set_pp_enable(uint32_t session_id, uint32_t module_id, +int voc_set_pp_enable(uint32_t 
session_id, + struct module_instance_info mod_inst_info, uint32_t enable); -int voc_get_pp_enable(uint32_t session_id, uint32_t module_id); +int voc_get_pp_enable(uint32_t session_id, + struct module_instance_info mod_inst_info); int voc_set_hd_enable(uint32_t session_id, uint32_t enable); uint8_t voc_get_tty_mode(uint32_t session_id); int voc_set_tty_mode(uint32_t session_id, uint8_t tty_mode); diff --git a/sound/soc/msm/qdsp6v2/rtac.c b/sound/soc/msm/qdsp6v2/rtac.c index 77c6dfbbe8c1..5e33fb508455 100644 --- a/sound/soc/msm/qdsp6v2/rtac.c +++ b/sound/soc/msm/qdsp6v2/rtac.c @@ -27,6 +27,7 @@ #include <sound/q6afe-v2.h> #include <sound/q6adm-v2.h> #include <sound/apr_audio-v2.h> +#include <sound/q6common.h> #include "q6voice.h" #include "msm-pcm-routing-v2.h" #include <sound/adsp_err.h> @@ -104,14 +105,10 @@ struct rtac_afe_user_data { uint32_t cmd_size; uint32_t port_id; union { - struct rtac_afe_set { - struct afe_port_cmd_set_param_v2 cmd; - struct afe_port_param_data_v2 data; - } rtac_afe_set; - struct rtac_afe_get { - struct afe_port_cmd_get_param_v2 cmd; - struct afe_port_param_data_v2 data; - } rtac_afe_get; + struct afe_rtac_user_data_set_v2 v2_set; + struct afe_rtac_user_data_set_v3 v3_set; + struct afe_rtac_user_data_get_v2 v2_get; + struct afe_rtac_user_data_get_v3 v3_get; }; } __packed; @@ -800,7 +797,9 @@ int send_adm_apr(void *buf, u32 opcode) goto err; } - if (opcode == ADM_CMD_SET_PP_PARAMS_V5) { + switch (opcode) { + case ADM_CMD_SET_PP_PARAMS_V5: + case ADM_CMD_SET_PP_PARAMS_V6: /* set payload size to in-band payload */ /* set data size to actual out of band payload size */ data_size = payload_size - 4 * sizeof(u32); @@ -818,12 +817,15 @@ int send_adm_apr(void *buf, u32 opcode) buf + 7 * sizeof(u32), data_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); - result = -EINVAL; + result = -EFAULT; goto err; } + /* set payload size in packet */ rtac_adm_buffer[8] = data_size; - } else { + break; + case 
ADM_CMD_GET_PP_PARAMS_V5: + case ADM_CMD_GET_PP_PARAMS_V6: if (payload_size > MAX_PAYLOAD_SIZE) { pr_err("%s: Invalid payload size = %d\n", __func__, payload_size); @@ -837,9 +839,14 @@ int send_adm_apr(void *buf, u32 opcode) buf + 3 * sizeof(u32), payload_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); - result = -EINVAL; + result = -EFAULT; goto err; } + break; + default: + pr_err("%s: Invalid opcode %d\n", __func__, opcode); + result = -EINVAL; + goto err; } /* Pack header */ @@ -900,33 +907,39 @@ int send_adm_apr(void *buf, u32 opcode) if (opcode == ADM_CMD_GET_PP_PARAMS_V5) { bytes_returned = ((u32 *)rtac_cal[ADM_RTAC_CAL].cal_data. kvaddr)[2] + 3 * sizeof(u32); + } else if (opcode == ADM_CMD_GET_PP_PARAMS_V6) { + bytes_returned = + ((u32 *) rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr)[3] + + 4 * sizeof(u32); + } else { + bytes_returned = data_size; + goto unlock; + } - if (bytes_returned > rtac_cal[ADM_RTAC_CAL]. - map_data.map_size) { - pr_err("%s: Invalid data size = %d\n", - __func__, bytes_returned); - result = -EINVAL; - goto err; - } + if (bytes_returned > rtac_cal[ADM_RTAC_CAL].map_data.map_size) { + pr_err("%s: Invalid data size = %d\n", __func__, + bytes_returned); + result = -EINVAL; + goto err; + } - if (bytes_returned > user_buf_size) { - pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n", - __func__, user_buf_size, bytes_returned); - result = -EINVAL; - goto err; - } + if (bytes_returned > user_buf_size) { + pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n", + __func__, user_buf_size, bytes_returned); + result = -EINVAL; + goto err; + } - if (copy_to_user(buf, (void *) - rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr, - bytes_returned)) { - pr_err("%s: Could not copy buffer to user,size = %d\n", - __func__, bytes_returned); - result = -EINVAL; - goto err; - } - } else { - bytes_returned = data_size; + if (copy_to_user((void __user *) buf, + rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr, + 
bytes_returned)) { + pr_err("%s: Could not copy buffer to user,size = %d\n", + __func__, bytes_returned); + result = -EFAULT; + goto err; } + +unlock: mutex_unlock(&rtac_adm_apr_mutex); done: return bytes_returned; @@ -1027,7 +1040,9 @@ int send_rtac_asm_apr(void *buf, u32 opcode) goto err; } - if (opcode == ASM_STREAM_CMD_SET_PP_PARAMS_V2) { + switch (opcode) { + case ASM_STREAM_CMD_SET_PP_PARAMS_V2: + case ASM_STREAM_CMD_SET_PP_PARAMS_V3: /* set payload size to in-band payload */ /* set data size to actual out of band payload size */ data_size = payload_size - 4 * sizeof(u32); @@ -1045,13 +1060,14 @@ int send_rtac_asm_apr(void *buf, u32 opcode) buf + 7 * sizeof(u32), data_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); - result = -EINVAL; + result = -EFAULT; goto err; } /* set payload size in packet */ rtac_asm_buffer[8] = data_size; - - } else { + break; + case ASM_STREAM_CMD_GET_PP_PARAMS_V2: + case ASM_STREAM_CMD_GET_PP_PARAMS_V3: if (payload_size > MAX_PAYLOAD_SIZE) { pr_err("%s: Invalid payload size = %d\n", __func__, payload_size); @@ -1065,9 +1081,15 @@ int send_rtac_asm_apr(void *buf, u32 opcode) buf + 3 * sizeof(u32), payload_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); - result = -EINVAL; + result = -EFAULT; goto err; } + + break; + default: + pr_err("%s: Invalid opcode %d\n", __func__, opcode); + result = -EINVAL; + goto err; } /* Pack header */ @@ -1130,33 +1152,39 @@ int send_rtac_asm_apr(void *buf, u32 opcode) if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS_V2) { bytes_returned = ((u32 *)rtac_cal[ASM_RTAC_CAL].cal_data. kvaddr)[2] + 3 * sizeof(u32); + } else if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS_V3) { + bytes_returned = + ((u32 *) rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr)[3] + + 4 * sizeof(u32); + } else { + bytes_returned = data_size; + goto unlock; + } - if (bytes_returned > rtac_cal[ASM_RTAC_CAL]. 
- map_data.map_size) { - pr_err("%s: Invalid data size = %d\n", - __func__, bytes_returned); - result = -EINVAL; - goto err; - } + if (bytes_returned > rtac_cal[ASM_RTAC_CAL].map_data.map_size) { + pr_err("%s: Invalid data size = %d\n", __func__, + bytes_returned); + result = -EINVAL; + goto err; + } - if (bytes_returned > user_buf_size) { - pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n", - __func__, user_buf_size, bytes_returned); - result = -EINVAL; - goto err; - } + if (bytes_returned > user_buf_size) { + pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n", + __func__, user_buf_size, bytes_returned); + result = -EINVAL; + goto err; + } - if (copy_to_user(buf, (void *) - rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr, - bytes_returned)) { - pr_err("%s: Could not copy buffer to user,size = %d\n", - __func__, bytes_returned); - result = -EINVAL; - goto err; - } - } else { - bytes_returned = data_size; + if (copy_to_user((void __user *) buf, + rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr, + bytes_returned)) { + pr_err("%s: Could not copy buffer to user,size = %d\n", + __func__, bytes_returned); + result = -EFAULT; + goto err; } + +unlock: mutex_unlock(&rtac_asm_apr_mutex); done: return bytes_returned; @@ -1213,13 +1241,18 @@ static int fill_afe_apr_hdr(struct apr_hdr *apr_hdr, uint32_t port, return 0; } -static int send_rtac_afe_apr(void *buf, uint32_t opcode) +static int send_rtac_afe_apr(void __user *buf, uint32_t opcode) { int32_t result; uint32_t bytes_returned = 0; + uint32_t payload_size = 0; uint32_t port_index = 0; + uint32_t *afe_cmd = NULL; uint32_t apr_msg_size = 0; struct rtac_afe_user_data user_afe_buf; + struct mem_mapping_hdr *mem_hdr = NULL; + struct param_hdr_v1 *get_resp_v2; + struct param_hdr_v3 *get_resp_v3; pr_debug("%s\n", __func__); @@ -1267,93 +1300,126 @@ static int send_rtac_afe_apr(void *buf, uint32_t opcode) result = -EINVAL; goto err; } - if (opcode == AFE_PORT_CMD_SET_PARAM_V2) { - struct 
afe_port_cmd_set_param_v2 *afe_set_apr_msg; - /* set data size to actual out of band payload size */ - if (user_afe_buf.rtac_afe_set.cmd.payload_size > - rtac_cal[AFE_RTAC_CAL].map_data.map_size) { - pr_err("%s: Invalid data size = %d\n", - __func__, - user_afe_buf.rtac_afe_set.cmd.payload_size); + afe_cmd = + (u32 *) rtac_afe_buffer + sizeof(struct apr_hdr) / sizeof(u32); + + switch (opcode) { + case AFE_PORT_CMD_SET_PARAM_V2: + apr_msg_size = sizeof(struct afe_port_cmd_set_param_v2); + payload_size = user_afe_buf.v2_set.payload_size; + if (payload_size > rtac_cal[AFE_RTAC_CAL].map_data.map_size) { + pr_err("%s: Invalid payload size = %d\n", __func__, + payload_size); result = -EINVAL; goto err; } - /* Copy buffer to out-of-band payload */ - if (copy_from_user((void *) - rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr, - buf+offsetof(struct rtac_afe_user_data, - rtac_afe_set.data), - user_afe_buf.rtac_afe_set.cmd.payload_size)) { + /* Copy the command to the rtac buffer */ + memcpy(afe_cmd, &user_afe_buf.v2_set, + sizeof(user_afe_buf.v2_set)); + + /* Copy the param data to the out-of-band location */ + if (copy_from_user(rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr, + (void __user *) buf + + offsetof(struct rtac_afe_user_data, + v2_set.param_hdr), + payload_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); + result = -EFAULT; + goto err; + } + break; + case AFE_PORT_CMD_SET_PARAM_V3: + apr_msg_size = sizeof(struct afe_port_cmd_set_param_v3); + payload_size = user_afe_buf.v3_set.payload_size; + if (payload_size > rtac_cal[AFE_RTAC_CAL].map_data.map_size) { + pr_err("%s: Invalid payload size = %d\n", __func__, + payload_size); result = -EINVAL; goto err; } - /* Copy AFE APR Message */ - afe_set_apr_msg = (struct afe_port_cmd_set_param_v2 *) - ((u8 *)rtac_afe_buffer + - sizeof(struct apr_hdr)); - if (copy_from_user((void *) - afe_set_apr_msg, - buf + offsetof(struct rtac_afe_user_data, - rtac_afe_set.cmd) , - sizeof(struct afe_port_cmd_set_param_v2))) { + 
/* Copy the command to the rtac buffer */ + memcpy(afe_cmd, &user_afe_buf.v3_set, + sizeof(user_afe_buf.v3_set)); + + /* Copy the param data to the out-of-band location */ + if (copy_from_user(rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr, + (void __user *) buf + + offsetof(struct rtac_afe_user_data, + v3_get.param_hdr), + payload_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); - result = -EINVAL; + result = -EFAULT; goto err; } + break; + case AFE_PORT_CMD_GET_PARAM_V2: + apr_msg_size = sizeof(struct afe_port_cmd_get_param_v2); - afe_set_apr_msg->payload_address_lsw = - lower_32_bits(rtac_cal[AFE_RTAC_CAL].cal_data.paddr); - afe_set_apr_msg->payload_address_msw = - msm_audio_populate_upper_32_bits( - rtac_cal[AFE_RTAC_CAL].cal_data.paddr); - afe_set_apr_msg->mem_map_handle = - rtac_cal[AFE_RTAC_CAL].map_data.map_handle; - - apr_msg_size = sizeof(struct apr_hdr) + - sizeof(struct afe_port_cmd_set_param_v2); + if (user_afe_buf.cmd_size > MAX_PAYLOAD_SIZE) { + pr_err("%s: Invalid payload size = %d\n", __func__, + user_afe_buf.cmd_size); + result = -EINVAL; + goto err; + } - } else { - struct afe_port_cmd_get_param_v2 *afe_get_apr_msg; + /* Copy the command and param data in-band */ + if (copy_from_user(afe_cmd, + (void __user *) buf + + offsetof(struct rtac_afe_user_data, + v2_get), + user_afe_buf.cmd_size)) { + pr_err("%s: Could not copy payload from user buffer\n", + __func__); + result = -EFAULT; + goto err; + } + break; + case AFE_PORT_CMD_GET_PARAM_V3: + apr_msg_size = sizeof(struct afe_port_cmd_get_param_v3); if (user_afe_buf.cmd_size > MAX_PAYLOAD_SIZE) { - pr_err("%s: Invalid payload size = %d\n", - __func__, user_afe_buf.cmd_size); + pr_err("%s: Invalid payload size = %d\n", __func__, + user_afe_buf.cmd_size); result = -EINVAL; goto err; } - /* Copy buffer to in-band payload */ - afe_get_apr_msg = (struct afe_port_cmd_get_param_v2 *) - ((u8 *) rtac_afe_buffer + - sizeof(struct apr_hdr)); - if (copy_from_user((void *)afe_get_apr_msg, - 
buf+offsetof(struct rtac_afe_user_data, - rtac_afe_get.cmd), - sizeof(struct afe_port_cmd_get_param_v2))) { + /* Copy the command and param data in-band */ + if (copy_from_user(afe_cmd, + (void __user *) buf + + offsetof(struct rtac_afe_user_data, + v3_get), + user_afe_buf.cmd_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); - result = -EINVAL; + result = -EFAULT; goto err; } - - afe_get_apr_msg->payload_address_lsw = - lower_32_bits(rtac_cal[AFE_RTAC_CAL].cal_data.paddr); - afe_get_apr_msg->payload_address_msw = - msm_audio_populate_upper_32_bits( - rtac_cal[AFE_RTAC_CAL].cal_data.paddr); - afe_get_apr_msg->mem_map_handle = - rtac_cal[AFE_RTAC_CAL].map_data.map_handle; - afe_get_apr_msg->payload_size -= sizeof(struct apr_hdr); - apr_msg_size = sizeof(struct apr_hdr) + - sizeof(struct afe_port_cmd_get_param_v2); + break; + default: + pr_err("%s: Invalid opcode %d\n", __func__, opcode); + result = -EINVAL; + goto err; } + /* + * The memory header is in the same location in all commands. Therefore, + * it doesn't matter what command the buffer is cast into. 
+ */ + mem_hdr = &((struct afe_port_cmd_set_param_v3 *) rtac_afe_buffer) + ->mem_hdr; + mem_hdr->data_payload_addr_lsw = + lower_32_bits(rtac_cal[AFE_RTAC_CAL].cal_data.paddr); + mem_hdr->data_payload_addr_msw = msm_audio_populate_upper_32_bits( + rtac_cal[AFE_RTAC_CAL].cal_data.paddr); + mem_hdr->mem_map_handle = rtac_cal[AFE_RTAC_CAL].map_data.map_handle; + + /* Fill the APR header at the end so we have the correct message size */ fill_afe_apr_hdr((struct apr_hdr *) rtac_afe_buffer, port_index, opcode, apr_msg_size); @@ -1391,40 +1457,44 @@ static int send_rtac_afe_apr(void *buf, uint32_t opcode) } if (opcode == AFE_PORT_CMD_GET_PARAM_V2) { - struct afe_port_param_data_v2 *get_resp; - get_resp = (struct afe_port_param_data_v2 *) - rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr; - - bytes_returned = get_resp->param_size + - sizeof(struct afe_port_param_data_v2); + get_resp_v2 = (struct param_hdr_v1 *) rtac_cal[AFE_RTAC_CAL] + .cal_data.kvaddr; + bytes_returned = + get_resp_v2->param_size + sizeof(struct param_hdr_v1); + } else if (opcode == AFE_PORT_CMD_GET_PARAM_V3) { + get_resp_v3 = (struct param_hdr_v3 *) rtac_cal[AFE_RTAC_CAL] + .cal_data.kvaddr; + bytes_returned = + get_resp_v3->param_size + sizeof(struct param_hdr_v3); + } else { + bytes_returned = payload_size; + goto unlock; + } - if (bytes_returned > rtac_cal[AFE_RTAC_CAL]. 
- map_data.map_size) { - pr_err("%s: Invalid data size = %d\n", - __func__, bytes_returned); - result = -EINVAL; - goto err; - } + if (bytes_returned > rtac_cal[AFE_RTAC_CAL].map_data.map_size) { + pr_err("%s: Invalid data size = %d\n", __func__, + bytes_returned); + result = -EINVAL; + goto err; + } - if (bytes_returned > user_afe_buf.buf_size) { - pr_err("%s: user size = 0x%x, returned size = 0x%x\n", - __func__, user_afe_buf.buf_size, - bytes_returned); - result = -EINVAL; - goto err; - } + if (bytes_returned > user_afe_buf.buf_size) { + pr_err("%s: user size = 0x%x, returned size = 0x%x\n", __func__, + user_afe_buf.buf_size, bytes_returned); + result = -EINVAL; + goto err; + } - if (copy_to_user(buf, (void *) - rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr, - bytes_returned)) { - pr_err("%s: Could not copy buffer to user,size = %d\n", - __func__, bytes_returned); - result = -EINVAL; - goto err; - } - } else { - bytes_returned = user_afe_buf.rtac_afe_set.cmd.payload_size; + if (copy_to_user((void __user *) buf, + rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr, + bytes_returned)) { + pr_err("%s: Could not copy buffer to user,size = %d\n", + __func__, bytes_returned); + result = -EFAULT; + goto err; } + +unlock: mutex_unlock(&rtac_afe_apr_mutex); done: return bytes_returned; @@ -1526,7 +1596,9 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode) goto err; } - if (opcode == VSS_ICOMMON_CMD_SET_PARAM_V2) { + switch (opcode) { + case VSS_ICOMMON_CMD_SET_PARAM_V2: + case VSS_ICOMMON_CMD_SET_PARAM_V3: /* set payload size to in-band payload */ /* set data size to actual out of band payload size */ data_size = payload_size - 4 * sizeof(u32); @@ -1544,12 +1616,16 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode) buf + 7 * sizeof(u32), data_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); - result = -EINVAL; + result = -EFAULT; goto err; } /* set payload size in packet */ rtac_voice_buffer[8] = data_size; - } else { + /* set token for set param case */ + 
voice_params.token = VOC_RTAC_SET_PARAM_TOKEN; + break; + case VSS_ICOMMON_CMD_GET_PARAM_V2: + case VSS_ICOMMON_CMD_GET_PARAM_V3: if (payload_size > MAX_PAYLOAD_SIZE) { pr_err("%s: Invalid payload size = %d\n", __func__, payload_size); @@ -1563,9 +1639,16 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode) buf + 3 * sizeof(u32), payload_size)) { pr_err("%s: Could not copy payload from user buffer\n", __func__); - result = -EINVAL; + result = -EFAULT; goto err; } + /* set token for get param case */ + voice_params.token = 0; + break; + default: + pr_err("%s: Invalid opcode %d\n", __func__, opcode); + result = -EINVAL; + goto err; } /* Pack header */ @@ -1579,18 +1662,14 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode) voice_params.dest_svc = 0; voice_params.dest_domain = APR_DOMAIN_MODEM; voice_params.dest_port = (u16)dest_port; - voice_params.token = (opcode == VSS_ICOMMON_CMD_SET_PARAM_V2) ? - VOC_RTAC_SET_PARAM_TOKEN : - 0; voice_params.opcode = opcode; /* fill for out-of-band */ rtac_voice_buffer[5] = rtac_cal[VOICE_RTAC_CAL].map_data.map_handle; rtac_voice_buffer[6] = lower_32_bits(rtac_cal[VOICE_RTAC_CAL].cal_data.paddr); - rtac_voice_buffer[7] = - msm_audio_populate_upper_32_bits( - rtac_cal[VOICE_RTAC_CAL].cal_data.paddr); + rtac_voice_buffer[7] = msm_audio_populate_upper_32_bits( + rtac_cal[VOICE_RTAC_CAL].cal_data.paddr); memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params)); atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1); @@ -1629,33 +1708,39 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode) if (opcode == VSS_ICOMMON_CMD_GET_PARAM_V2) { bytes_returned = ((u32 *)rtac_cal[VOICE_RTAC_CAL].cal_data. kvaddr)[2] + 3 * sizeof(u32); + } else if (opcode == VSS_ICOMMON_CMD_GET_PARAM_V3) { + bytes_returned = + ((u32 *) rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr)[3] + + 4 * sizeof(u32); + } else { + bytes_returned = data_size; + goto unlock; + } - if (bytes_returned > rtac_cal[VOICE_RTAC_CAL]. 
- map_data.map_size) { - pr_err("%s: Invalid data size = %d\n", - __func__, bytes_returned); - result = -EINVAL; - goto err; - } + if (bytes_returned > rtac_cal[VOICE_RTAC_CAL].map_data.map_size) { + pr_err("%s: Invalid data size = %d\n", __func__, + bytes_returned); + result = -EINVAL; + goto err; + } - if (bytes_returned > user_buf_size) { - pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n", - __func__, user_buf_size, bytes_returned); - result = -EINVAL; - goto err; - } + if (bytes_returned > user_buf_size) { + pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n", + __func__, user_buf_size, bytes_returned); + result = -EINVAL; + goto err; + } - if (copy_to_user(buf, (void *) - rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr, - bytes_returned)) { - pr_err("%s: Could not copy buffer to user, size = %d\n", - __func__, bytes_returned); - result = -EINVAL; - goto err; - } - } else { - bytes_returned = data_size; + if (copy_to_user((void __user *) buf, + rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr, + bytes_returned)) { + pr_err("%s: Could not copy buffer to user, size = %d\n", + __func__, bytes_returned); + result = -EFAULT; + goto err; } + +unlock: mutex_unlock(&rtac_voice_apr_mutex); done: return bytes_returned; @@ -1675,6 +1760,7 @@ void get_rtac_adm_data(struct rtac_adm *adm_data) static long rtac_ioctl_shared(struct file *f, unsigned int cmd, void *arg) { + u32 opcode; int result = 0; if (!arg) { pr_err("%s: No data sent to driver!\n", __func__); @@ -1713,42 +1799,64 @@ static long rtac_ioctl_shared(struct file *f, } case AUDIO_GET_RTAC_ADM_CAL: - result = send_adm_apr((void *)arg, ADM_CMD_GET_PP_PARAMS_V5); + opcode = q6common_is_instance_id_supported() ? + ADM_CMD_GET_PP_PARAMS_V6 : + ADM_CMD_GET_PP_PARAMS_V5; + result = send_adm_apr((void *) arg, opcode); break; case AUDIO_SET_RTAC_ADM_CAL: - result = send_adm_apr((void *)arg, ADM_CMD_SET_PP_PARAMS_V5); + opcode = q6common_is_instance_id_supported() ? 
+ ADM_CMD_SET_PP_PARAMS_V6 : + ADM_CMD_SET_PP_PARAMS_V5; + result = send_adm_apr((void *) arg, opcode); break; case AUDIO_GET_RTAC_ASM_CAL: - result = send_rtac_asm_apr((void *)arg, - ASM_STREAM_CMD_GET_PP_PARAMS_V2); + opcode = q6common_is_instance_id_supported() ? + ASM_STREAM_CMD_GET_PP_PARAMS_V3 : + ASM_STREAM_CMD_GET_PP_PARAMS_V2; + result = send_rtac_asm_apr((void *) arg, opcode); break; case AUDIO_SET_RTAC_ASM_CAL: - result = send_rtac_asm_apr((void *)arg, - ASM_STREAM_CMD_SET_PP_PARAMS_V2); + opcode = q6common_is_instance_id_supported() ? + ASM_STREAM_CMD_SET_PP_PARAMS_V3 : + ASM_STREAM_CMD_SET_PP_PARAMS_V2; + result = send_rtac_asm_apr((void *) arg, opcode); break; case AUDIO_GET_RTAC_CVS_CAL: - result = send_voice_apr(RTAC_CVS, (void *) arg, - VSS_ICOMMON_CMD_GET_PARAM_V2); + opcode = q6common_is_instance_id_supported() ? + VSS_ICOMMON_CMD_GET_PARAM_V3 : + VSS_ICOMMON_CMD_GET_PARAM_V2; + result = send_voice_apr(RTAC_CVS, (void *) arg, opcode); break; case AUDIO_SET_RTAC_CVS_CAL: - result = send_voice_apr(RTAC_CVS, (void *) arg, - VSS_ICOMMON_CMD_SET_PARAM_V2); + opcode = q6common_is_instance_id_supported() ? + VSS_ICOMMON_CMD_SET_PARAM_V3 : + VSS_ICOMMON_CMD_SET_PARAM_V2; + result = send_voice_apr(RTAC_CVS, (void *) arg, opcode); break; case AUDIO_GET_RTAC_CVP_CAL: - result = send_voice_apr(RTAC_CVP, (void *) arg, - VSS_ICOMMON_CMD_GET_PARAM_V2); + opcode = q6common_is_instance_id_supported() ? + VSS_ICOMMON_CMD_GET_PARAM_V3 : + VSS_ICOMMON_CMD_GET_PARAM_V2; + result = send_voice_apr(RTAC_CVP, (void *) arg, opcode); break; case AUDIO_SET_RTAC_CVP_CAL: - result = send_voice_apr(RTAC_CVP, (void *) arg, - VSS_ICOMMON_CMD_SET_PARAM_V2); + opcode = q6common_is_instance_id_supported() ? 
+ VSS_ICOMMON_CMD_SET_PARAM_V3 : + VSS_ICOMMON_CMD_SET_PARAM_V2; + result = send_voice_apr(RTAC_CVP, (void *) arg, opcode); break; case AUDIO_GET_RTAC_AFE_CAL: - result = send_rtac_afe_apr((void *)arg, - AFE_PORT_CMD_GET_PARAM_V2); + opcode = q6common_is_instance_id_supported() ? + AFE_PORT_CMD_GET_PARAM_V3 : + AFE_PORT_CMD_GET_PARAM_V2; + result = send_rtac_afe_apr((void __user *) arg, opcode); break; case AUDIO_SET_RTAC_AFE_CAL: - result = send_rtac_afe_apr((void *)arg, - AFE_PORT_CMD_SET_PARAM_V2); + opcode = q6common_is_instance_id_supported() ? + AFE_PORT_CMD_SET_PARAM_V3 : + AFE_PORT_CMD_SET_PARAM_V2; + result = send_rtac_afe_apr((void __user *) arg, opcode); break; default: pr_err("%s: Invalid IOCTL, command = %d!\n", diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 362446c36c9e..e00dfbec22c5 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -1049,10 +1049,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod, return -ENOMEM; ret = snd_ctl_add(card, kctrl); - if (ret < 0) { - snd_ctl_free_one(kctrl); + if (ret < 0) return ret; - } cfg->update = update; cfg->card = card; diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 2cd09ceba5e9..2899797610e8 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_SOURCE))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) return cs; } @@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_SELECTOR))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) { + if (cs->bLength < 5 + cs->bNrInPins) + return NULL; return cs; + } } return NULL; @@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor * while ((cs = 
snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_MULTIPLIER))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) return cs; } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 413824566102..9d864648c901 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1541,6 +1541,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, __u8 *bmaControls; if (state->mixer->protocol == UAC_VERSION_1) { + if (hdr->bLength < 7) { + usb_audio_err(state->chip, + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } csize = hdr->bControlSize; if (!csize) { usb_audio_dbg(state->chip, @@ -1558,6 +1564,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, } } else if (state->mixer->protocol == UAC_VERSION_2) { struct uac2_feature_unit_descriptor *ftr = _ftr; + if (hdr->bLength < 6) { + usb_audio_err(state->chip, + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } csize = 4; channels = (hdr->bLength - 6) / 4 - 1; bmaControls = ftr->bmaControls; @@ -2277,7 +2289,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, const struct usbmix_name_map *map; char **namelist; - if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) { + if (desc->bLength < 5 || !desc->bNrInPins || + desc->bLength < 5 + desc->bNrInPins) { usb_audio_err(state->chip, "invalid SELECTOR UNIT descriptor %d\n", unitid); return -EINVAL; diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c index 638875a0960a..79547c225c14 100644 --- a/tools/perf/tests/attr.c +++ b/tools/perf/tests/attr.c @@ -150,7 +150,7 @@ static int run_dir(const char *d, const char *perf) snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s", d, d, perf, vcnt, v); - return system(cmd); + return system(cmd) ? 
TEST_FAIL : TEST_OK; } int test__attr(void) diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh index c4366dc74e01..856a1f327b3f 100755 --- a/tools/testing/selftests/firmware/fw_filesystem.sh +++ b/tools/testing/selftests/firmware/fw_filesystem.sh @@ -48,8 +48,16 @@ echo "ABCD0123" >"$FW" NAME=$(basename "$FW") +if printf '\000' >"$DIR"/trigger_request 2> /dev/null; then + echo "$0: empty filename should not succeed" >&2 + exit 1 +fi + # Request a firmware that doesn't exist, it should fail. -echo -n "nope-$NAME" >"$DIR"/trigger_request +if echo -n "nope-$NAME" >"$DIR"/trigger_request 2> /dev/null; then + echo "$0: firmware shouldn't have loaded" >&2 + exit 1 +fi if diff -q "$FW" /dev/test_firmware >/dev/null ; then echo "$0: firmware was not expected to match" >&2 exit 1 diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh index b9983f8e09f6..01c626a1f226 100755 --- a/tools/testing/selftests/firmware/fw_userhelper.sh +++ b/tools/testing/selftests/firmware/fw_userhelper.sh @@ -64,9 +64,33 @@ trap "test_finish" EXIT echo "ABCD0123" >"$FW" NAME=$(basename "$FW") +DEVPATH="$DIR"/"nope-$NAME"/loading + # Test failure when doing nothing (timeout works). -echo 1 >/sys/class/firmware/timeout -echo -n "$NAME" >"$DIR"/trigger_request +echo -n 2 >/sys/class/firmware/timeout +echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null & + +# Give the kernel some time to load the loading file, must be less +# than the timeout above. +sleep 1 +if [ ! -f $DEVPATH ]; then + echo "$0: fallback mechanism immediately cancelled" + echo "" + echo "The file never appeared: $DEVPATH" + echo "" + echo "This might be a distribution udev rule setup by your distribution" + echo "to immediately cancel all fallback requests, this must be" + echo "removed before running these tests. 
To confirm look for" + echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules" + echo "and see if you have something like this:" + echo "" + echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\"" + echo "" + echo "If you do remove this file or comment out this line before" + echo "proceeding with these tests." + exit 1 +fi + if diff -q "$FW" /dev/test_firmware >/dev/null ; then echo "$0: firmware was not expected to match" >&2 exit 1 diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index 923e59eb82c7..412b845412d2 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -351,9 +351,24 @@ static void do_simple_tests(void) install_invalid(&desc, false); desc.seg_not_present = 0; - desc.read_exec_only = 0; desc.seg_32bit = 1; + desc.read_exec_only = 0; + desc.limit = 0xfffff; + install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB); + + desc.limit_in_pages = 1; + + install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G); + desc.read_exec_only = 1; + install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G); + desc.contents = 1; + desc.read_exec_only = 0; + install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G); + desc.read_exec_only = 1; + install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G); + + desc.limit = 0; install_invalid(&desc, true); } diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index a7b9022b5c8f..7f38db2a46c8 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -84,9 +84,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work) struct kvm_vcpu *vcpu; vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); - vcpu->arch.timer_cpu.armed = false; - - WARN_ON(!kvm_timer_should_fire(vcpu)); /* * If the vcpu is blocked we want to wake it up so that it will see |
