-rw-r--r--Android.bp27
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs11
-rw-r--r--Documentation/device-mapper/verity.txt11
-rw-r--r--Documentation/devicetree/bindings/sound/qcom-audio-dev.txt66
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3.txt1
-rw-r--r--Documentation/filesystems/f2fs.txt17
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/at91sam9g25.dtsi2
-rw-r--r--arch/arm/boot/dts/imx53-qsrb.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi1
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi10
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-v3-auto-adp.dts6
-rw-r--r--arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts7
-rw-r--r--arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp.dts10
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi60
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi21
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi10
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-v3-auto-adp.dts6
-rw-r--r--arch/arm/boot/dts/qcom/msm8996.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts4
-rw-r--r--arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts11
-rw-r--r--arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts62
-rw-r--r--arch/arm/boot/dts/qcom/sdm630.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/sdm636-pm660a-qrd.dts32
-rw-r--r--arch/arm/boot/dts/qcom/sdm636-qrd.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-regulator.dtsi37
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-audio-common.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-baseline.dts18
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-blsp.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts84
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi2
-rw-r--r--arch/arm/include/asm/xen/events.h2
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c10
-rw-r--r--arch/arm/mach-imx/cpu.c3
-rw-r--r--arch/arm/mach-imx/mxc.h6
-rw-r--r--arch/arm64/configs/msm-auto-gvm-perf_defconfig7
-rw-r--r--arch/arm64/configs/msm-auto-gvm_defconfig7
-rw-r--r--arch/arm64/configs/msm-auto-perf_defconfig3
-rw-r--r--arch/arm64/configs/msm-auto_defconfig3
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig2
-rw-r--r--arch/arm64/configs/msmcortex_defconfig3
-rw-r--r--arch/arm64/configs/msmcortex_mediabox-perf_defconfig1
-rw-r--r--arch/arm64/configs/msmcortex_mediabox_defconfig1
-rw-r--r--arch/arm64/configs/sdm660-perf_defconfig2
-rw-r--r--arch/arm64/configs/sdm660_defconfig3
-rw-r--r--arch/arm64/include/asm/futex.h8
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S1
-rw-r--r--arch/mips/include/asm/kprobes.h3
-rw-r--r--arch/mips/include/asm/pgtable-32.h7
-rw-r--r--arch/mips/include/asm/uaccess.h11
-rw-r--r--arch/mips/lib/memset.S11
-rw-r--r--arch/mips/mm/pgtable-32.c6
-rw-r--r--arch/parisc/kernel/drivers.c4
-rw-r--r--arch/powerpc/include/asm/barrier.h3
-rw-r--r--arch/powerpc/include/asm/opal.h3
-rw-r--r--arch/powerpc/include/asm/page.h12
-rw-r--r--arch/powerpc/include/asm/synch.h4
-rw-r--r--arch/powerpc/kernel/eeh_pe.c3
-rw-r--r--arch/powerpc/kernel/time.c14
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c34
-rw-r--r--arch/powerpc/lib/feature-fixups.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-nvram.c11
-rw-r--r--arch/s390/Kconfig47
-rw-r--r--arch/s390/Makefile10
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/alternative.h149
-rw-r--r--arch/s390/include/asm/barrier.h24
-rw-r--r--arch/s390/include/asm/facility.h18
-rw-r--r--arch/s390/include/asm/kvm_host.h3
-rw-r--r--arch/s390/include/asm/lowcore.h7
-rw-r--r--arch/s390/include/asm/nospec-branch.h17
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/thread_info.h4
-rw-r--r--arch/s390/include/uapi/asm/kvm.h3
-rw-r--r--arch/s390/kernel/Makefile5
-rw-r--r--arch/s390/kernel/alternative.c112
-rw-r--r--arch/s390/kernel/early.c5
-rw-r--r--arch/s390/kernel/entry.S250
-rw-r--r--arch/s390/kernel/ipl.c2
-rw-r--r--arch/s390/kernel/module.c65
-rw-r--r--arch/s390/kernel/nospec-branch.c169
-rw-r--r--arch/s390/kernel/processor.c18
-rw-r--r--arch/s390/kernel/setup.c14
-rw-r--r--arch/s390/kernel/smp.c7
-rw-r--r--arch/s390/kernel/uprobes.c9
-rw-r--r--arch/s390/kernel/vmlinux.lds.S45
-rw-r--r--arch/s390/kvm/kvm-s390.c12
-rw-r--r--arch/sparc/kernel/ldc.c7
-rw-r--r--arch/um/os-Linux/signal.c2
-rw-r--r--arch/x86/configs/x86_64_cuttlefish_defconfig442
-rw-r--r--arch/x86/include/uapi/asm/msgbuf.h31
-rw-r--r--arch/x86/include/uapi/asm/shmbuf.h42
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kvm/svm.c24
-rw-r--r--arch/x86/kvm/vmx.c7
-rw-r--r--arch/x86/lib/csum-copy_64.S12
-rw-r--r--arch/x86/um/stub_segv.c2
-rw-r--r--block/bio-integrity.c3
-rw-r--r--block/blk-mq.c7
-rw-r--r--block/partition-generic.c4
-rw-r--r--build.config.cuttlefish.x86_6415
-rw-r--r--crypto/async_tx/async_pq.c5
-rw-r--r--drivers/acpi/acpica/evxfevnt.c18
-rw-r--r--drivers/acpi/acpica/psobject.c14
-rw-r--r--drivers/acpi/video_detect.c9
-rw-r--r--drivers/amba/bus.c14
-rw-r--r--drivers/ata/libahci_platform.c5
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/block/loop.c15
-rw-r--r--drivers/bus/brcmstb_gisb.c42
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/diag/diag_dci.c4
-rw-r--r--drivers/char/diag/diag_memorydevice.c49
-rw-r--r--drivers/char/diag/diag_memorydevice.h4
-rw-r--r--drivers/char/diag/diagchar_core.c1
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c2
-rw-r--r--drivers/char/random.c12
-rw-r--r--drivers/char/virtio_console.c49
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c8
-rw-r--r--drivers/clk/clk-conf.c2
-rw-r--r--drivers/clk/clk-scpi.c6
-rw-r--r--drivers/clk/msm/virtclk-front-8996.c31
-rw-r--r--drivers/clk/msm/virtclk-front.c12
-rw-r--r--drivers/clk/msm/virtclk-front.h2
-rw-r--r--drivers/clk/mvebu/armada-38x.c15
-rw-r--r--drivers/clk/qcom/clk-rcg2.c20
-rw-r--r--drivers/cpuidle/dt_idle_states.c4
-rw-r--r--drivers/cpuidle/lpm-levels-of.c31
-rw-r--r--drivers/cpuidle/lpm-levels.c3
-rw-r--r--drivers/dma/at_xdmac.c4
-rw-r--r--drivers/dma/imx-sdma.c23
-rw-r--r--drivers/edac/mv64x60_edac.c2
-rw-r--r--drivers/esoc/esoc-mdm-4x.c11
-rw-r--r--drivers/esoc/esoc-mdm-pon.c4
-rw-r--r--drivers/gpio/gpiolib.c8
-rw-r--r--drivers/gpu/drm/msm/dba_bridge.c7
-rw-r--r--drivers/gpu/drm/msm/dba_bridge.h3
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c35
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h15
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display.c131
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display.h15
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy.c12
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy.h9
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c35
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h2
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c56
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.c13
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.c39
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_catalog.c4
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_ctl.c100
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_ctl.h29
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.c46
-rw-r--r--drivers/gpu/drm/msm/sde/sde_plane.c17
-rw-r--r--drivers/gpu/drm/msm/sde/sde_plane.h6
-rw-r--r--drivers/gpu/drm/msm/sde/sde_rm.c248
-rw-r--r--drivers/gpu/drm/msm/sde/sde_rm.h12
-rw-r--r--drivers/gpu/drm/msm/sde/sde_splash.c396
-rw-r--r--drivers/gpu/drm/msm/sde/sde_splash.h85
-rw-r--r--drivers/gpu/drm/msm/sde_dbg.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/hid/hid-core.c10
-rw-r--r--drivers/hid/hid-input.c3
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-rmi.c4
-rw-r--r--drivers/hid/hidraw.c5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c13
-rw-r--r--drivers/hwmon/ina2xx.c90
-rw-r--r--drivers/i2c/busses/i2c-msm-v2.c4
-rw-r--r--drivers/iio/adc/hi8435.c27
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c2
-rw-r--r--drivers/infiniband/core/ucma.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c18
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c6
-rw-r--r--drivers/input/misc/drv260x.c2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c7
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c9
-rw-r--r--drivers/input/mouse/elantech.c11
-rw-r--r--drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c4
-rw-r--r--drivers/iommu/intel-svm.c1
-rw-r--r--drivers/irqchip/irq-gic.c123
-rw-r--r--drivers/isdn/mISDN/stack.c2
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/leds/leds-qpnp-flash-v2.c2
-rw-r--r--drivers/md/bcache/alloc.c19
-rw-r--r--drivers/md/bcache/super.c6
-rw-r--r--drivers/md/dm-verity-target.c65
-rw-r--r--drivers/md/dm-verity.h1
-rw-r--r--drivers/md/md-cluster.c4
-rw-r--r--drivers/md/raid5.c17
-rw-r--r--drivers/media/i2c/adv7481.c16
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c36
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c5
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c12
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp_util.c4
-rw-r--r--drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c4
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h5
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c8
-rw-r--r--drivers/media/rc/mceusb.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c4
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_multi_aac.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mmc/card/block.c4
-rw-r--r--drivers/mmc/core/core.c12
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c33
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c9
-rw-r--r--drivers/mtd/ubi/block.c2
-rw-r--r--drivers/mtd/ubi/build.c11
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c1
-rw-r--r--drivers/net/bonding/bond_main.c87
-rw-r--r--drivers/net/can/spi/rh850.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c19
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c32
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c23
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c9
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c26
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c17
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c10
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c16
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/ppp/pptp.c1
-rw-r--r--drivers/net/slip/slhc.c5
-rw-r--r--drivers/net/team/team.c38
-rw-r--r--drivers/net/usb/cdc_ether.c16
-rw-r--r--drivers/net/usb/cdc_ncm.c11
-rw-r--r--drivers/net/usb/lan78xx.c3
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/virtio_net.c16
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c5
-rw-r--r--drivers/net/vrf.c8
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c35
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h19
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c4
-rw-r--r--drivers/net/wireless/ray_cs.c7
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c3
-rw-r--r--drivers/net/xen-netfront.c7
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c23
-rw-r--r--drivers/platform/goldfish/Makefile3
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c11
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.h2
-rw-r--r--drivers/platform/goldfish/goldfish_pipe_v2.c100
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c58
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c4
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c68
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_flt.c8
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c4
-rw-r--r--drivers/power/supply/qcom/battery.c21
-rw-r--r--drivers/power/supply/qcom/qpnp-smb2.c6
-rw-r--r--drivers/power/supply/qcom/smb-lib.c101
-rw-r--r--drivers/power/supply/qcom/smb-lib.h3
-rw-r--r--drivers/powercap/powercap_sys.c1
-rw-r--r--drivers/regulator/cprh-kbss-regulator.c5
-rw-r--r--drivers/rtc/interface.c9
-rw-r--r--drivers/rtc/rtc-opal.c10
-rw-r--r--drivers/rtc/rtc-snvs.c2
-rw-r--r--drivers/s390/block/dasd.c8
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/cio/chsc.c14
-rw-r--r--drivers/s390/cio/qdio_main.c42
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c10
-rw-r--r--drivers/scsi/csiostor/csio_hw.c5
-rw-r--r--drivers/scsi/libiscsi.c24
-rw-r--r--drivers/scsi/libsas/sas_expander.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c28
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/soc/qcom/Kconfig7
-rw-r--r--drivers/soc/qcom/glink_smem_native_xprt.c23
-rw-r--r--drivers/soc/qcom/glink_ssr.c3
-rw-r--r--drivers/soc/qcom/hab/hab.c50
-rw-r--r--drivers/soc/qcom/hab/hab.h36
-rw-r--r--drivers/soc/qcom/hab/hab_mem_linux.c488
-rw-r--r--drivers/soc/qcom/hab/hab_mimex.c30
-rw-r--r--drivers/soc/qcom/hab/hab_msg.c28
-rw-r--r--drivers/soc/qcom/hab/hab_vchan.c5
-rw-r--r--drivers/soc/qcom/hab/khab.c18
-rw-r--r--drivers/soc/qcom/qdsp6v2/Makefile2
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr.c23
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_tal.c36
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_v2.c4
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_vm.c22
-rw-r--r--drivers/soc/qcom/qdsp6v2/audio-anc-dev-mgr.c1170
-rw-r--r--drivers/soc/qcom/qdsp6v2/audio_anc.c350
-rw-r--r--drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c32
-rw-r--r--drivers/soc/qcom/qdsp6v2/sdsp-anc.c801
-rw-r--r--drivers/soc/qcom/rpm_stats.c2
-rw-r--r--drivers/soc/qcom/scm_qcpe.c408
-rw-r--r--drivers/spmi/spmi-pmic-arb.c21
-rw-r--r--drivers/staging/android/Kconfig9
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/TODO10
-rw-r--r--drivers/staging/android/ion/ion.c9
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c4
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c5
-rw-r--r--drivers/staging/android/uapi/vsoc_shm.h303
-rw-r--r--drivers/staging/android/vsoc.c1169
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/thermal/imx_thermal.c6
-rw-r--r--drivers/thermal/power_allocator.c2
-rw-r--r--drivers/thunderbolt/nhi.c1
-rw-r--r--drivers/tty/goldfish.c32
-rw-r--r--drivers/tty/n_gsm.c40
-rw-r--r--drivers/tty/n_tty.c6
-rw-r--r--drivers/tty/serial/8250/8250_omap.c4
-rw-r--r--drivers/tty/serial/msm_serial_hs.c8
-rw-r--r--drivers/tty/serial/sccnxp.c15
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c5
-rw-r--r--drivers/tty/serial/sh-sci.c16
-rw-r--r--drivers/tty/tty_io.c14
-rw-r--r--drivers/tty/tty_ldisc.c16
-rw-r--r--drivers/usb/chipidea/core.c29
-rw-r--r--drivers/usb/core/generic.c9
-rw-r--r--drivers/usb/core/hcd.c14
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/usb.c17
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c4
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c2
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c257
-rw-r--r--drivers/usb/host/xhci-plat.c5
-rw-r--r--drivers/usb/host/xhci.c20
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c14
-rw-r--r--drivers/usb/serial/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/storage/ene_ub6250.c11
-rw-r--r--drivers/usb/usbip/stub_main.c5
-rw-r--r--drivers/usb/usbip/usbip_common.h2
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c107
-rw-r--r--drivers/vhost/vhost.c3
-rw-r--r--drivers/video/fbdev/msm/mdp3_ctrl.c14
-rw-r--r--drivers/video/fbdev/msm/msm_dba/adv7533.c2
-rw-r--r--drivers/video/fbdev/vfb.c17
-rw-r--r--drivers/watchdog/f71808e_wdt.c2
-rw-r--r--fs/autofs4/root.c2
-rw-r--r--fs/btrfs/extent_io.c2
-rw-r--r--fs/cifs/dir.c9
-rw-r--r--fs/cifs/file.c2
-rw-r--r--fs/cifs/smb2pdu.c14
-rw-r--r--fs/crypto/bio.c35
-rw-r--r--fs/crypto/crypto.c9
-rw-r--r--fs/crypto/fname.c140
-rw-r--r--fs/crypto/fscrypt_private.h32
-rw-r--r--fs/crypto/hooks.c156
-rw-r--r--fs/crypto/keyinfo.c1
-rw-r--r--fs/ext4/balloc.c20
-rw-r--r--fs/ext4/extents.c16
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/ialloc.c54
-rw-r--r--fs/ext4/inline.c66
-rw-r--r--fs/ext4/inode.c48
-rw-r--r--fs/ext4/mballoc.c23
-rw-r--r--fs/ext4/super.c6
-rw-r--r--fs/ext4/xattr.c30
-rw-r--r--fs/ext4/xattr.h32
-rw-r--r--fs/f2fs/checkpoint.c103
-rw-r--r--fs/f2fs/data.c295
-rw-r--r--fs/f2fs/dir.c9
-rw-r--r--fs/f2fs/extent_cache.c5
-rw-r--r--fs/f2fs/f2fs.h207
-rw-r--r--fs/f2fs/file.c98
-rw-r--r--fs/f2fs/gc.c29
-rw-r--r--fs/f2fs/inline.c2
-rw-r--r--fs/f2fs/inode.c9
-rw-r--r--fs/f2fs/namei.c255
-rw-r--r--fs/f2fs/node.c57
-rw-r--r--fs/f2fs/node.h5
-rw-r--r--fs/f2fs/recovery.c14
-rw-r--r--fs/f2fs/segment.c133
-rw-r--r--fs/f2fs/segment.h27
-rw-r--r--fs/f2fs/super.c352
-rw-r--r--fs/f2fs/sysfs.c73
-rw-r--r--fs/fs-writeback.c7
-rw-r--r--fs/jbd2/journal.c5
-rw-r--r--fs/jbd2/transaction.c1
-rw-r--r--fs/jffs2/super.c2
-rw-r--r--fs/lockd/svc.c6
-rw-r--r--fs/namei.c3
-rw-r--r--fs/namespace.c3
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c1
-rw-r--r--fs/nfs/nfs4proc.c13
-rw-r--r--fs/nfs/nfs4state.c10
-rw-r--r--fs/notify/fanotify/fanotify.c34
-rw-r--r--fs/overlayfs/inode.c12
-rw-r--r--fs/proc/base.c8
-rw-r--r--fs/proc/meminfo.c7
-rw-r--r--fs/proc/uid.c2
-rw-r--r--fs/reiserfs/journal.c2
-rw-r--r--fs/sdcardfs/dentry.c2
-rw-r--r--fs/sdcardfs/lookup.c2
-rw-r--r--fs/sdcardfs/main.c7
-rw-r--r--fs/ubifs/super.c14
-rw-r--r--include/linux/backing-dev-defs.h5
-rw-r--r--include/linux/backing-dev.h31
-rw-r--r--include/linux/blk_types.h1
-rw-r--r--include/linux/clk/msm-clock-generic.h3
-rw-r--r--include/linux/diagchar.h52
-rw-r--r--include/linux/f2fs_fs.h19
-rw-r--r--include/linux/fs.h21
-rw-r--r--include/linux/fscrypt.h174
-rw-r--r--include/linux/fscrypt_notsupp.h68
-rw-r--r--include/linux/fscrypt_supp.h70
-rw-r--r--include/linux/habmm.h5
-rw-r--r--include/linux/hid.h6
-rw-r--r--include/linux/if_vlan.h7
-rw-r--r--include/linux/mlx4/qp.h1
-rw-r--r--include/linux/mlx5/device.h10
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/mmzone.h1
-rw-r--r--include/linux/mtd/flashchip.h1
-rw-r--r--include/linux/power_supply.h3
-rw-r--r--include/linux/qdsp6v2/apr.h7
-rw-r--r--include/linux/qdsp6v2/apr_tal.h3
-rw-r--r--include/linux/qdsp6v2/audio-anc-dev-mgr.h46
-rw-r--r--include/linux/qdsp6v2/sdsp_anc.h302
-rw-r--r--include/linux/sched/sysctl.h5
-rw-r--r--include/linux/skbuff.h8
-rw-r--r--include/linux/tty.h3
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/hcd.h5
-rw-r--r--include/linux/virtio.h3
-rw-r--r--include/net/cfg80211.h14
-rw-r--r--include/net/llc_conn.h1
-rw-r--r--include/net/slhc_vj.h1
-rw-r--r--include/net/x25.h4
-rw-r--r--include/sound/apr_audio-v2.h257
-rw-r--r--include/sound/control.h7
-rw-r--r--include/sound/pcm_oss.h1
-rw-r--r--include/sound/q6afe-v2.h91
-rw-r--r--include/sound/q6core.h4
-rw-r--r--include/sound/rawmidi.h1
-rw-r--r--include/trace/events/preemptirq.h39
-rw-r--r--include/trace/events/sched.h32
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/fcntl.h21
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/msm_audio_anc.h53
-rw-r--r--include/uapi/media/msm_camera.h5
-rw-r--r--include/uapi/media/msm_camsensor_sdk.h2
-rw-r--r--ipc/shm.c23
-rw-r--r--kernel/events/core.c19
-rw-r--r--kernel/futex.c98
-rw-r--r--kernel/pid.c4
-rw-r--r--kernel/resource.c3
-rw-r--r--kernel/sched/core.c38
-rw-r--r--kernel/sched/fair.c3
-rw-r--r--kernel/sched/hmp.c8
-rw-r--r--kernel/sysctl.c16
-rw-r--r--kernel/taskstats.c6
-rw-r--r--kernel/time/hrtimer.c1
-rw-r--r--kernel/trace/trace_irqsoff.c42
-rw-r--r--lib/kobject.c12
-rw-r--r--mm/filemap.c16
-rw-r--r--mm/memory.c17
-rw-r--r--mm/mmap.c7
-rw-r--r--mm/nommu.c7
-rw-r--r--mm/page-writeback.c18
-rw-r--r--mm/slab.c3
-rw-r--r--mm/vmstat.c1
-rw-r--r--net/8021q/vlan_dev.c6
-rw-r--r--net/bluetooth/hci_core.c17
-rw-r--r--net/ceph/messenger.c7
-rw-r--r--net/ceph/osdmap.c1
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/neighbour.c54
-rw-r--r--net/core/net_namespace.c19
-rw-r--r--net/core/skbuff.c75
-rw-r--r--net/core/sysctl_net_core.c2
-rw-r--r--net/dns_resolver/dns_key.c13
-rw-r--r--net/ieee802154/socket.c8
-rw-r--r--net/ipc_router/ipc_router_core.c64
-rw-r--r--net/ipv4/ah4.c8
-rw-r--r--net/ipv4/arp.c18
-rw-r--r--net/ipv4/esp4.c13
-rw-r--r--net/ipv4/ip_tunnel.c11
-rw-r--r--net/ipv4/tcp.c6
-rw-r--r--net/ipv4/tcp_input.c31
-rw-r--r--net/ipv6/addrconf.c5
-rw-r--r--net/ipv6/ah6.c8
-rw-r--r--net/ipv6/esp6.c12
-rw-r--r--net/ipv6/ip6_gre.c8
-rw-r--r--net/ipv6/ip6_output.c19
-rw-r--r--net/ipv6/ip6_tunnel.c11
-rw-r--r--net/ipv6/ip6_vti.c7
-rw-r--r--net/ipv6/route.c5
-rw-r--r--net/ipv6/sit.c9
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_netlink.c2
-rw-r--r--net/l2tp/l2tp_ppp.c7
-rw-r--r--net/llc/af_llc.c17
-rw-r--r--net/llc/llc_c_ac.c9
-rw-r--r--net/llc/llc_conn.c22
-rw-r--r--net/mac80211/mlme.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c7
-rw-r--r--net/netlink/af_netlink.c3
-rw-r--r--net/packet/af_packet.c88
-rw-r--r--net/packet/internal.h10
-rw-r--r--net/rds/bind.c1
-rw-r--r--net/rxrpc/rxkad.c21
-rw-r--r--net/sched/act_api.c4
-rw-r--r--net/sched/act_bpf.c12
-rw-r--r--net/sctp/ipv6.c64
-rw-r--r--net/sctp/socket.c17
-rw-r--r--net/sunrpc/rpc_pipe.c1
-rw-r--r--net/sunrpc/xprtsock.c7
-rw-r--r--net/tipc/net.c3
-rw-r--r--net/wireless/reg.c59
-rw-r--r--net/x25/af_x25.c24
-rw-r--r--net/x25/sysctl_net_x25.c5
-rw-r--r--net/xfrm/xfrm_state.c2
-rwxr-xr-xscripts/tags.sh1
-rw-r--r--security/selinux/hooks.c10
-rw-r--r--sound/core/oss/pcm_oss.c189
-rw-r--r--sound/core/pcm.c8
-rw-r--r--sound/core/pcm_native.c1
-rw-r--r--sound/core/rawmidi.c44
-rw-r--r--sound/core/rawmidi_compat.c18
-rw-r--r--sound/core/seq/oss/seq_oss_event.c15
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c85
-rw-r--r--sound/core/seq/oss/seq_oss_synth.h3
-rw-r--r--sound/drivers/opl3/opl3_synth.c7
-rw-r--r--sound/pci/asihpi/hpimsginit.c13
-rw-r--r--sound/pci/asihpi/hpioctl.c4
-rw-r--r--sound/pci/hda/hda_hwdep.c12
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_realtek.c2
-rw-r--r--sound/pci/rme9652/hdspm.c24
-rw-r--r--sound/pci/rme9652/rme9652.c6
-rw-r--r--sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c7
-rw-r--r--sound/soc/codecs/ssm2602.c19
-rw-r--r--sound/soc/fsl/fsl_esai.c7
-rw-r--r--sound/soc/intel/atom/sst/sst_stream.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c7
-rw-r--r--sound/soc/msm/apq8096-auto.c40
-rw-r--r--sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c5
-rw-r--r--sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c80
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-dtmf-v2.c8
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c18
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c22
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c110
-rw-r--r--sound/soc/msm/qdsp6v2/msm-qti-pp-config.c5
-rw-r--r--sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c5
-rw-r--r--sound/soc/msm/qdsp6v2/q6adm.c4
-rw-r--r--sound/soc/msm/qdsp6v2/q6afe.c362
-rw-r--r--sound/soc/msm/qdsp6v2/q6audio-v2.c3
-rw-r--r--sound/soc/msm/qdsp6v2/q6core.c6
-rw-r--r--sound/soc/msm/sdm660-external.c104
-rw-r--r--sound/soc/msm/sdm660-internal.c133
-rw-r--r--sound/soc/sh/rcar/ssi.c11
-rw-r--r--sound/usb/line6/midi.c2
-rw-r--r--sound/usb/mixer_maps.c3
-rw-r--r--sound/usb/usb_audio_qmi_svc.c91
-rw-r--r--sound/usb/usb_audio_qmi_v01.c60
-rw-r--r--sound/usb/usb_audio_qmi_v01.h23
-rw-r--r--tools/perf/builtin-trace.c4
-rw-r--r--tools/perf/util/header.c12
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c64
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h2
-rw-r--r--tools/perf/util/intel-pt.c37
-rw-r--r--tools/perf/util/probe-event.c8
-rw-r--r--tools/perf/util/unwind-libdw.c8
-rw-r--r--tools/perf/util/util.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-resched-dscr.c2
597 files changed, 15100 insertions, 3464 deletions
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 000000000000..4341e3a71dad
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,27 @@
+cc_binary_host {
+ name: "unifdef",
+ srcs: ["scripts/unifdef.c"],
+ sanitize: {
+ never: true,
+ }
+}
+
+gensrcs {
+ name: "qseecom-kernel-includes",
+
+ // move to out/ as root for header generation because of scripts/unifdef
+ // storage - at the expense of extra ../ references
+ cmd: "pushd out && mkdir -p scripts && rm -f scripts/unifdef && ln -s ../../$(location unifdef) scripts/unifdef && ../$(location scripts/headers_install.sh) `dirname ../$(out)` ../ $(in) && popd",
+
+ tools: ["unifdef"],
+ tool_files: ["scripts/headers_install.sh"],
+ export_include_dirs: ["include/uapi"],
+ srcs: ["include/uapi/linux/qseecom.h"],
+ output_extension: "h",
+}
+
+cc_library_headers {
+ name: "qseecom-kernel-headers",
+ generated_headers: ["qseecom-kernel-includes"],
+ export_generated_headers: ["qseecom-kernel-includes"],
+}
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index db7aab1516de..b8d0a30f1644 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -192,3 +192,14 @@ Date: November 2017
Contact: "Sheng Yong" <shengyong1@huawei.com>
Description:
Controls readahead inode block in readdir.
+
+What: /sys/fs/f2fs/<disk>/extension_list
+Date: February 2018
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:
+ Used to configure the extension list:
+ - Query: cat /sys/fs/f2fs/<disk>/extension_list
+ - Add: echo '[h/c]extension' > /sys/fs/f2fs/<disk>/extension_list
+ - Del: echo '[h/c]!extension' > /sys/fs/f2fs/<disk>/extension_list
+ - [h] means add/del hot file extension
+ - [c] means add/del cold file extension
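
A minimal usage sketch for the new extension_list interface, based on the
format described above (the device name sda1 and the extensions mp4/so are
illustrative placeholders, not values from this patch):

    # Query the current hot/cold extension list
    cat /sys/fs/f2fs/sda1/extension_list
    # Register "mp4" as a hot file extension
    echo 'hmp4' > /sys/fs/f2fs/sda1/extension_list
    # Remove "mp4" from the hot list again
    echo 'h!mp4' > /sys/fs/f2fs/sda1/extension_list
    # Register "so" as a cold file extension
    echo 'cso' > /sys/fs/f2fs/sda1/extension_list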
diff --git a/Documentation/device-mapper/verity.txt b/Documentation/device-mapper/verity.txt
index 89fd8f9a259f..b3d2e4a42255 100644
--- a/Documentation/device-mapper/verity.txt
+++ b/Documentation/device-mapper/verity.txt
@@ -109,6 +109,17 @@ fec_start <offset>
This is the offset, in <data_block_size> blocks, from the start of the
FEC device to the beginning of the encoding data.
+check_at_most_once
+ Verify data blocks only the first time they are read from the data device,
+ rather than every time. This reduces the overhead of dm-verity so that it
+ can be used on systems that are memory and/or CPU constrained. However, it
+ provides a reduced level of security because only offline tampering of the
+ data device's content will be detected, not online tampering.
+
+ Hash blocks are still verified each time they are read from the hash device,
+ since verification of hash blocks is less performance critical than that of
+ data blocks, and a hash block no longer needs to be verified once all the
+ data blocks it covers have been verified anyway.
Theory of operation
===================
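
A sketch of how the new option can be passed as a dm-verity optional argument
(device names, sizes, root hash and salt below are placeholders, not values
taken from this patch):

    # One optional argument follows the salt: check_at_most_once
    dmsetup create vroot --readonly --table \
        "0 409600 verity 1 /dev/sda1 /dev/sda2 4096 4096 51200 1 sha256 \
         <root_hash> <salt> 1 check_at_most_once"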
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index b6d0c9affa0e..4cf7b93b922e 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2550,3 +2550,69 @@ Example of child node that would have qcom,wdsp-cmpnt-dev-name property
wcd934x_cdc: tavil_codec {
qcom,wdsp-cmpnt-dev-name = "tavil_codec";
};
+
+
+* MSM external ANC driver
+
+Required properties:
+- compatible : "qcom,msm-ext-anc"
+- qcom,refs-port-id : AFE port ID for playback in the ADSP, used as the reference input of the ANC algorithm.
+- qcom,spkr-port-id : AFE port ID for the ANC speaker in the Sensor DSP.
+- qcom,mic-port-id : AFE port ID for the ANC mic in the Sensor DSP.
+- qcom,num-anc-mic : Defines the number of microphones directly involved in ANC processing.
+- qcom,num-add-mic-signal : Defines the number of additional microphones that may be required, e.g. for monitoring the environment or as an input reference signal.
+- qcom,anc-mic-array : Array that specifies the channel or slot indices used for ANC in one MIC hardware interface such as TDM Tx.
+ The channel or slot index is counted from 0.
+ Always place valid channel or slot index values starting from index 0 of this array.
+ This array consists of two parts:
+ Part I ---- num_anc_mic entries, for the microphones directly involved in ANC processing.
+ Part II ---- num_add_mic_signal entries, for additional microphones that may be required for
+ monitoring the environment or as an input reference signal.
+ The num_add_mic_signal entries are always appended after the num_anc_mic entries.
+- qcom,num-anc-spkr : Defines the number of speakers directly involved in ANC processing.
+- qcom,num-add-spkr-signal : Defines the number of additional speaker channels that connect to speakers of interest, for example a subwoofer.
+- qcom,anc-spkr-array : Array that specifies the channel or slot indices used for ANC in one SPEAKER hardware interface such as TDM Rx.
+ The channel or slot index is counted from 0.
+ Always place valid channel or slot index values starting from index 0 of this array.
+ This array consists of two parts:
+ Part I ---- num_anc_spkr entries, for the speakers directly involved in ANC processing.
+ Part II ---- num_add_spkr_signal entries, for additional speakers.
+ The num_add_spkr_signal entries are always appended after the num_anc_spkr entries.
+- qcom,refs-tdm-rx : Phandle of the refs TDM port node.
+- qcom,spkr-tdm-rx : Phandle of the spkr TDM port node.
+- qcom,mic-tdm-tx : Phandle of the mic TDM port node.
+Example 1:
+
+ qcom,msm-ext-anc {
+ compatible = "qcom,msm-ext-anc";
+ qcom,refs-port-id = <36906>;
+ qcom,spkr-port-id = <36912>;
+ qcom,mic-port-id = <36913>;
+ qcom,num-anc-mic = <4>;
+ qcom,num-add-mic-signal = <0>;
+ qcom,anc-mic-array = <0 1 2 3>;
+ qcom,num-anc-spkr = <4>;
+ qcom,num-add-spkr-signal = <0>;
+ qcom,anc-spkr-array = <0 1 2 3>;
+ qcom,refs-tdm-rx = <&dai_tert_tdm_rx_5>;
+ qcom,spkr-tdm-rx = <&dai_quat_tdm_rx_0>;
+ qcom,mic-tdm-tx = <&dai_quat_tdm_tx_0>;
+ };
+
+Example 2:
+
+ qcom,msm-ext-anc {
+ compatible = "qcom,msm-ext-anc";
+ qcom,refs-port-id = <36906>;
+ qcom,spkr-port-id = <36912>;
+ qcom,mic-port-id = <36913>;
+ qcom,num-anc-mic = <4>;
+ qcom,num-add-mic-signal = <2>;
+ qcom,anc-mic-array = <2 1 0 3 6 7>;
+ qcom,num-anc-spkr = <4>;
+ qcom,num-add-spkr-signal = <1>;
+ qcom,anc-spkr-array = <0 1 2 3 6>;
+ qcom,refs-tdm-rx = <&dai_tert_tdm_rx_5>;
+ qcom,spkr-tdm-rx = <&dai_quat_tdm_rx_0>;
+ qcom,mic-tdm-tx = <&dai_quat_tdm_tx_0>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 7328b2847e02..ddca4c39e2de 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -61,7 +61,6 @@ Optional properties:
- snps,num-gsi-evt-buffs: If present, specifies number of GSI based hardware accelerated event buffers.
1 event buffer is needed per h/w accelerated endpoint.
- xhci-imod-value: Interrupt moderation interval for host mode (in increments of 250nsec).
- - usb-core-id: Differentiates between different controllers present on a device.
This is usually a subnode to DWC3 glue to which it is connected.
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 6cf9ad12c57f..1f52baea2f69 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -172,6 +172,23 @@ offgrpjquota Turn off group journelled quota.
offprjjquota Turn off project journelled quota.
quota Enable plain user disk quota accounting.
noquota Disable all plain disk quota option.
+whint_mode=%s Control which write hints are passed down to block
+ layer. This supports "off", "user-based", and
+ "fs-based". In "off" mode (default), f2fs does not pass
+ down hints. In "user-based" mode, f2fs tries to pass
+ down hints given by users. And in "fs-based" mode, f2fs
+ passes down hints with its policy.
+alloc_mode=%s Adjust block allocation policy, which supports "reuse"
+ and "default".
+fsync_mode=%s Control the policy of fsync. Currently supports "posix"
+ and "strict". In "posix" mode, which is default, fsync
+ will follow POSIX semantics and does a light operation
+ to improve the filesystem performance. In "strict" mode,
+ fsync will be heavy and behaves in line with xfs, ext4
+ and btrfs, where xfstest generic/342 will pass, but the
+ performance will regress.
+test_dummy_encryption Enable dummy encryption, which provides a fake fscrypt
+ context. The fake fscrypt context is used by xfstests.
================================================================================
DEBUGFS ENTRIES
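
A usage sketch combining the new mount options documented above (the block
device and mount point are illustrative, not part of this patch):

    # Strict fsync semantics, user-supplied write hints, "reuse" allocation
    mount -t f2fs -o fsync_mode=strict,whint_mode=user-based,alloc_mode=reuse \
          /dev/sdb1 /mnt/f2fs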
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d3e294320989..e47f2c950855 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2453,6 +2453,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noalign [KNL,ARM]
+ noaltinstr [S390] Disables alternative instructions patching
+ (CPU alternatives feature).
+
noapic [SMP,APIC] Tells the kernel to not make use of any
IOAPICs that may be present in the system.
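
A hedged example of where the new s390 parameter would be placed on the kernel
command line (the zipl.conf path and the root= value are illustrative only):

    # /etc/zipl.conf - disable CPU alternatives patching on s390
    parameters="root=/dev/dasda1 noaltinstr"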
diff --git a/Makefile b/Makefile
index 2213da14bc6d..367fac0527a4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 127
+SUBLEVEL = 131
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
index a7da0dd0c98f..0898213f3bb2 100644
--- a/arch/arm/boot/dts/at91sam9g25.dtsi
+++ b/arch/arm/boot/dts/at91sam9g25.dtsi
@@ -21,7 +21,7 @@
atmel,mux-mask = <
/* A B C */
0xffffffff 0xffe0399f 0xc000001c /* pioA */
- 0x0007ffff 0x8000fe3f 0x00000000 /* pioB */
+ 0x0007ffff 0x00047e3f 0x00000000 /* pioB */
0x80000000 0x07c0ffff 0xb83fffff /* pioC */
0x003fffff 0x003f8000 0x00000000 /* pioD */
>;
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index 96d7eede412e..036c9bd9bf75 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -23,7 +23,7 @@
imx53-qsrb {
pinctrl_pmic: pmicgrp {
fsl,pins = <
- MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */
+ MX53_PAD_CSI0_DAT5__GPIO5_23 0x1c4 /* IRQ */
>;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 9e096d811bed..7a032dd84bb2 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -88,6 +88,7 @@
clocks = <&clks 201>;
VDDA-supply = <&reg_2p5v>;
VDDIO-supply = <&reg_3p3v>;
+ lrclk-strength = <3>;
};
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 9430a9928199..00de37fe5f8a 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -132,7 +132,7 @@
};
esdhc: esdhc@1560000 {
- compatible = "fsl,esdhc";
+ compatible = "fsl,ls1021a-esdhc", "fsl,esdhc";
reg = <0x0 0x1560000 0x0 0x10000>;
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <0>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
index 80901ddcf7d1..111266ca59f0 100644
--- a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
+++ b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -699,14 +699,14 @@
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&pcmnoirq>,
- <&loopback1>;
+ <&loopback1>, <&pcm_dtmf>;
asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
"msm-pcm-dsp.2", "msm-voip-dsp",
"msm-pcm-voice", "msm-pcm-loopback",
"msm-compress-dsp", "msm-pcm-hostless",
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-pcm-dsp-noirq",
- "msm-pcm-loopback.1";
+ "msm-pcm-loopback.1", "msm-pcm-dtmf";
asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, <&dai_hdmi>,
<&dai_mi2s_sec>, <&dai_mi2s>, <&dai_mi2s_quat>,
<&afe_pcm_rx>, <&afe_pcm_tx>,
@@ -769,6 +769,10 @@
qcom,msm-pcm-loopback-low-latency;
};
+ pcm_dtmf: qcom,msm-pcm-dtmf {
+ compatible = "qcom,msm-pcm-dtmf";
+ };
+
qcom,msm-dai-mi2s {
dai_mi2s_sec: qcom,msm-dai-q6-mi2s-sec {
qcom,msm-mi2s-rx-lines = <2>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-auto-adp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-auto-adp.dts
index a91ec5eeb2e7..46894ea1e530 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-auto-adp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-auto-adp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,10 +36,6 @@
};
&soc {
- qcom,msm-ssc-sensors {
- status = "disabled";
- };
-
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
diff --git a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts
index 6c2413d98efd..4648d2000d01 100644
--- a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts
+++ b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp-lite.dts
@@ -30,10 +30,6 @@
};
&soc {
- qcom,msm-ssc-sensors {
- status = "disabled";
- };
-
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
@@ -101,6 +97,3 @@
};
};
-&modem_mem {
- status = "disabled";
-};
diff --git a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp.dts b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp.dts
index fa3467646b7c..d0758aeacb6f 100644
--- a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-adp.dts
@@ -37,10 +37,6 @@
};
&soc {
- qcom,msm-ssc-sensors {
- status = "disabled";
- };
-
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
@@ -86,6 +82,8 @@
};
};
-&modem_mem {
- status = "disabled";
+&wil6210 {
+ qcom,pcie-parent = <&pcie0>;
+ qcom,smmu-support;
+ status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 2adfb8b749eb..692dd58ac956 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -1041,6 +1041,12 @@
status = "disabled";
};
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ qcom,firmware-name = "slpi";
+ status = "ok";
+ };
+
sound-adp-agave {
compatible = "qcom,apq8096-asoc-snd-adp-agave";
qcom,model = "apq8096-adp-agave-snd-card";
@@ -1048,14 +1054,14 @@
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&pcmnoirq>,
- <&loopback1>;
+ <&loopback1>, <&pcm_dtmf>;
asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
"msm-pcm-dsp.2", "msm-voip-dsp",
"msm-pcm-voice", "msm-pcm-loopback",
"msm-compress-dsp", "msm-pcm-hostless",
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-pcm-dsp-noirq",
- "msm-pcm-loopback.1";
+ "msm-pcm-loopback.1", "msm-pcm-dtmf";
asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, <&dai_hdmi>,
<&dai_mi2s_sec>, <&dai_mi2s>, <&dai_mi2s_quat>,
<&afe_pcm_rx>, <&afe_pcm_tx>,
@@ -1107,6 +1113,37 @@
asoc-codec-names = "msm-stub-codec.1";
};
+ qcom,msm-dai-tdm-tert-rx {
+ qcom,msm-cpudai-tdm-group-num-ports = <6>;
+ qcom,msm-cpudai-tdm-group-port-id = <36896 36898 36900
+ 36902 36904 36906>;
+
+ dai_tert_tdm_rx_5: qcom,msm-dai-q6-tdm-tert-rx-5 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36906>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
+ qcom,msm-ext-anc {
+ compatible = "qcom,msm-ext-anc";
+ qcom,refs-port-id = <36906>;
+ qcom,spkr-port-id = <36912>;
+ qcom,mic-port-id = <36913>;
+ qcom,sample-rate = <48000>;
+ qcom,num-channels = <8>;
+ qcom,bit-width = <32>;
+ qcom,num-anc-mic = <4>;
+ qcom,num-add-mic-signal = <0>;
+ qcom,anc-mic-array = <0 1 2 3>;
+ qcom,num-anc-spkr = <4>;
+ qcom,num-add-spkr-signal = <0>;
+ qcom,anc-spkr-array = <0 1 2 3>;
+ qcom,refs-tdm-rx = <&dai_tert_tdm_rx_5>;
+ qcom,spkr-tdm-rx = <&dai_quat_tdm_rx_0>;
+ qcom,mic-tdm-tx = <&dai_quat_tdm_tx_0>;
+ };
+
usb_detect: usb_detect {
compatible = "qcom,gpio-usbdetect";
qcom,vbus-det-gpio = <&pm8994_gpios 17 0>;
@@ -1120,9 +1157,8 @@
qcom,msm-pcm-loopback-low-latency;
};
- loopback1: qcom,msm-pcm-loopback-low-latency {
- compatible = "qcom,msm-pcm-loopback";
- qcom,msm-pcm-loopback-low-latency;
+ pcm_dtmf: qcom,msm-pcm-dtmf {
+ compatible = "qcom,msm-pcm-dtmf";
};
qcom,msm-dai-mi2s {
@@ -1547,3 +1583,17 @@
};
};
};
+
+/ {
+ qcom,sde-reserved-plane {
+ qcom,sde-plane-id@0 {
+ reg = <0x0>;
+ qcom,plane-name = "vig0";
+ };
+ };
+};
+
+&sde_kms {
+ contiguous-region = <&cont_splash_mem &cont_splash_mem_hdmi
+ &early_camera_mem>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index 7cbc09767f62..8e109b51aaa2 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -905,14 +905,14 @@
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&pcmnoirq>,
- <&loopback1>;
+ <&loopback1>, <&pcm_dtmf>;
asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
"msm-pcm-dsp.2", "msm-voip-dsp",
"msm-pcm-voice", "msm-pcm-loopback",
"msm-compress-dsp", "msm-pcm-hostless",
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-pcm-dsp-noirq",
- "msm-pcm-loopback.1";
+ "msm-pcm-loopback.1", "msm-pcm-dtmf";
asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, <&dai_hdmi>,
<&dai_mi2s_sec>, <&dai_mi2s>, <&dai_mi2s_quat>,
<&afe_pcm_rx>, <&afe_pcm_tx>,
@@ -977,6 +977,10 @@
qcom,msm-pcm-loopback-low-latency;
};
+ pcm_dtmf: qcom,msm-pcm-dtmf {
+ compatible = "qcom,msm-pcm-dtmf";
+ };
+
usb_vbus_vreg: usb_vbus_vreg {
compatible = "regulator-fixed";
regulator-name = "usb_vbus_vreg";
@@ -1404,3 +1408,16 @@
};
};
+/ {
+ qcom,sde-reserved-plane {
+ qcom,sde-plane-id@0 {
+ reg = <0x0>;
+ qcom,plane-name = "vig0";
+ };
+ };
+};
+
+&sde_kms {
+ contiguous-region = <&cont_splash_mem &cont_splash_mem_hdmi
+ &early_camera_mem>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
index d8770a738422..c0969a6aa106 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -524,14 +524,14 @@
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&pcmnoirq>,
- <&loopback1>;
+ <&loopback1>, <&pcm_dtmf>;
asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
"msm-pcm-dsp.2", "msm-voip-dsp",
"msm-pcm-voice", "msm-pcm-loopback",
"msm-compress-dsp", "msm-pcm-hostless",
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-pcm-dsp-noirq",
- "msm-pcm-loopback.1";
+ "msm-pcm-loopback.1", "msm-pcm-dtmf";
asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, <&dai_hdmi>,
<&dai_mi2s_sec>, <&dai_mi2s>, <&dai_mi2s_quat>,
<&afe_pcm_rx>, <&afe_pcm_tx>,
@@ -588,6 +588,10 @@
qcom,msm-pcm-loopback-low-latency;
};
+ pcm_dtmf: qcom,msm-pcm-dtmf {
+ compatible = "qcom,msm-pcm-dtmf";
+ };
+
qcom,msm-dai-mi2s {
dai_mi2s_sec: qcom,msm-dai-q6-mi2s-sec {
qcom,msm-mi2s-rx-lines = <2>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-v3-auto-adp.dts b/arch/arm/boot/dts/qcom/msm8996-v3-auto-adp.dts
index 68956d71b74d..89a585bd426e 100644
--- a/arch/arm/boot/dts/qcom/msm8996-v3-auto-adp.dts
+++ b/arch/arm/boot/dts/qcom/msm8996-v3-auto-adp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -37,10 +37,6 @@
};
&soc {
- qcom,msm-ssc-sensors {
- status = "disabled";
- };
-
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index efc91f9f86f7..c77d7fd9869c 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -2018,7 +2018,6 @@
snps,nominal-elastic-buffer;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
- usb-core-id = <0>;
};
qcom,usbbam@6b04000 {
@@ -2127,7 +2126,6 @@
snps,nominal-elastic-buffer;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
- usb-core-id = <1>;
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts
index 9fd2686dac67..02f5dbcc0d4d 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts
+++ b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts
@@ -30,10 +30,6 @@
};
&soc {
- qcom,msm-ssc-sensors {
- status = "disabled";
- };
-
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts
index d2aa5c854c83..05a3144fb312 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts
+++ b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -37,10 +37,6 @@
};
&soc {
- qcom,msm-ssc-sensors {
- status = "disabled";
- };
-
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
@@ -86,3 +82,8 @@
};
};
+&wil6210 {
+ qcom,pcie-parent = <&pcie0>;
+ qcom,smmu-support;
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
index ccc1be75f39b..f8f0403d9c7a 100644
--- a/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
+++ b/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,10 +17,6 @@
#include "sdm636-qrd.dtsi"
#include "msm-pm660a.dtsi"
-&smb1351_charger {
- status = "disabled";
-};
-
&i2c_2 {
smb138x: qcom,smb138x@8 {
compatible = "qcom,i2c-pmic";
@@ -154,6 +150,8 @@
};
};
+#include "sdm660-external-codec.dtsi"
+
/ {
model = "Qualcomm Technologies, Inc. SDA 636 PM660 + PM660A QRD HDK636";
compatible = "qcom,sda636-qrd", "qcom,sda636", "qcom,qrd";
@@ -172,15 +170,6 @@
qcom,mdss-pref-prim-intf = "dsi";
};
-&mdss_dp_ctrl {
- pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
- pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>;
- pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>;
- qcom,aux-en-gpio = <&tlmm 55 0>;
- qcom,aux-sel-gpio = <&tlmm 56 0>;
- qcom,usbplug-cc-gpio = <&tlmm 58 0>;
-};
-
&mdss_dsi {
hw-config = "single_dsi";
};
@@ -190,6 +179,7 @@
pinctrl-names = "mdss_default", "mdss_sleep";
pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ oledb-supply = <&pm660a_oledb>;
lab-supply = <&lab_regulator>;
ibb-supply = <&ibb_regulator>;
qcom,platform-reset-gpio = <&tlmm 53 0>;
@@ -203,7 +193,24 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
};
+/delete-node/ &tasha_hph_en0;
+/delete-node/ &tasha_hph_en1;
+
+&qusb_phy0 {
+ qcom,qusb-phy-init-seq = <0xf8 0x80
+ 0xb3 0x84
+ 0x83 0x88
+ 0xc7 0x8c
+ 0x30 0x08
+ 0x79 0x0c
+ 0x21 0x10
+ 0x14 0x9c
+ 0x9f 0x1c
+ 0x00 0x18>;
+};
+
&tasha_snd {
+ qcom,model = "sdm660-tasha-skus-snd-card";
qcom,audio-routing =
"AIF4 VI", "MCLK",
"RX_BIAS", "MCLK",
@@ -218,25 +225,10 @@
"MIC BIAS4", "Digital Mic5",
"SpkrLeft IN", "SPK1 OUT";
qcom,msm-mbhc-hphl-swh = <0>;
-};
-
-&usb2s {
- status = "okay";
-};
-
-&qusb_phy0 {
- reg = <0x0c012000 0x180>,
- <0x00188018 0x4>;
- reg-names = "qusb_phy_base",
- "ref_clk_addr";
- qcom,qusb-phy-init-seq = <0xf8 0x80
- 0xb3 0x84
- 0x83 0x88
- 0xc7 0x8c
- 0x30 0x08
- 0x79 0x0c
- 0x21 0x10
- 0x14 0x9c
- 0x9f 0x1c
- 0x00 0x18>;
+ /delete-property/ qcom,us-euro-gpios;
+ /delete-property/ qcom,hph-en0-gpio;
+ /delete-property/ qcom,hph-en1-gpio;
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_213>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
};
diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index 7d61370a16c0..8873627e64d6 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -1437,6 +1437,7 @@
qcom,msm_fastrpc {
compatible = "qcom,msm-fastrpc-adsp";
qcom,fastrpc-glink;
+ qcom,fastrpc-vmid-heap-shared;
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
diff --git a/arch/arm/boot/dts/qcom/sdm636-pm660a-qrd.dts b/arch/arm/boot/dts/qcom/sdm636-pm660a-qrd.dts
index 68734c9cd8c6..2b36b89c9db0 100644
--- a/arch/arm/boot/dts/qcom/sdm636-pm660a-qrd.dts
+++ b/arch/arm/boot/dts/qcom/sdm636-pm660a-qrd.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,7 +15,7 @@
#include "sdm636.dtsi"
#include "sdm636-qrd.dtsi"
-#include "msm-pm660a.dtsi"
+#include "sdm660-internal-codec.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM 636 PM660 + PM660A QRD";
@@ -26,6 +26,33 @@
<0x0001001b 0x0202001a 0x0 0x0>;
};
+&int_codec {
+ qcom,model = "sdm660-snd-card-skush";
+ /delete-property/ qcom,us-euro-gpios;
+ qcom,audio-routing =
+ "RX_BIAS", "INT_MCLK0",
+ "SPK_RX_BIAS", "INT_MCLK0",
+ "INT_LDO_H", "INT_MCLK0",
+ "MIC BIAS External2", "Headset Mic",
+ "AMIC2", "MIC BIAS External2",
+ "MIC BIAS External", "Digital Mic1",
+ "DMIC1", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic3",
+ "DMIC3", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic4",
+ "DMIC4", "MIC BIAS External",
+ "SpkrLeft IN", "SPK1 OUT",
+ "PDM_IN_RX1", "PDM_OUT_RX1",
+ "PDM_IN_RX2", "PDM_OUT_RX2",
+ "PDM_IN_RX3", "PDM_OUT_RX3",
+ "ADC1_IN", "ADC1_OUT",
+ "ADC2_IN", "ADC2_OUT",
+ "ADC3_IN", "ADC3_OUT";
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_213_en>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+};
+
&pm660a_oledb {
status = "okay";
qcom,oledb-default-voltage-mv = <6400>;
@@ -44,6 +71,7 @@
pinctrl-names = "mdss_default", "mdss_sleep";
pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ oledb-supply = <&pm660a_oledb>;
lab-supply = <&lab_regulator>;
ibb-supply = <&ibb_regulator>;
qcom,platform-reset-gpio = <&tlmm 53 0>;
diff --git a/arch/arm/boot/dts/qcom/sdm636-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm636-qrd.dtsi
index 8791e7420148..5ee4db3155c7 100644
--- a/arch/arm/boot/dts/qcom/sdm636-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm636-qrd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,7 +10,7 @@
* GNU General Public License for more details.
*/
-#include "sdm660-qrd.dtsi"
+#include "sdm630-qrd.dtsi"
/ {
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi b/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi
index 87be1529d9fc..c4ff839d3c07 100644
--- a/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi
@@ -700,9 +700,9 @@
regulator-max-microvolt = <8>;
qcom,cpr-fuse-corners = <5>;
- qcom,cpr-fuse-combos = <32>;
- qcom,cpr-speed-bins = <4>;
- qcom,cpr-speed-bin-corners = <8 8 0 8>;
+ qcom,cpr-fuse-combos = <40>;
+ qcom,cpr-speed-bins = <5>;
+ qcom,cpr-speed-bin-corners = <8 8 0 8 8>;
qcom,cpr-corners =
/* Speed bin 0 */
<8 8 8 8 8 8 8 8>,
@@ -714,7 +714,11 @@
<0 0 0 0 0 0 0 0>,
/* Speed bin 3 */
+ <8 8 8 8 8 8 8 8>,
+
+ /* Speed bin 4 */
<8 8 8 8 8 8 8 8>;
+
qcom,cpr-corner-fmax-map =
/* Speed bin 0 */
<2 3 4 5 8>,
@@ -726,6 +730,9 @@
<0 0 0 0 0>,
/* Speed bin 3 */
+ <2 3 4 5 8>,
+
+ /* Speed bin 4 */
<2 3 4 5 8>;
qcom,cpr-voltage-ceiling =
@@ -750,7 +757,12 @@
/* Speed bin 3 */
<300000000 633600000 902400000
1113600000 1401600000 1536000000
- 1612800000 1843200000>;
+ 1612800000 1843200000>,
+
+ /* Speed bin 4 */
+ <300000000 633600000 902400000
+ 1113600000 1401600000 1536000000
+ 1747200000 1843200000>;
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
@@ -839,9 +851,9 @@
regulator-max-microvolt = <7>;
qcom,cpr-fuse-corners = <5>;
- qcom,cpr-fuse-combos = <32>;
- qcom,cpr-speed-bins = <4>;
- qcom,cpr-speed-bin-corners = <7 7 0 7>;
+ qcom,cpr-fuse-combos = <40>;
+ qcom,cpr-speed-bins = <5>;
+ qcom,cpr-speed-bin-corners = <7 7 0 7 7>;
qcom,cpr-corners =
/* Speed-bin 0 */
<7 7 7 7 7 7 7 7>,
@@ -853,6 +865,9 @@
<0 0 0 0 0 0 0 0>,
/* Speed-bin 3 */
+ <7 7 7 7 7 7 7 7>,
+
+ /* Speed-bin 4 */
<7 7 7 7 7 7 7 7>;
qcom,cpr-corner-fmax-map =
@@ -866,6 +881,9 @@
<0 0 0 0 0>,
/* Speed-bin 3 */
+ <2 3 4 6 7>,
+
+ /* Speed-bin 4 */
<2 3 4 6 7>;
qcom,cpr-voltage-ceiling =
@@ -890,6 +908,11 @@
/* Speed bin 3 */
<300000000 1113600000 1401600000
1747200000 1804800000 2150400000
+ 2208000000>,
+
+ /* Speed bin 4 */
+ <300000000 1113600000 1401600000
+ 1747200000 1958400000 2150400000
2208000000>;
qcom,allow-voltage-interpolation;
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-audio-common.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-audio-common.dtsi
index 97ab78bdb619..3928836b8e80 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-audio-common.dtsi
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-audio-common.dtsi
@@ -18,14 +18,14 @@
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&pcmnoirq>,
- <&loopback1>;
+ <&loopback1>, <&pcm_dtmf>;
asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
"msm-pcm-dsp.2", "msm-voip-dsp",
"msm-pcm-voice", "msm-pcm-loopback",
"msm-compress-dsp", "msm-pcm-hostless",
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-pcm-dsp-noirq",
- "msm-pcm-loopback.1";
+ "msm-pcm-loopback.1", "msm-pcm-dtmf";
asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, <&dai_hdmi>,
<&dai_mi2s_sec>, <&dai_mi2s>, <&dai_mi2s_quat>,
<&afe_pcm_rx>, <&afe_pcm_tx>,
@@ -119,6 +119,10 @@
qcom,destroy-cvd;
};
+ pcm_dtmf: qcom,msm-pcm-dtmf {
+ compatible = "qcom,msm-pcm-dtmf";
+ };
+
stub_codec: qcom,msm-stub-codec {
compatible = "qcom,msm-stub-codec";
};
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-baseline.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-baseline.dts
index 11640f245c7c..aa884e29d653 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-baseline.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-baseline.dts
@@ -25,5 +25,23 @@
qcom,hab {
compatible = "qcom,hab";
vmid = <3>;
+
+ mmidgrp600: mmidgrp600 {
+ grp-start-id = <600>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp700: mmidgrp700 {
+ grp-start-id = <700>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp900: mmidgrp900 {
+ grp-start-id = <900>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-blsp.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-blsp.dtsi
index 61b48802540a..514bf45aa0a6 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-blsp.dtsi
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-blsp.dtsi
@@ -10,6 +10,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <dt-bindings/interrupt-controller/irq.h>
+
/ {
aliases {
spi9 = &spi_9;
@@ -94,7 +96,7 @@
<0x7544000 0x2b000>;
reg-names = "core_mem", "bam_mem";
interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
- interrupts = <0 108 0>, <0 238 0>, <0 810 0>;
+ interrupts = <0 108 0>, <0 238 0>, <0 810 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <0>;
qcom,inject-rx-on-wakeup;
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts
index 7900d963bef3..92f52a6de6e8 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts
@@ -46,6 +46,11 @@
};
};
};
+
+ bluetooth: bt_qca6174 {
+ compatible = "qca,qca6174";
+ qca,bt-reset-gpio = <&pm8994_gpios 19 0>; /* BT_EN */
+ };
};
&soc {
@@ -53,6 +58,71 @@
compatible = "qcom,msm-audio-ion-vm";
qcom,smmu-enabled;
};
+
+ qcom,hab {
+ compatible = "qcom,hab";
+ vmid = <2>;
+
+ mmidgrp100: mmidgrp100 {
+ grp-start-id = <100>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp200: mmidgrp200 {
+ grp-start-id = <200>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp300: mmidgrp300 {
+ grp-start-id = <300>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp400: mmidgrp400 {
+ grp-start-id = <400>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp500: mmidgrp500 {
+ grp-start-id = <500>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp600: mmidgrp600 {
+ grp-start-id = <600>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp700: mmidgrp700 {
+ grp-start-id = <700>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp800: mmidgrp800 {
+ grp-start-id = <800>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp900: mmidgrp900 {
+ grp-start-id = <900>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+
+ mmidgrp1000: mmidgrp1000 {
+ grp-start-id = <1000>;
+ role = "fe";
+ remote-vmids = <0>;
+ };
+ };
};
&soc {
@@ -120,7 +190,7 @@
};
&usb_detect {
- qcom,force-vbus-status-off;
+	qcom,force-vbus-status-off; /* on - adb, off - MTMD */
};
&android_usb {
@@ -131,6 +201,18 @@
status = "okay";
};
+&qusb_phy0 {
+ status = "okay";
+};
+
+&ssphy {
+ status = "okay";
+};
+
+&dbm_1p5 {
+ status = "okay";
+};
+
&pm8994_gpios {
gpio@c600 { /* GPIO 7 - adv7481 INT3 */
qcom,mode = <0>;
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi
index 4d1eda4d11af..9fb33950541c 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-modem.dtsi
@@ -73,7 +73,7 @@
qcom,use-ipa-tethering-bridge;
qcom,ipa-bam-remote-mode;
qcom,modem-cfg-emb-pipe-flt;
- clocks = <&clock_gcc clk_ipa_clk>;
+ clocks = <&clock_virt clk_ipa_clk>;
clock-names = "core_clk";
qcom,use-dma-zone;
qcom,msm-bus,name = "ipa";
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 3daf8d5d7878..fb0d1b252dc8 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1354,7 +1354,7 @@
pinctrl@fc06a000 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
+ compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
ranges = <0xfc068000 0xfc068000 0x100
0xfc06a000 0xfc06a000 0x4000>;
/* WARNING: revisit as pin spec has changed */
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 71e473d05fcc..620dc75362e5 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -16,7 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr);
}
-#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\
atomic64_t, \
counter), (val))
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 28c90bc372bd..78d325f3245a 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -795,6 +795,8 @@ static struct platform_device da8xx_dsp = {
.resource = da8xx_rproc_resources,
};
+static bool rproc_mem_inited __initdata;
+
#if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC)
static phys_addr_t rproc_base __initdata;
@@ -833,6 +835,8 @@ void __init da8xx_rproc_reserve_cma(void)
ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0);
if (ret)
pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret);
+ else
+ rproc_mem_inited = true;
}
#else
@@ -847,6 +851,12 @@ int __init da8xx_register_rproc(void)
{
int ret;
+ if (!rproc_mem_inited) {
+ pr_warn("%s: memory not reserved for DSP, not registering DSP device\n",
+ __func__);
+ return -ENOMEM;
+ }
+
ret = platform_device_register(&da8xx_dsp);
if (ret)
pr_err("%s: can't register DSP device: %d\n", __func__, ret);
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index 5b0f752d5507..24be631e487d 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -133,6 +133,9 @@ struct device * __init imx_soc_device_init(void)
case MXC_CPU_IMX6UL:
soc_id = "i.MX6UL";
break;
+ case MXC_CPU_IMX6ULL:
+ soc_id = "i.MX6ULL";
+ break;
case MXC_CPU_IMX7D:
soc_id = "i.MX7D";
break;
diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h
index a5b1af6d7441..478cd91d0885 100644
--- a/arch/arm/mach-imx/mxc.h
+++ b/arch/arm/mach-imx/mxc.h
@@ -39,6 +39,7 @@
#define MXC_CPU_IMX6SX 0x62
#define MXC_CPU_IMX6Q 0x63
#define MXC_CPU_IMX6UL 0x64
+#define MXC_CPU_IMX6ULL 0x65
#define MXC_CPU_IMX7D 0x72
#define IMX_DDR_TYPE_LPDDR2 1
@@ -171,6 +172,11 @@ static inline bool cpu_is_imx6ul(void)
return __mxc_cpu_type == MXC_CPU_IMX6UL;
}
+static inline bool cpu_is_imx6ull(void)
+{
+ return __mxc_cpu_type == MXC_CPU_IMX6ULL;
+}
+
static inline bool cpu_is_imx6q(void)
{
return __mxc_cpu_type == MXC_CPU_IMX6Q;
diff --git a/arch/arm64/configs/msm-auto-gvm-perf_defconfig b/arch/arm64/configs/msm-auto-gvm-perf_defconfig
index 91115071f99b..55655ac06803 100644
--- a/arch/arm64/configs/msm-auto-gvm-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-gvm-perf_defconfig
@@ -164,8 +164,13 @@ CONFIG_RMNET_DATA_FC=y
CONFIG_RMNET_DATA_DEBUG_PKT=y
CONFIG_CAN=y
CONFIG_CAN_RH850=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_DEVTMPFS=y
@@ -223,6 +228,8 @@ CONFIG_DIAG_CHAR=y
CONFIG_MSM_SMD_PKT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS=y
+CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=y
diff --git a/arch/arm64/configs/msm-auto-gvm_defconfig b/arch/arm64/configs/msm-auto-gvm_defconfig
index 56f6629ee4fb..177ccef17f77 100644
--- a/arch/arm64/configs/msm-auto-gvm_defconfig
+++ b/arch/arm64/configs/msm-auto-gvm_defconfig
@@ -163,8 +163,13 @@ CONFIG_RMNET_DATA_FC=y
CONFIG_RMNET_DATA_DEBUG_PKT=y
CONFIG_CAN=y
CONFIG_CAN_RH850=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_DEVTMPFS=y
@@ -225,6 +230,8 @@ CONFIG_DIAG_CHAR=y
CONFIG_MSM_SMD_PKT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS=y
+CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SPI=y
CONFIG_SPI_DEBUG=y
CONFIG_SPI_QUP=y
diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig
index 03446d70ca77..5e05377f68d8 100644
--- a/arch/arm64/configs/msm-auto-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-perf_defconfig
@@ -232,6 +232,8 @@ CONFIG_MSM_BT_POWER=y
CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_IPC_ROUTER=y
@@ -497,7 +499,6 @@ CONFIG_IPA_UT=y
CONFIG_GPIO_USB_DETECT=y
CONFIG_MSM_MHI=y
CONFIG_MSM_MHI_UCI=y
-CONFIG_MSM_11AD=y
CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
CONFIG_MSM_MDSS_PLL=y
diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig
index b033d8bd041f..5b8139aa060f 100644
--- a/arch/arm64/configs/msm-auto_defconfig
+++ b/arch/arm64/configs/msm-auto_defconfig
@@ -234,6 +234,8 @@ CONFIG_MSM_BT_POWER=y
CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_IPC_ROUTER=y
@@ -503,7 +505,6 @@ CONFIG_GPIO_USB_DETECT=y
CONFIG_MSM_MHI=y
CONFIG_MSM_MHI_UCI=y
CONFIG_MSM_MHI_DEBUG=y
-CONFIG_MSM_11AD=y
CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
CONFIG_MSM_MDSS_PLL=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 72ef08668808..61e1e347532d 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -232,6 +232,8 @@ CONFIG_MSM_BT_POWER=y
CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 039efa9a16e0..54d4c7e87ccc 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -233,6 +233,8 @@ CONFIG_MSM_BT_POWER=y
CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_RFKILL=y
@@ -694,6 +696,7 @@ CONFIG_IPC_LOGGING=y
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/arm64/configs/msmcortex_mediabox-perf_defconfig b/arch/arm64/configs/msmcortex_mediabox-perf_defconfig
index 0feefe5ba25a..2c7be3c7b5d6 100644
--- a/arch/arm64/configs/msmcortex_mediabox-perf_defconfig
+++ b/arch/arm64/configs/msmcortex_mediabox-perf_defconfig
@@ -232,6 +232,7 @@ CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
diff --git a/arch/arm64/configs/msmcortex_mediabox_defconfig b/arch/arm64/configs/msmcortex_mediabox_defconfig
index 8c1bce2e4f12..03e43dc0b7c0 100644
--- a/arch/arm64/configs/msmcortex_mediabox_defconfig
+++ b/arch/arm64/configs/msmcortex_mediabox_defconfig
@@ -233,6 +233,7 @@ CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_CFG80211_WEXT=y
diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig
index 81cf6eb30e90..1a832addca83 100644
--- a/arch/arm64/configs/sdm660-perf_defconfig
+++ b/arch/arm64/configs/sdm660-perf_defconfig
@@ -232,6 +232,8 @@ CONFIG_MSM_BT_POWER=y
CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig
index bd7ec3ab3a94..1614bb44106b 100644
--- a/arch/arm64/configs/sdm660_defconfig
+++ b/arch/arm64/configs/sdm660_defconfig
@@ -233,6 +233,8 @@ CONFIG_MSM_BT_POWER=y
CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_RFKILL=y
@@ -695,6 +697,7 @@ CONFIG_IPC_LOGGING=y
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 85c4a8981d47..f32b42e8725d 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -48,16 +48,16 @@ do { \
} while (0)
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
+ int oparg = (int)(encoded_op << 8) >> 20;
+ int cmparg = (int)(encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
+ oparg = 1U << (oparg & 0x1f);
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index c97ce91cf023..c39872a7b03c 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -256,7 +256,6 @@ monotonic_raw:
seqcnt_check fail=monotonic_raw
/* All computations are done with left-shifted nsecs. */
- lsl x14, x14, x12
get_nsec_per_sec res=x9
lsl x9, x9, x12
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index daba1f9a4f79..174aedce3167 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -40,7 +40,8 @@ typedef union mips_instruction kprobe_opcode_t;
#define flush_insn_slot(p) \
do { \
- flush_icache_range((unsigned long)p->addr, \
+ if (p->addr) \
+ flush_icache_range((unsigned long)p->addr, \
(unsigned long)p->addr + \
(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
} while (0)
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 832e2167d00f..ef7c02af7522 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -18,6 +18,10 @@
#include <asm-generic/pgtable-nopmd.h>
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
extern int temp_tlb_entry;
/*
@@ -61,7 +65,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
#define VMALLOC_START MAP_BASE
-#define PKMAP_BASE (0xfe000000UL)
+#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index b6e20f3053f4..a827655c052c 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -1238,6 +1238,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
{
__kernel_size_t res;
+#ifdef CONFIG_CPU_MICROMIPS
+/* micromips memset / bzero also clobbers t7 & t8 */
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
+#else
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
+#endif /* CONFIG_CPU_MICROMIPS */
+
if (eva_kernel_access()) {
__asm__ __volatile__(
"move\t$4, %1\n\t"
@@ -1247,7 +1254,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
- : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+ : bzero_clobbers);
} else {
might_fault();
__asm__ __volatile__(
@@ -1258,7 +1265,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
- : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+ : bzero_clobbers);
}
return res;
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 8f0019a2e5c8..2d33cf2185d9 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -218,7 +218,7 @@
1: PTR_ADDIU a0, 1 /* fill bytewise */
R10KCBARRIER(0(ra))
bne t1, a0, 1b
- sb a1, -1(a0)
+ EX(sb, a1, -1(a0), .Lsmall_fixup\@)
2: jr ra /* done */
move a2, zero
@@ -249,13 +249,18 @@
PTR_L t0, TI_TASK($28)
andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
- LONG_ADDU a2, t1
+ LONG_ADDU a2, a0
jr ra
LONG_SUBU a2, t0
.Llast_fixup\@:
jr ra
- andi v1, a2, STORMASK
+ nop
+
+.Lsmall_fixup\@:
+ PTR_SUBU a2, t1, a0
+ jr ra
+ PTR_ADDIU a2, 1
.endm
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911ba748..b19a3c506b1e 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -51,15 +51,15 @@ void __init pagetable_init(void)
/*
* Fixed mappings:
*/
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+ fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
#ifdef CONFIG_HIGHMEM
/*
* Permanent kmaps:
*/
vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+ fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
pgd = swapper_pg_dir + __pgd_offset(vaddr);
pud = pud_offset(pgd, vaddr);
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index dba508fe1683..4f7060ec6875 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -648,6 +648,10 @@ static int match_pci_device(struct device *dev, int index,
(modpath->mod == PCI_FUNC(devfn)));
}
+ /* index might be out of bounds for bc[] */
+ if (index >= 6)
+ return 0;
+
id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
return (modpath->bc[index] == id);
}
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 0eca6efc0631..b9e16855a037 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -36,7 +36,8 @@
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
-#ifdef __SUBARCH_HAS_LWSYNC
+/* The sub-arch has lwsync */
+#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
# define SMPWMB LWSYNC
#else
# define SMPWMB eieio
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 07a99e638449..bab3461115bb 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -21,6 +21,9 @@
/* We calculate number of sg entries based on PAGE_SIZE */
#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
+#define OPAL_BUSY_DELAY_MS 10
+
/* /sys/firmware/opal */
extern struct kobject *opal_kobj;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 3140c19c448c..70b379ee6b7e 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -132,7 +132,19 @@ extern long long virt_phys_offset;
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * On hash the vmalloc and other regions alias to the kernel region when passed
+ * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
+ * return true for some vmalloc addresses, which is incorrect. So explicitly
+ * check that the address is in the kernel region.
+ */
+#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
+ pfn_valid(virt_to_pfn(kaddr)))
+#else
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+#endif
/*
* On Book-E parts we need __va to parse the device tree and we can't
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index c50868681f9e..e8d6a842f4bb 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -5,10 +5,6 @@
#include <linux/stringify.h>
#include <asm/feature-fixups.h>
-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
-#define __SUBARCH_HAS_LWSYNC
-#endif
-
#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 98f81800e00c..304f07cfa262 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -788,7 +788,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/* PCI Command: 0x4 */
- eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
+ eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
/* Check the PCIe link is ready */
eeh_bridge_check_link(edev);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1be1092c7204..9baba9576e99 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -686,12 +686,20 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+ unsigned int tcr;
+
/* Clear any pending timer interrupts */
mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
- /* Enable decrementer interrupt */
- mtspr(SPRN_TCR, TCR_DIE);
-#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
+ tcr = mfspr(SPRN_TCR);
+ /*
+ * The watchdog may have already been enabled by u-boot. So leave
+	 * TCR[WP] (Watchdog Period) alone.
+ */
+ tcr &= TCR_WP_MASK; /* Clear all bits except for TCR[WP] */
+ tcr |= TCR_DIE; /* Enable decrementer */
+ mtspr(SPRN_TCR, tcr);
+#endif
}
void __init generic_calibrate_decr(void)
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index f2c75a1e0536..0d91baf63fed 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
pteg_addr = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
+ ret = H_FUNCTION;
+ if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
+ goto done;
hpte = pteg;
ret = H_PTEG_FULL;
@@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
pteg_addr += i * HPTE_SIZE;
- copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
+ goto done;
kvmppc_set_gpr(vcpu, 4, pte_index | i);
ret = H_SUCCESS;
@@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+ goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
goto done;
- copy_to_user((void __user *)pteg, &v, sizeof(v));
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
+ goto done;
rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
@@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
}
pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
+ ret = H_FUNCTION;
+ break;
+ }
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
tsh |= H_BULK_REMOVE_NOT_FOUND;
} else {
/* Splat the pteg in (userland) hpt */
- copy_to_user((void __user *)pteg, &v, sizeof(v));
+ if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
+ ret = H_FUNCTION;
+ break;
+ }
rb = compute_tlbie_rb(pte[0], pte[1],
tsh & H_BULK_REMOVE_PTEX);
@@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+ goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
pte[0] = (__force u64)cpu_to_be64(pte[0]);
pte[1] = (__force u64)cpu_to_be64(pte[1]);
- copy_to_user((void __user *)pteg, pte, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
+ goto done;
ret = H_SUCCESS;
done:
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index a18d648d31a6..3af014684872 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -53,7 +53,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
unsigned int *target = (unsigned int *)branch_target(src);
/* Branch within the section doesn't need translating */
- if (target < alt_start || target >= alt_end) {
+ if (target < alt_start || target > alt_end) {
instr = translate_branch(dest, src);
if (!instr)
return 1;
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index be6212ddbf06..7e42e3ec2142 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -174,6 +174,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
if (!dump_skip(cprm,
roundup(cprm->written - total + sz, 4) - cprm->written))
goto Eio;
+
+ rc = 0;
out:
free_page((unsigned long)buf);
return rc;
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index 9db4398ded5d..1bceb95f422d 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -11,6 +11,7 @@
#define DEBUG
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
@@ -56,9 +57,17 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_write_nvram(__pa(buf), count, off);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
+ } else if (rc == OPAL_BUSY) {
+ msleep(OPAL_BUSY_DELAY_MS);
+ }
}
+
+ if (rc)
+ return -EIO;
+
*index += count;
return count;
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 0ba746d8912c..e92a684e855d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -111,6 +111,7 @@ config S390
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_DEVICES if !SMP
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_FIND_FIRST_BIT
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
@@ -706,6 +707,51 @@ config SECCOMP
If unsure, say Y.
+config KERNEL_NOBP
+ def_bool n
+ prompt "Enable modified branch prediction for the kernel by default"
+ help
+ If this option is selected the kernel will switch to a modified
+ branch prediction mode if the firmware interface is available.
+ The modified branch prediction mode improves the behaviour in
+ regard to speculative execution.
+
+ With the option enabled the kernel parameter "nobp=0" or "nospec"
+ can be used to run the kernel in the normal branch prediction mode.
+
+ With the option disabled the modified branch prediction mode is
+ enabled with the "nobp=1" kernel parameter.
+
+ If unsure, say N.
+
+config EXPOLINE
+ def_bool n
+ prompt "Avoid speculative indirect branches in the kernel"
+ help
+ Compile the kernel with the expoline compiler options to guard
+ against kernel-to-user data leaks by avoiding speculative indirect
+ branches.
+ Requires a compiler with -mindirect-branch=thunk support for full
+ protection. The kernel may run slower.
+
+ If unsure, say N.
+
+choice
+ prompt "Expoline default"
+ depends on EXPOLINE
+ default EXPOLINE_FULL
+
+config EXPOLINE_OFF
+ bool "spectre_v2=off"
+
+config EXPOLINE_AUTO
+ bool "spectre_v2=auto"
+
+config EXPOLINE_FULL
+ bool "spectre_v2=on"
+
+endchoice
+
endmenu
menu "Power Management"
@@ -755,6 +801,7 @@ config PFAULT
config SHARED_KERNEL
bool "VM shared kernel support"
depends on !JUMP_LABEL
+ depends on !ALTERNATIVES
help
Select this option, if you want to share the text segment of the
Linux kernel between different VM guests. This reduces memory
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index e8d4423e4f85..d924f9b6dc73 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -77,6 +77,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
+ifdef CONFIG_EXPOLINE
+ ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
+ CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
+ CC_FLAGS_EXPOLINE += -mfunction-return=thunk
+ CC_FLAGS_EXPOLINE += -mindirect-branch-table
+ export CC_FLAGS_EXPOLINE
+ cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+ endif
+endif
+
ifdef CONFIG_FUNCTION_TRACER
# make use of hotpatch feature if the compiler supports it
cc_hotpatch := -mhotpatch=0,3
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index b2e5902bd8f4..c670279b33f0 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -318,7 +318,7 @@ static void hypfs_kill_super(struct super_block *sb)
if (sb->s_root)
hypfs_delete_tree(sb->s_root);
- if (sb_info->update_file)
+ if (sb_info && sb_info->update_file)
hypfs_remove(sb_info->update_file);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
new file mode 100644
index 000000000000..a72002056b54
--- /dev/null
+++ b/arch/s390/include/asm/alternative.h
@@ -0,0 +1,149 @@
+#ifndef _ASM_S390_ALTERNATIVE_H
+#define _ASM_S390_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+ s32 instr_offset; /* original instruction */
+ s32 repl_offset; /* offset to replacement instruction */
+ u16 facility; /* facility bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction */
+} __packed;
+
+void apply_alternative_instructions(void);
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+/*
+ * |661: |662: |6620 |663:
+ * +-----------+---------------------+
+ * | oldinstr | oldinstr_padding |
+ * | +----------+----------+
+ * | | | |
+ * | | >6 bytes |6/4/2 nops|
+ * | |6 bytes jg----------->
+ * +-----------+---------------------+
+ * ^^ static padding ^^
+ *
+ * .altinstr_replacement section
+ * +---------------------+-----------+
+ * |6641: |6651:
+ * | alternative instr 1 |
+ * +-----------+---------+- - - - - -+
+ * |6642: |6652: |
+ * | alternative instr 2 | padding
+ * +---------------------+- - - - - -+
+ * ^ runtime ^
+ *
+ * .altinstructions section
+ * +---------------------------------+
+ * | alt_instr entries for each |
+ * | alternative instr |
+ * +---------------------------------+
+ */
+
+#define b_altinstr(num) "664"#num
+#define e_altinstr(num) "665"#num
+
+#define e_oldinstr_pad_end "663"
+#define oldinstr_len "662b-661b"
+#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
+#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
+#define oldinstr_pad_len(num) \
+ "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
+ "((" altinstr_len(num) ")-(" oldinstr_len "))"
+
+#define INSTR_LEN_SANITY_CHECK(len) \
+ ".if " len " > 254\n" \
+ "\t.error \"cpu alternatives does not support instructions " \
+ "blocks > 254 bytes\"\n" \
+ ".endif\n" \
+ ".if (" len ") %% 2\n" \
+ "\t.error \"cpu alternatives instructions length is odd\"\n" \
+ ".endif\n"
+
+#define OLDINSTR_PADDING(oldinstr, num) \
+ ".if " oldinstr_pad_len(num) " > 6\n" \
+ "\tjg " e_oldinstr_pad_end "f\n" \
+ "6620:\n" \
+ "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+ ".else\n" \
+ "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
+ ".endif\n"
+
+#define OLDINSTR(oldinstr, num) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ OLDINSTR_PADDING(oldinstr, num) \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR_2(oldinstr, num1, num2) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
+ OLDINSTR_PADDING(oldinstr, num2) \
+ ".else\n" \
+ OLDINSTR_PADDING(oldinstr, num1) \
+ ".endif\n" \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define ALTINSTR_ENTRY(facility, num) \
+ "\t.long 661b - .\n" /* old instruction */ \
+ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
+ "\t.word " __stringify(facility) "\n" /* facility bit */ \
+ "\t.byte " oldinstr_total_len "\n" /* source len */ \
+ "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
+
+#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
+ b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
+ INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr, 1) \
+ ".popsection\n" \
+ OLDINSTR(oldinstr, 1) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility, 1) \
+ ".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr1, 1) \
+ ALTINSTR_REPLACEMENT(altinstr2, 2) \
+ ".popsection\n" \
+ OLDINSTR_2(oldinstr, 1, 2) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility1, 1) \
+ ALTINSTR_ENTRY(facility2, 2) \
+ ".popsection\n"
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows to use optimized instructions even on generic binary
+ * kernels.
+ *
+ * oldinstr is padded with jump and nops at compile time if altinstr is
+ * longer. altinstr is padded with jump and nops at run-time during patching.
+ *
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, altinstr, facility) \
+ asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+
+#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+ asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
+ altinstr2, facility2) ::: "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_H */
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d68e11e0df5e..e903b28e7358 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -53,4 +53,28 @@ do { \
___p1; \
})
+/**
+ * array_index_mask_nospec - generate a mask for array_idx() that is
+ * ~0UL when the bounds check succeeds and 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ unsigned long mask;
+
+ if (__builtin_constant_p(size) && size > 0) {
+ asm(" clgr %2,%1\n"
+ " slbgr %0,%0\n"
+ :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
+ return mask;
+ }
+ asm(" clgr %1,%2\n"
+ " slbgr %0,%0\n"
+ :"=d" (mask) : "d" (size), "d" (index) :"cc");
+ return ~mask;
+}
+
#endif /* __ASM_BARRIER_H */
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 0aa6a7ed95a3..155fcc7bcba6 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -13,6 +13,24 @@
#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
+static inline void __set_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr = (unsigned char *) facilities;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return;
+ ptr[nr >> 3] |= 0x80 >> (nr & 7);
+}
+
+static inline void __clear_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr = (unsigned char *) facilities;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return;
+ ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
+}
+
static inline int __test_facility(unsigned long nr, void *facilities)
{
unsigned char *ptr;
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e9a983f40a24..7d9c5917da2b 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -136,7 +136,8 @@ struct kvm_s390_sie_block {
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
- __u8 reserved60; /* 0x0060 */
+#define FPF_BPBC 0x20
+ __u8 fpf; /* 0x0060 */
__u8 ecb; /* 0x0061 */
__u8 ecb2; /* 0x0062 */
#define ECB3_AES 0x04
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index afe1cfebf1a4..8520c23e419b 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -155,7 +155,9 @@ struct _lowcore {
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
- __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
+ /* br %r1 trampoline */
+ __u16 br_r1_trampoline; /* 0x0440 */
+ __u8 pad_0x0442[0x0e00-0x0442]; /* 0x0442 */
/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -170,7 +172,8 @@ struct _lowcore {
__u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
/* Extended facility list */
- __u64 stfle_fac_list[32]; /* 0x0f00 */
+ __u64 stfle_fac_list[16]; /* 0x0f00 */
+ __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
__u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
/* Pointer to vector register save area */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
new file mode 100644
index 000000000000..b4bd8c41e9d3
--- /dev/null
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_EXPOLINE_H
+#define _ASM_S390_EXPOLINE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+extern int nospec_disable;
+
+void nospec_init_branches(void);
+void nospec_auto_detect(void);
+void nospec_revert(s32 *start, s32 *end);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c61ed7890cef..f915a0f1b0fc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -69,6 +69,7 @@ extern void s390_adjust_jiffies(void);
extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);
+extern void __bpon(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -315,6 +316,9 @@ extern void memcpy_absolute(void *, void *, size_t);
memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
}
+extern int s390_isolate_bp(void);
+extern int s390_isolate_bp_guest(void);
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 692b9247c019..b2504163c8fa 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -78,6 +78,8 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_SECCOMP 5 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_UPROBE 7 /* breakpointed or single-stepping */
+#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
+#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_31BIT 16 /* 32bit process */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
@@ -93,6 +95,8 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
#define _TIF_UPROBE _BITUL(TIF_UPROBE)
+#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
+#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
#define _TIF_31BIT _BITUL(TIF_31BIT)
#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index ef1a5fcc6c66..beb508a9e72c 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -151,6 +151,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_ARCH0 (1UL << 4)
#define KVM_SYNC_PFAULT (1UL << 5)
#define KVM_SYNC_VRS (1UL << 6)
+#define KVM_SYNC_BPBC (1UL << 10)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -168,6 +169,8 @@ struct kvm_sync_regs {
__u64 vrs[32][2]; /* vector registers */
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* only valid with vector registers */
+ __u8 bpbc : 1; /* bp mode */
+ __u8 reserved2 : 7;
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index dc167a23b920..8ccfbf22ecbb 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -44,10 +44,13 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o dumpstack.o
-obj-y += entry.o reipl.o relocate_kernel.o
+obj-y += entry.o reipl.o relocate_kernel.o alternative.o
+obj-y += nospec-branch.o
extra-y += head.o head64.o vmlinux.lds
+CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
+
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_BOOK) += topology.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
new file mode 100644
index 000000000000..b57b293998dc
--- /dev/null
+++ b/arch/s390/kernel/alternative.c
@@ -0,0 +1,112 @@
+#include <linux/module.h>
+#include <asm/alternative.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+#define MAX_PATCH_LEN (255 - 1)
+
+static int __initdata_or_module alt_instr_disabled;
+
+static int __init disable_alternative_instructions(char *str)
+{
+ alt_instr_disabled = 1;
+ return 0;
+}
+
+early_param("noaltinstr", disable_alternative_instructions);
+
+struct brcl_insn {
+ u16 opc;
+ s32 disp;
+} __packed;
+
+static u16 __initdata_or_module nop16 = 0x0700;
+static u32 __initdata_or_module nop32 = 0x47000000;
+static struct brcl_insn __initdata_or_module nop48 = {
+ 0xc004, 0
+};
+
+static const void *nops[] __initdata_or_module = {
+ &nop16,
+ &nop32,
+ &nop48
+};
+
+static void __init_or_module add_jump_padding(void *insns, unsigned int len)
+{
+ struct brcl_insn brcl = {
+ 0xc0f4,
+ len / 2
+ };
+
+ memcpy(insns, &brcl, sizeof(brcl));
+ insns += sizeof(brcl);
+ len -= sizeof(brcl);
+
+ while (len > 0) {
+ memcpy(insns, &nop16, 2);
+ insns += 2;
+ len -= 2;
+ }
+}
+
+static void __init_or_module add_padding(void *insns, unsigned int len)
+{
+ if (len > 6)
+ add_jump_padding(insns, len);
+ else if (len >= 2)
+ memcpy(insns, nops[len / 2 - 1], len);
+}
+
+static void __init_or_module __apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ struct alt_instr *a;
+ u8 *instr, *replacement;
+ u8 insnbuf[MAX_PATCH_LEN];
+
+ /*
+ * The scan order should be from start to end. A later scanned
+ * alternative code can overwrite previously scanned alternative code.
+ */
+ for (a = start; a < end; a++) {
+ int insnbuf_sz = 0;
+
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
+
+ if (!__test_facility(a->facility,
+ S390_lowcore.alt_stfle_fac_list))
+ continue;
+
+ if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
+ WARN_ONCE(1, "cpu alternatives instructions length is "
+ "odd, skipping patching\n");
+ continue;
+ }
+
+ memcpy(insnbuf, replacement, a->replacementlen);
+ insnbuf_sz = a->replacementlen;
+
+ if (a->instrlen > a->replacementlen) {
+ add_padding(insnbuf + a->replacementlen,
+ a->instrlen - a->replacementlen);
+ insnbuf_sz += a->instrlen - a->replacementlen;
+ }
+
+ s390_kernel_write(instr, insnbuf, insnbuf_sz);
+ }
+}
+
+void __init_or_module apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ if (!alt_instr_disabled)
+ __apply_alternatives(start, end);
+}
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+void __init apply_alternative_instructions(void)
+{
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index ee7b8e7ca4f8..8eccead675d4 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -279,6 +279,11 @@ static noinline __init void setup_facility_list(void)
{
stfle(S390_lowcore.stfle_fac_list,
ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+ memcpy(S390_lowcore.alt_stfle_fac_list,
+ S390_lowcore.stfle_fac_list,
+ sizeof(S390_lowcore.alt_stfle_fac_list));
+ if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}
static __init void detect_diag9c(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 4612ed7ec2e5..c63730326215 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -104,6 +104,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
j 3f
1: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,\timer
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
2: lg %r15,__LC_ASYNC_STACK # load async stack
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
@@ -162,8 +163,137 @@ _PIF_WORK = (_PIF_PER_TRAP)
tm off+\addr, \mask
.endm
+ .macro BPOFF
+ .pushsection .altinstr_replacement, "ax"
+660: .long 0xb2e8c000
+ .popsection
+661: .long 0x47000000
+ .pushsection .altinstructions, "a"
+ .long 661b - .
+ .long 660b - .
+ .word 82
+ .byte 4
+ .byte 4
+ .popsection
+ .endm
+
+ .macro BPON
+ .pushsection .altinstr_replacement, "ax"
+662: .long 0xb2e8d000
+ .popsection
+663: .long 0x47000000
+ .pushsection .altinstructions, "a"
+ .long 663b - .
+ .long 662b - .
+ .word 82
+ .byte 4
+ .byte 4
+ .popsection
+ .endm
+
+ .macro BPENTER tif_ptr,tif_mask
+ .pushsection .altinstr_replacement, "ax"
+662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
+ .word 0xc004, 0x0000, 0x0000 # 6 byte nop
+ .popsection
+664: TSTMSK \tif_ptr,\tif_mask
+ jz . + 8
+ .long 0xb2e8d000
+ .pushsection .altinstructions, "a"
+ .long 664b - .
+ .long 662b - .
+ .word 82
+ .byte 12
+ .byte 12
+ .popsection
+ .endm
+
+ .macro BPEXIT tif_ptr,tif_mask
+ TSTMSK \tif_ptr,\tif_mask
+ .pushsection .altinstr_replacement, "ax"
+662: jnz . + 8
+ .long 0xb2e8d000
+ .popsection
+664: jz . + 8
+ .long 0xb2e8c000
+ .pushsection .altinstructions, "a"
+ .long 664b - .
+ .long 662b - .
+ .word 82
+ .byte 8
+ .byte 8
+ .popsection
+ .endm
+
+#ifdef CONFIG_EXPOLINE
+
+ .macro GEN_BR_THUNK name,reg,tmp
+ .section .text.\name,"axG",@progbits,\name,comdat
+ .globl \name
+ .hidden \name
+ .type \name,@function
+\name:
+ .cfi_startproc
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ exrl 0,0f
+#else
+ larl \tmp,0f
+ ex 0,0(\tmp)
+#endif
+ j .
+0: br \reg
+ .cfi_endproc
+ .endm
+
+ GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
+ GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
+ GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
+
+ .macro BASR_R14_R9
+0: brasl %r14,__s390x_indirect_jump_r1use_r9
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+ .macro BR_R1USE_R14
+0: jg __s390x_indirect_jump_r1use_r14
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+ .macro BR_R11USE_R14
+0: jg __s390x_indirect_jump_r11use_r14
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+#else /* CONFIG_EXPOLINE */
+
+ .macro BASR_R14_R9
+ basr %r14,%r9
+ .endm
+
+ .macro BR_R1USE_R14
+ br %r14
+ .endm
+
+ .macro BR_R11USE_R14
+ br %r14
+ .endm
+
+#endif /* CONFIG_EXPOLINE */
+
+
.section .kprobes.text, "ax"
+ENTRY(__bpon)
+ .globl __bpon
+ BPON
+ BR_R1USE_R14
+
/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -190,9 +320,9 @@ ENTRY(__switch_to)
mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
- bzr %r14
+ jz 0f
.insn s,0xb2800000,__LC_LPP # set program parameter
- br %r14
+0: BR_R1USE_R14
.L__critical_start:
@@ -204,9 +334,11 @@ ENTRY(__switch_to)
*/
ENTRY(sie64a)
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
+ lg %r12,__LC_CURRENT
stg %r2,__SF_EMPTY(%r15) # save control block pointer
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
+ mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
jno .Lsie_load_guest_gprs
brasl %r14,load_fpu_regs # load guest fp/vx regs
@@ -223,7 +355,11 @@ ENTRY(sie64a)
jnz .Lsie_skip
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsie_skip # exit if fp/vx regs changed
+ BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
sie 0(%r14)
+.Lsie_exit:
+ BPOFF
+ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
@@ -244,9 +380,15 @@ ENTRY(sie64a)
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ xgr %r0,%r0 # clear guest registers to
+ xgr %r1,%r1 # prevent speculative use
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
- br %r14
+ BR_R1USE_R14
.Lsie_fault:
lghi %r14,-EFAULT
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
@@ -267,6 +409,7 @@ ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ BPOFF
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lghi %r14,_PIF_SYSCALL
@@ -276,12 +419,15 @@ ENTRY(system_call)
LAST_BREAK %r13
.Lsysc_vtime:
UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
+ # clear user controlled register to prevent speculative use
+ xgr %r0,%r0
lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
@@ -299,7 +445,7 @@ ENTRY(system_call)
lgf %r9,0(%r8,%r10) # get system call add.
TSTMSK __TI_flags(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
- basr %r14,%r9 # call sys_xxxx
+ BASR_R14_R9 # call sys_xxxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
@@ -311,6 +457,7 @@ ENTRY(system_call)
jnz .Lsysc_work # check for work
TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lsysc_work
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
@@ -438,7 +585,7 @@ ENTRY(system_call)
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lg %r2,__PT_ORIG_GPR2(%r11)
- basr %r14,%r9 # call sys_xxx
+ BASR_R14_R9 # call sys_xxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
TSTMSK __TI_flags(%r12),_TIF_TRACE
@@ -462,7 +609,7 @@ ENTRY(ret_from_fork)
lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
- basr %r14,%r9
+ BASR_R14_R9
j .Lsysc_tracenogo
/*
@@ -471,6 +618,7 @@ ENTRY(kernel_thread_starter)
ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -495,6 +643,7 @@ ENTRY(pgm_check_handler)
j 3f
2: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
lg %r14,__TI_task(%r12)
aghi %r14,__TASK_thread # pointer to thread_struct
@@ -504,6 +653,15 @@ ENTRY(pgm_check_handler)
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
@@ -525,9 +683,9 @@ ENTRY(pgm_check_handler)
nill %r10,0x007f
sll %r10,2
je .Lpgm_return
- lgf %r1,0(%r10,%r1) # load address of handler routine
+ lgf %r9,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # branch to interrupt-handler
+ BASR_R14_R9 # branch to interrupt-handler
.Lpgm_return:
LOCKDEP_SYS_EXIT
tm __PT_PSW+1(%r11),0x01 # returning to user ?
@@ -560,6 +718,7 @@ ENTRY(pgm_check_handler)
ENTRY(io_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -567,6 +726,16 @@ ENTRY(io_int_handler)
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
@@ -601,9 +770,13 @@ ENTRY(io_int_handler)
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jno .Lio_exit_kernel
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+.Lio_exit_kernel:
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
.Lio_done:
@@ -735,6 +908,7 @@ ENTRY(io_int_handler)
ENTRY(ext_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -742,6 +916,16 @@ ENTRY(ext_int_handler)
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
lghi %r1,__LC_EXT_PARAMS2
@@ -773,11 +957,12 @@ ENTRY(psw_idle)
.insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
#endif
+ BPON
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
- br %r14
+ BR_R1USE_R14
.Lpsw_idle_end:
/*
@@ -791,7 +976,7 @@ ENTRY(save_fpu_regs)
lg %r2,__LC_CURRENT
aghi %r2,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bor %r14
+ jo .Lsave_fpu_regs_exit
stfpc __THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
lg %r3,__THREAD_FPU_regs(%r2)
@@ -821,7 +1006,8 @@ ENTRY(save_fpu_regs)
std 15,120(%r3)
.Lsave_fpu_regs_done:
oi __LC_CPU_FLAGS+7,_CIF_FPU
- br %r14
+.Lsave_fpu_regs_exit:
+ BR_R1USE_R14
.Lsave_fpu_regs_end:
/*
@@ -838,7 +1024,7 @@ load_fpu_regs:
lg %r4,__LC_CURRENT
aghi %r4,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bnor %r14
+ jno .Lload_fpu_regs_exit
lfpc __THREAD_FPU_fpc(%r4)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
@@ -867,7 +1053,8 @@ load_fpu_regs:
ld 15,120(%r4)
.Lload_fpu_regs_done:
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
- br %r14
+.Lload_fpu_regs_exit:
+ BR_R1USE_R14
.Lload_fpu_regs_end:
.L__critical_end:
@@ -877,6 +1064,7 @@ load_fpu_regs:
*/
ENTRY(mcck_int_handler)
STCK __LC_MCCK_CLOCK
+ BPOFF
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
@@ -908,6 +1096,16 @@ ENTRY(mcck_int_handler)
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
@@ -933,6 +1131,7 @@ ENTRY(mcck_int_handler)
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0: lmg %r11,%r15,__PT_R11(%r11)
@@ -1028,7 +1227,7 @@ cleanup_critical:
jl 0f
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
-0: br %r14
+0: BR_R11USE_R14
.align 8
.Lcleanup_table:
@@ -1053,11 +1252,12 @@ cleanup_critical:
.quad .Lsie_done
.Lcleanup_sie:
+ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
lg %r9,__SF_EMPTY(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
- br %r14
+ BR_R11USE_R14
#endif
.Lcleanup_system_call:
@@ -1099,7 +1299,8 @@ cleanup_critical:
srag %r9,%r9,23
jz 0f
mvc __TI_last_break(8,%r12),16(%r11)
-0: # set up saved register r11
+0: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ # set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
stg %r9,24(%r11) # r11 pt_regs pointer
@@ -1114,7 +1315,7 @@ cleanup_critical:
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,.Lsysc_do_svc
- br %r14
+ BR_R11USE_R14
.Lcleanup_system_call_insn:
.quad system_call
.quad .Lsysc_stmg
@@ -1124,7 +1325,7 @@ cleanup_critical:
.Lcleanup_sysc_tif:
larl %r9,.Lsysc_tif
- br %r14
+ BR_R11USE_R14
.Lcleanup_sysc_restore:
# check if stpt has been executed
@@ -1141,14 +1342,14 @@ cleanup_critical:
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
+ BR_R11USE_R14
.Lcleanup_sysc_restore_insn:
.quad .Lsysc_exit_timer
.quad .Lsysc_done - 4
.Lcleanup_io_tif:
larl %r9,.Lio_tif
- br %r14
+ BR_R11USE_R14
.Lcleanup_io_restore:
# check if stpt has been executed
@@ -1162,7 +1363,7 @@ cleanup_critical:
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
+ BR_R11USE_R14
.Lcleanup_io_restore_insn:
.quad .Lio_exit_timer
.quad .Lio_done - 4
@@ -1214,17 +1415,17 @@ cleanup_critical:
# prepare return psw
nihh %r8,0xfcfd # clear irq & wait state bits
lg %r9,48(%r11) # return from psw_idle
- br %r14
+ BR_R11USE_R14
.Lcleanup_idle_insn:
.quad .Lpsw_idle_lpsw
.Lcleanup_save_fpu_regs:
larl %r9,save_fpu_regs
- br %r14
+ BR_R11USE_R14
.Lcleanup_load_fpu_regs:
larl %r9,load_fpu_regs
- br %r14
+ BR_R11USE_R14
/*
* Integer constants
@@ -1240,7 +1441,6 @@ cleanup_critical:
.Lsie_critical_length:
.quad .Lsie_done - .Lsie_gmap
#endif
-
.section .rodata, "a"
#define SYSCALL(esame,emu) .long esame
.globl sys_call_table
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 42570d8fb265..837bb301023f 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -563,6 +563,7 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
+ __bpon();
diag308(DIAG308_IPL, NULL);
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
@@ -798,6 +799,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
/* copy and convert to ebcdic */
memcpy(ipb->hdr.loadparm, buf, lp_len);
ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
+ ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
return len;
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 0c1a679314dd..9bd1933848b8 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,9 @@
#include <linux/kernel.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
+#include <asm/facility.h>
#if 0
#define DEBUGP printk
@@ -163,7 +166,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
me->arch.got_offset = me->core_size;
me->core_size += me->arch.got_size;
me->arch.plt_offset = me->core_size;
- me->core_size += me->arch.plt_size;
+ if (me->arch.plt_size) {
+ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
+ me->arch.plt_size += PLT_ENTRY_SIZE;
+ me->core_size += me->arch.plt_size;
+ }
return 0;
}
@@ -317,9 +324,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
unsigned int *ip;
ip = me->module_core + me->arch.plt_offset +
info->plt_offset;
- ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
- ip[1] = 0x100a0004;
- ip[2] = 0x07f10000;
+ ip[0] = 0x0d10e310; /* basr 1,0 */
+ ip[1] = 0x100a0004; /* lg 1,10(1) */
+ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
+ unsigned int *ij;
+ ij = me->module_core +
+ me->arch.plt_offset +
+ me->arch.plt_size - PLT_ENTRY_SIZE;
+ ip[2] = 0xa7f40000 + /* j __jump_r1 */
+ (unsigned int)(u16)
+ (((unsigned long) ij - 8 -
+ (unsigned long) ip) / 2);
+ } else {
+ ip[2] = 0x07f10000; /* br %r1 */
+ }
ip[3] = (unsigned int) (val >> 32);
ip[4] = (unsigned int) val;
info->plt_initialized = 1;
@@ -424,6 +442,45 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
+ const Elf_Shdr *s;
+ char *secstrings, *secname;
+ void *aseg;
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ !nospec_disable && me->arch.plt_size) {
+ unsigned int *ij;
+
+ ij = me->module_core + me->arch.plt_offset +
+ me->arch.plt_size - PLT_ENTRY_SIZE;
+ if (test_facility(35)) {
+ ij[0] = 0xc6000000; /* exrl %r0,.+10 */
+ ij[1] = 0x0005a7f4; /* j . */
+ ij[2] = 0x000007f1; /* br %r1 */
+ } else {
+ ij[0] = 0x44000000 | (unsigned int)
+ offsetof(struct _lowcore, br_r1_trampoline);
+ ij[1] = 0xa7f40000; /* j . */
+ }
+ }
+
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ aseg = (void *) s->sh_addr;
+ secname = secstrings + s->sh_name;
+
+ if (!strcmp(".altinstructions", secname))
+ /* patch .altinstructions */
+ apply_alternatives(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ (!strncmp(".s390_indirect", secname, 14)))
+ nospec_revert(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ (!strncmp(".s390_return", secname, 12)))
+ nospec_revert(aseg, aseg + s->sh_size);
+ }
+
jump_label_apply_nops(me);
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
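
Note on the PLT change above: the `j __jump_r1` word built in apply_rela() encodes a signed 16-bit offset counted in halfwords, relative to the address of the j instruction itself, which sits at byte 8 of the PLT entry (hence the `- 8`). A minimal user-space sketch of that encoding, with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uintptr_t ip = 0x10000;  /* pretend address of the PLT entry */
    	uintptr_t ij = 0x10100;  /* pretend address of the __jump_r1 trampoline */

    	/* 'j' is brc 15 (0xa7f4); its immediate counts 2-byte halfwords
    	 * from the address of the instruction located at ip + 8. */
    	uint32_t insn = 0xa7f40000 + (uint16_t)((ij - 8 - ip) / 2);

    	printf("j encoding: 0x%08x\n", insn);  /* prints 0xa7f4007c */
    	return 0;
    }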
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
new file mode 100644
index 000000000000..9f3b5b382743
--- /dev/null
+++ b/arch/s390/kernel/nospec-branch.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/device.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+static int __init nobp_setup_early(char *str)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(str, &enabled);
+ if (rc)
+ return rc;
+ if (enabled && test_facility(82)) {
+ /*
+ * The user explicitly requested nobp=1, enable it and
+ * disable the expoline support.
+ */
+ __set_facility(82, S390_lowcore.alt_stfle_fac_list);
+ if (IS_ENABLED(CONFIG_EXPOLINE))
+ nospec_disable = 1;
+ } else {
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ return 0;
+}
+early_param("nobp", nobp_setup_early);
+
+static int __init nospec_setup_early(char *str)
+{
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ return 0;
+}
+early_param("nospec", nospec_setup_early);
+
+static int __init nospec_report(void)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ pr_info("Spectre V2 mitigation: execute trampolines.\n");
+ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ pr_info("Spectre V2 mitigation: limited branch prediction.\n");
+ return 0;
+}
+arch_initcall(nospec_report);
+
+#ifdef CONFIG_SYSFS
+ssize_t cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ return sprintf(buf, "Mitigation: execute trampolines\n");
+ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ return sprintf(buf, "Mitigation: limited branch prediction.\n");
+ return sprintf(buf, "Vulnerable\n");
+}
+#endif
+
+#ifdef CONFIG_EXPOLINE
+
+int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+
+static int __init nospectre_v2_setup_early(char *str)
+{
+ nospec_disable = 1;
+ return 0;
+}
+early_param("nospectre_v2", nospectre_v2_setup_early);
+
+void __init nospec_auto_detect(void)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE)) {
+ /*
+ * The kernel has been compiled with expolines.
+ * Keep expolines enabled and disable nobp.
+ */
+ nospec_disable = 0;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ /*
+ * If the kernel has not been compiled with expolines the
+ * nobp setting decides what is done; this depends on the
+ * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
+ */
+}
+
+static int __init spectre_v2_setup_early(char *str)
+{
+ if (str && !strncmp(str, "on", 2)) {
+ nospec_disable = 0;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ if (str && !strncmp(str, "off", 3))
+ nospec_disable = 1;
+ if (str && !strncmp(str, "auto", 4))
+ nospec_auto_detect();
+ return 0;
+}
+early_param("spectre_v2", spectre_v2_setup_early);
+
+static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+{
+ enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+ u8 *instr, *thunk, *br;
+ u8 insnbuf[6];
+ s32 *epo;
+
+ /* Second part of the instruction replace is always a nop */
+ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
+ for (epo = start; epo < end; epo++) {
+ instr = (u8 *) epo + *epo;
+ if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+ type = BRCL_EXPOLINE; /* brcl instruction */
+ else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
+ type = BRASL_EXPOLINE; /* brasl instruction */
+ else
+ continue;
+ thunk = instr + (*(int *)(instr + 2)) * 2;
+ if (thunk[0] == 0xc6 && thunk[1] == 0x00)
+ /* exrl %r0,<target-br> */
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
+ thunk[6] == 0x44 && thunk[7] == 0x00 &&
+ (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
+ (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
+ /* larl %rx,<target br> + ex %r0,0(%rx) */
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else
+ continue;
+ if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
+ continue;
+ switch (type) {
+ case BRCL_EXPOLINE:
+ /* brcl to thunk, replace with br + nop */
+ insnbuf[0] = br[0];
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+ break;
+ case BRASL_EXPOLINE:
+ /* brasl to thunk, replace with basr + nop */
+ insnbuf[0] = 0x0d;
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+ break;
+ }
+
+ s390_kernel_write(instr, insnbuf, 6);
+ }
+}
+
+void __init_or_module nospec_revert(s32 *start, s32 *end)
+{
+ if (nospec_disable)
+ __nospec_revert(start, end);
+}
+
+extern s32 __nospec_call_start[], __nospec_call_end[];
+extern s32 __nospec_return_start[], __nospec_return_end[];
+void __init nospec_init_branches(void)
+{
+ nospec_revert(__nospec_call_start, __nospec_call_end);
+ nospec_revert(__nospec_return_start, __nospec_return_end);
+}
+
+#endif /* CONFIG_EXPOLINE */
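
For reference, __nospec_revert() above is doing plain RIL-format decoding: a 6-byte brcl/brasl has opcode byte 0xc0, the low nibble of byte 1 selects brcl (0x4) or brasl (0x5), and bytes 2..5 hold a signed 32-bit offset counted in halfwords from the instruction address. A minimal user-space sketch of that decoding (the example instruction and addresses are invented; the kernel reads the immediate natively because s390 is big-endian):

    #include <stdint.h>
    #include <stdio.h>

    /* Signed 32-bit halfword offset of a RIL-format instruction,
     * stored big-endian in bytes 2..5. */
    static int32_t ril_offset(const uint8_t *insn)
    {
    	return (int32_t)((uint32_t)insn[2] << 24 | (uint32_t)insn[3] << 16 |
    			 (uint32_t)insn[4] << 8  | (uint32_t)insn[5]);
    }

    int main(void)
    {
    	/* brcl 15,<+0x100 bytes>: mask 0xf, offset 0x80 halfwords */
    	uint8_t brcl[6] = { 0xc0, 0xf4, 0x00, 0x00, 0x00, 0x80 };
    	uintptr_t instr = 0x1000;   /* pretend address of the brcl */
    	uintptr_t target = instr + 2 * (intptr_t)ril_offset(brcl);

    	if (brcl[0] == 0xc0 && (brcl[1] & 0x0f) == 0x04)
    		printf("brcl to thunk at 0x%lx\n", (unsigned long)target);
    	return 0;
    }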
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 7ce00e7a709a..ab236bd970bb 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <asm/diag.h>
#include <asm/elf.h>
+#include <asm/facility.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/smp.h>
@@ -113,3 +114,20 @@ const struct seq_operations cpuinfo_op = {
.show = show_cpuinfo,
};
+int s390_isolate_bp(void)
+{
+ if (!test_facility(82))
+ return -EOPNOTSUPP;
+ set_thread_flag(TIF_ISOLATE_BP);
+ return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp);
+
+int s390_isolate_bp_guest(void)
+{
+ if (!test_facility(82))
+ return -EOPNOTSUPP;
+ set_thread_flag(TIF_ISOLATE_BP_GUEST);
+ return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp_guest);
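
Both helpers only set a per-task TIF flag and return -EOPNOTSUPP when facility 82 is not installed, so callers can treat the error as "run without isolation". A hedged sketch of a caller, assuming the prototype is visible from the arch header added elsewhere in this series (the function below is invented for illustration):

    #include <linux/printk.h>

    /* Illustrative only: opt the current task into branch-prediction
     * isolation if the machine supports it, otherwise carry on. */
    static void maybe_isolate_bp(void)
    {
    	if (s390_isolate_bp() == -EOPNOTSUPP)
    		pr_info("facility 82 not installed, no BP isolation\n");
    }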
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d097d71685df..e7a43a30e3ff 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -63,6 +63,8 @@
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
#include "entry.h"
/*
@@ -333,7 +335,9 @@ static void __init setup_lowcore(void)
lc->machine_flags = S390_lowcore.machine_flags;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- MAX_FACILITY_BIT/8);
+ sizeof(lc->stfle_fac_list));
+ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+ sizeof(lc->alt_stfle_fac_list));
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
@@ -370,6 +374,7 @@ static void __init setup_lowcore(void)
#ifdef CONFIG_SMP
lc->spinlock_lockval = arch_spin_lockval(0);
#endif
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
@@ -841,6 +846,9 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
+ if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
+ nospec_auto_detect();
+
parse_early_param();
os_info_init();
setup_ipl();
@@ -893,6 +901,10 @@ void __init setup_arch(char **cmdline_p)
conmode_default();
set_preferred_console();
+ apply_alternative_instructions();
+ if (IS_ENABLED(CONFIG_EXPOLINE))
+ nospec_init_branches();
+
/* Setup zfcpdump support */
setup_zfcpdump();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9062df575afe..77f4f334a465 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -200,6 +200,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
@@ -250,7 +251,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
__ctl_store(lc->cregs_save_area, 0, 15);
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- MAX_FACILITY_BIT/8);
+ sizeof(lc->stfle_fac_list));
+ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+ sizeof(lc->alt_stfle_fac_list));
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
@@ -299,6 +302,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
mem_assign_absolute(lc->restart_fn, (unsigned long) func);
mem_assign_absolute(lc->restart_data, (unsigned long) data);
mem_assign_absolute(lc->restart_source, source_cpu);
+ __bpon();
asm volatile(
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
@@ -888,6 +892,7 @@ void __cpu_die(unsigned int cpu)
void __noreturn cpu_die(void)
{
idle_task_exit();
+ __bpon();
pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ;
}
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 66956c09d5bf..3d04dfdabc9f 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -147,6 +147,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
return orig;
}
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return user_stack_pointer(regs) <= ret->stack;
+ else
+ return user_stack_pointer(regs) < ret->stack;
+}
+
/* Instruction Emulation */
static void adjust_psw_addr(psw_t *psw, unsigned long len)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 0f41a8286378..fb98894a1361 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -21,8 +21,14 @@ SECTIONS
{
. = 0x00000000;
.text : {
- _text = .; /* Text and read-only data */
+ /* Text and read-only data */
HEAD_TEXT
+ /*
+ * E.g. perf doesn't like symbols starting at address zero,
+ * therefore skip the initial PSW and channel program located
+ * at address zero and let _text start at 0x200.
+ */
+ _text = 0x200;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
@@ -73,6 +79,43 @@ SECTIONS
EXIT_DATA
}
+ /*
+ * struct alt_inst entries. From the header (alternative.h):
+ * "Alternative instructions for different CPU types or capabilities"
+ * Think locking instructions on spinlocks.
+ * Note, that it is a part of __init region.
+ */
+ . = ALIGN(8);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
+ /*
+ * And here are the replacement instructions. The linker sticks
+ * them as binary blobs. The .altinstructions has enough data to
+ * get the address and the length of them to patch the kernel safely.
+ * Note, that it is a part of __init region.
+ */
+ .altinstr_replacement : {
+ *(.altinstr_replacement)
+ }
+
+ /*
+ * Table with the patch locations to undo expolines
+ */
+ .nospec_call_table : {
+ __nospec_call_start = . ;
+ *(.s390_indirect*)
+ __nospec_call_end = . ;
+ }
+ .nospec_return_table : {
+ __nospec_return_start = . ;
+ *(.s390_return*)
+ __nospec_return_end = . ;
+ }
+
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 23e3f5d77a24..b011140e6b06 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -257,6 +257,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_VECTOR_REGISTERS:
r = MACHINE_HAS_VX;
break;
+ case KVM_CAP_S390_BPB:
+ r = test_facility(82);
+ break;
default:
r = 0;
}
@@ -1264,6 +1267,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
KVM_SYNC_PFAULT;
if (test_kvm_facility(vcpu->kvm, 129))
vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
+ if (test_kvm_facility(vcpu->kvm, 82))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
if (kvm_is_ucontrol(vcpu->kvm))
return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1327,6 +1332,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
current->thread.fpu.fpc = 0;
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu);
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -2145,6 +2151,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
kvm_clear_async_pf_completion_queue(vcpu);
}
+ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+ test_kvm_facility(vcpu->kvm, 82)) {
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+ vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+ }
kvm_run->kvm_dirty_regs = 0;
}
@@ -2162,6 +2173,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+ kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
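
On the user-space side, the new KVM_CAP_S390_BPB capability and the KVM_SYNC_BPBC dirty bit are consumed through the usual sync-regs path shown in sync_regs()/store_regs() above. A hedged sketch, assuming headers new enough to define the capability, an open VM fd and the vcpu's mmap'ed kvm_run (error handling trimmed):

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static void request_bpbc(int vm_fd, struct kvm_run *run)
    {
    	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_BPB) <= 0) {
    		fprintf(stderr, "KVM_CAP_S390_BPB not supported\n");
    		return;
    	}
    	run->s.regs.bpbc = 1;			/* request BP isolation */
    	run->kvm_dirty_regs |= KVM_SYNC_BPBC;	/* picked up by sync_regs() */
    }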
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 59d503866431..9cc600b2d68c 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1733,9 +1733,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
lp->rcv_nxt = p->seqid;
+ /*
+ * If this is a control-only packet, there is nothing
+ * else to do but advance the rx queue since the packet
+ * was already processed above.
+ */
if (!(p->type & LDC_DATA)) {
new = rx_advance(lp, new);
- goto no_data;
+ break;
}
if (p->stype & (LDC_ACK | LDC_NACK)) {
err = data_ack_nack(lp, p);
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index c211153ca69a..56648f4f8b41 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -140,7 +140,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
static void hard_handler(int sig, siginfo_t *si, void *p)
{
- struct ucontext *uc = p;
+ ucontext_t *uc = p;
mcontext_t *mc = &uc->uc_mcontext;
unsigned long pending = 1UL << sig;
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
new file mode 100644
index 000000000000..512d009146dd
--- /dev/null
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -0,0 +1,442 @@
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_PCSPKR_PLATFORM is not set
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SMP=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+CONFIG_PARAVIRT_SPINLOCKS=y
+CONFIG_MCORE2=y
+CONFIG_PROCESSOR_SELECT=y
+# CONFIG_CPU_SUP_CENTAUR is not set
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+# CONFIG_MICROCODE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_TRANSPARENT_HUGEPAGE=y
+# CONFIG_MTRR is not set
+CONFIG_HZ_100=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_PHYSICAL_START=0x200000
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_PHYSICAL_ALIGN=0x1000000
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0 reboot=p"
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_ACPI_PROCFS_POWER=y
+# CONFIG_ACPI_FAN is not set
+# CONFIG_ACPI_THERMAL is not set
+# CONFIG_X86_PM_TIMER is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_X86_ACPI_CPUFREQ=y
+# CONFIG_X86_ACPI_CPUFREQ_CPB is not set
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_MSI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_IA32_EMULATION=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_RFKILL=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_OF=y
+CONFIG_OF_UNITTEST=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+# CONFIG_ETHERNET is not set
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_MAC80211_HWSIM=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=48
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_INTEL is not set
+# CONFIG_HW_RANDOM_AMD is not set
+# CONFIG_HW_RANDOM_VIA is not set
+CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_HPET=y
+# CONFIG_HPET_MMAP_DEFAULT is not set
+# CONFIG_DEVPORT is not set
+# CONFIG_ACPI_I2C_OPREGION is not set
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_GPIOLIB=y
+# CONFIG_HWMON is not set
+# CONFIG_X86_PKG_TEMP_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_SOFT_WATCHDOG=y
+CONFIG_MEDIA_SUPPORT=y
+# CONFIG_DVB_TUNER_DIB0070 is not set
+# CONFIG_DVB_TUNER_DIB0090 is not set
+# CONFIG_VGA_ARB is not set
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+# CONFIG_HID_GENERIC is not set
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_DUMMY_HCD=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_SW_SYNC_USER=y
+CONFIG_ANDROID_VSOC=y
+CONFIG_ION=y
+# CONFIG_X86_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+# CONFIG_FIRMWARE_MEMMAP is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_SDCARD_FS=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_PANIC_TIMEOUT=5
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_IO_DELAY_NONE=y
+CONFIG_DEBUG_BOOT_PARAMS=y
+CONFIG_OPTIMIZE_INLINING=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PATH=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_ECHAINIV=y
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c644a6..90ab9a795b49 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -1 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X64_MSGBUF_H
+#define __ASM_X64_MSGBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/msgbuf.h>
+#else
+/*
+ * The msqid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ __kernel_time_t msg_ctime; /* last change time */
+ __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
+ __kernel_ulong_t msg_qnum; /* number of messages in queue */
+ __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ __kernel_ulong_t __unused4;
+ __kernel_ulong_t __unused5;
+};
+
+#endif
+
+#endif /* __ASM_X64_MSGBUF_H */
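
The reason for the x32 carve-out is that the __kernel_* types stay 64-bit under the x32 ABI, so the structure layout matches x86_64 rather than a generic 32-bit architecture. A hedged compile-time check one could drop into a user-space unit built with -mx32:

    #include <asm/posix_types.h>

    /* Under the x32 ABI, kernel "long" types remain 64-bit even though
     * user-space long is 32-bit; that is what keeps msqid64_ds identical
     * to the x86_64 layout. */
    _Static_assert(sizeof(__kernel_ulong_t) == 8,
    	       "x32 kernel longs must be 64-bit");
    _Static_assert(sizeof(__kernel_time_t) == 8,
    	       "x32 kernel time_t must be 64-bit");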
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc2de38..644421f3823b 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -1 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X86_SHMBUF_H
+#define __ASM_X86_SHMBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/shmbuf.h>
+#else
+/*
+ * The shmid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ __kernel_time_t shm_dtime; /* last detach time */
+ __kernel_time_t shm_ctime; /* last change time */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ __kernel_ulong_t shm_nattch; /* no. of current attaches */
+ __kernel_ulong_t __unused4;
+ __kernel_ulong_t __unused5;
+};
+
+struct shminfo64 {
+ __kernel_ulong_t shmmax;
+ __kernel_ulong_t shmmin;
+ __kernel_ulong_t shmmni;
+ __kernel_ulong_t shmseg;
+ __kernel_ulong_t shmall;
+ __kernel_ulong_t __unused1;
+ __kernel_ulong_t __unused2;
+ __kernel_ulong_t __unused3;
+ __kernel_ulong_t __unused4;
+};
+
+#endif
+
+#endif /* __ASM_X86_SHMBUF_H */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index fe89f938e0f0..00c7878043ef 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1442,6 +1442,8 @@ static inline void mwait_play_dead(void)
void *mwait_ptr;
int i;
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return;
if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
if (!this_cpu_has(X86_FEATURE_CLFLUSH))
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c7c4d9c51e99..c42d4a3d9494 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -365,6 +365,8 @@ static int __init tsc_setup(char *str)
tsc_clocksource_reliable = 1;
if (!strncmp(str, "noirqtime", 9))
no_sched_irq_time = 1;
+ if (!strcmp(str, "unstable"))
+ mark_tsc_unstable("boot parameter");
return 1;
}
@@ -406,7 +408,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
hpet2 -= hpet1;
tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
do_div(tmp, 1000000);
- do_div(deltatsc, tmp);
+ deltatsc = div64_u64(deltatsc, tmp);
return (unsigned long) deltatsc;
}
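
The calc_hpet_ref() change matters because do_div() takes a 32-bit divisor; passing a 64-bit tmp silently truncates it, which is exactly what switching to div64_u64() avoids. A small user-space illustration with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t deltatsc = 10000000000ULL;	/* invented TSC delta */
    	uint64_t tmp = 0x100000001ULL;		/* divisor wider than 32 bits */

    	/* What do_div() would effectively divide by: only the low 32 bits. */
    	printf("truncated divisor: %u\n", (uint32_t)tmp);	/* 1 */
    	/* What div64_u64() computes: a full 64-by-64 division. */
    	printf("full division: %llu\n",
    	       (unsigned long long)(deltatsc / tmp));		/* 2 */
    	return 0;
    }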
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2038e5bacce6..42654375b73f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1386,6 +1386,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
*/
if (var->unusable)
var->db = 0;
+ /* This is symmetric with svm_set_segment() */
var->dpl = to_svm(vcpu)->vmcb->save.cpl;
break;
}
@@ -1531,18 +1532,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
s->base = var->base;
s->limit = var->limit;
s->selector = var->selector;
- if (var->unusable)
- s->attrib = 0;
- else {
- s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
- s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
- s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
- s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
- s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
- s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
- s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
- s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
- }
+ s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+ s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+ s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+ s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+ s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+ s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+ s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+ s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
/*
* This is always accurate, except if SYSRET returned to a segment
@@ -1551,7 +1548,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
* would entail passing the CPL to userspace and back.
*/
if (seg == VCPU_SREG_SS)
- svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+ /* This is symmetric with svm_get_segment() */
+ svm->vmcb->save.cpl = (var->dpl & 3);
mark_dirty(svm->vmcb, VMCB_SEG);
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 46bbc69844bd..528b4352fa99 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7657,11 +7657,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
int cr = exit_qualification & 15;
- int reg = (exit_qualification >> 8) & 15;
- unsigned long val = kvm_register_readl(vcpu, reg);
+ int reg;
+ unsigned long val;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
+ reg = (exit_qualification >> 8) & 15;
+ val = kvm_register_readl(vcpu, reg);
switch (cr) {
case 0:
if (vmcs12->cr0_guest_host_mask &
@@ -7716,6 +7718,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
* lmsw can change bits 1..3 of cr0, and only set bit 0 of
* cr0. Other attempted changes are ignored, with no exit.
*/
+ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow))
return true;
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 7e48807b2fa1..45a53dfe1859 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -55,7 +55,7 @@ ENTRY(csum_partial_copy_generic)
movq %r12, 3*8(%rsp)
movq %r14, 4*8(%rsp)
movq %r13, 5*8(%rsp)
- movq %rbp, 6*8(%rsp)
+ movq %r15, 6*8(%rsp)
movq %r8, (%rsp)
movq %r9, 1*8(%rsp)
@@ -74,7 +74,7 @@ ENTRY(csum_partial_copy_generic)
/* main loop. clear in 64 byte blocks */
/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
/* r11: temp3, rdx: temp4, r12 loopcnt */
- /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+ /* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */
.p2align 4
.Lloop:
source
@@ -89,7 +89,7 @@ ENTRY(csum_partial_copy_generic)
source
movq 32(%rdi), %r10
source
- movq 40(%rdi), %rbp
+ movq 40(%rdi), %r15
source
movq 48(%rdi), %r14
source
@@ -103,7 +103,7 @@ ENTRY(csum_partial_copy_generic)
adcq %r11, %rax
adcq %rdx, %rax
adcq %r10, %rax
- adcq %rbp, %rax
+ adcq %r15, %rax
adcq %r14, %rax
adcq %r13, %rax
@@ -121,7 +121,7 @@ ENTRY(csum_partial_copy_generic)
dest
movq %r10, 32(%rsi)
dest
- movq %rbp, 40(%rsi)
+ movq %r15, 40(%rsi)
dest
movq %r14, 48(%rsi)
dest
@@ -203,7 +203,7 @@ ENTRY(csum_partial_copy_generic)
movq 3*8(%rsp), %r12
movq 4*8(%rsp), %r14
movq 5*8(%rsp), %r13
- movq 6*8(%rsp), %rbp
+ movq 6*8(%rsp), %r15
addq $7*8, %rsp
ret
diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
index 1518d2805ae8..fd6825537b97 100644
--- a/arch/x86/um/stub_segv.c
+++ b/arch/x86/um/stub_segv.c
@@ -10,7 +10,7 @@
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig, siginfo_t *info, void *p)
{
- struct ucontext *uc = p;
+ ucontext_t *uc = p;
GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
&uc->uc_mcontext);
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index f6325d573c10..6e091ccadcd4 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
if (!bio_is_rw(bio))
return false;
+ if (!bio_sectors(bio))
+ return false;
+
/* Already protected? */
if (bio_integrity(bio))
return false;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1452db06ba45..d65ddc18d7e4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1252,13 +1252,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
+ blk_queue_split(q, &bio, q->bio_split);
+
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio_io_error(bio);
return BLK_QC_T_NONE;
}
- blk_queue_split(q, &bio, q->bio_split);
-
if (!is_flush_fua && !blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
return BLK_QC_T_NONE;
@@ -1634,7 +1634,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
{
unsigned flush_start_tag = set->queue_depth;
- blk_mq_tag_idle(hctx);
+ if (blk_mq_hw_queue_mapped(hctx))
+ blk_mq_tag_idle(hctx);
if (set->ops->exit_request)
set->ops->exit_request(set->driver_data,
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 19cf33b91a5a..f75be6a4411f 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -320,8 +320,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
if (info) {
struct partition_meta_info *pinfo = alloc_part_info(disk);
- if (!pinfo)
+ if (!pinfo) {
+ err = -ENOMEM;
goto out_free_stats;
+ }
memcpy(pinfo, info, sizeof(*info));
p->info = pinfo;
}
diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64
new file mode 100644
index 000000000000..b3d89109fe75
--- /dev/null
+++ b/build.config.cuttlefish.x86_64
@@ -0,0 +1,15 @@
+ARCH=x86_64
+BRANCH=android-4.4
+CLANG_TRIPLE=x86_64-linux-gnu-
+CROSS_COMPILE=x86_64-linux-androidkernel-
+DEFCONFIG=x86_64_cuttlefish_defconfig
+EXTRA_CMDS=''
+KERNEL_DIR=common
+POST_DEFCONFIG_CMDS="check_defconfig"
+CLANG_PREBUILT_BIN=prebuilts/clang/host/linux-x86/clang-4630689/bin
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
+FILES="
+arch/x86/boot/bzImage
+vmlinux
+System.map
+"
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 84f8d4d8b6bc..09f706b7b06e 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
dma_addr_t dma_dest[2];
int src_off = 0;
- if (submit->flags & ASYNC_TX_FENCE)
- dma_flags |= DMA_PREP_FENCE;
-
while (src_cnt > 0) {
submit->flags = flags_orig;
pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
@@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan,
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
/* Drivers force forward progress in case they can not provide
* a descriptor
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 10ce48e16ebf..d830705f8a18 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
ACPI_FUNCTION_TRACE(acpi_enable_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
@@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
ACPI_FUNCTION_TRACE(acpi_disable_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
@@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event)
ACPI_FUNCTION_TRACE(acpi_clear_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index e54bc2aa7a88..a05b3b79b987 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -121,6 +121,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
(u32)(aml_offset +
sizeof(struct acpi_table_header)));
+ ACPI_ERROR((AE_INFO,
+ "Aborting disassembly, AML byte code is corrupt"));
+
/* Dump the context surrounding the invalid opcode */
acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
@@ -129,6 +132,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
sizeof(struct acpi_table_header) -
16));
acpi_os_printf(" */\n");
+
+ /*
+ * Just abort the disassembly, cannot continue because the
+ * parser is essentially lost. The disassembler can then
+ * randomly fail because an ill-constructed parse tree
+ * can result.
+ */
+ return_ACPI_STATUS(AE_AML_BAD_OPCODE);
#endif
}
@@ -293,6 +304,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
if (status == AE_CTRL_PARSE_CONTINUE) {
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
}
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/* Create Op structure and append to parent's argument list */
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b48ecbfc4498..8c5503c0bad7 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -206,6 +206,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
+ .callback = video_detect_force_video,
+ .ident = "SAMSUNG 670Z5E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
.callback = video_detect_force_video,
.ident = "SAMSUNG 730U3E/740U3E",
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 42086ad535c5..1accc01fb0ca 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -68,11 +68,12 @@ static ssize_t driver_override_show(struct device *_dev,
struct device_attribute *attr, char *buf)
{
struct amba_device *dev = to_amba_device(_dev);
+ ssize_t len;
- if (!dev->driver_override)
- return 0;
-
- return sprintf(buf, "%s\n", dev->driver_override);
+ device_lock(_dev);
+ len = sprintf(buf, "%s\n", dev->driver_override);
+ device_unlock(_dev);
+ return len;
}
static ssize_t driver_override_store(struct device *_dev,
@@ -80,7 +81,7 @@ static ssize_t driver_override_store(struct device *_dev,
const char *buf, size_t count)
{
struct amba_device *dev = to_amba_device(_dev);
- char *driver_override, *old = dev->driver_override, *cp;
+ char *driver_override, *old, *cp;
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
@@ -94,12 +95,15 @@ static ssize_t driver_override_store(struct device *_dev,
if (cp)
*cp = '\0';
+ device_lock(_dev);
+ old = dev->driver_override;
if (strlen(driver_override)) {
dev->driver_override = driver_override;
} else {
kfree(driver_override);
dev->driver_override = NULL;
}
+ device_unlock(_dev);
kfree(old);
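
The driver_override changes take device_lock() around both the read and the pointer swap, capture the old string under the lock, and free it only after the unlock, so a concurrent show() can never print a just-freed buffer. A hedged generic sketch of the same swap-under-lock idiom (names invented, not the amba code itself):

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static DEFINE_MUTEX(override_lock);
    static char *override;

    /* Illustrative only: replace a heap string that readers may be
     * copying under the same lock. */
    static int set_override(const char *new)
    {
    	char *tmp, *old;

    	tmp = kstrdup(new, GFP_KERNEL);
    	if (!tmp)
    		return -ENOMEM;
    	mutex_lock(&override_lock);
    	old = override;
    	override = tmp;
    	mutex_unlock(&override_lock);
    	kfree(old);	/* no reader can still reference it */
    	return 0;
    }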
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index aaa761b9081c..cd2eab6aa92e 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(dev, "no irq\n");
- return -EINVAL;
+ if (irq != -EPROBE_DEFER)
+ dev_err(dev, "no irq\n");
+ return irq;
}
hpriv->irq = irq;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 576b5facdf43..b3a62e94d1f3 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1582,7 +1582,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
- if (map->max_raw_write && map->max_raw_write > val_len)
+ if (map->max_raw_write && map->max_raw_write < val_len)
return -E2BIG;
map->lock(map->lock_arg);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3e65ae144fde..e8165ec55e6f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -623,6 +623,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
*/
static int loop_flush(struct loop_device *lo)
{
+ /* loop not yet configured, no running thread, nothing to flush */
+ if (lo->lo_state != Lo_bound)
+ return 0;
return loop_switch(lo, NULL);
}
@@ -1118,11 +1121,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (info->lo_encrypt_type) {
unsigned int type = info->lo_encrypt_type;
- if (type >= MAX_LO_CRYPT)
- return -EINVAL;
+ if (type >= MAX_LO_CRYPT) {
+ err = -EINVAL;
+ goto exit;
+ }
xfer = xfer_funcs[type];
- if (xfer == NULL)
- return -EINVAL;
+ if (xfer == NULL) {
+ err = -EINVAL;
+ goto exit;
+ }
} else
xfer = NULL;
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index f364fa4d24eb..f59183018280 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -33,8 +33,6 @@
#define ARB_ERR_CAP_CLEAR (1 << 0)
#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
-#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2)
-#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c
#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
@@ -43,7 +41,6 @@ enum {
ARB_ERR_CAP_CLR,
ARB_ERR_CAP_HI_ADDR,
ARB_ERR_CAP_ADDR,
- ARB_ERR_CAP_DATA,
ARB_ERR_CAP_STATUS,
ARB_ERR_CAP_MASTER,
};
@@ -53,7 +50,6 @@ static const int gisb_offsets_bcm7038[] = {
[ARB_ERR_CAP_CLR] = 0x0c4,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0c8,
- [ARB_ERR_CAP_DATA] = 0x0cc,
[ARB_ERR_CAP_STATUS] = 0x0d0,
[ARB_ERR_CAP_MASTER] = -1,
};
@@ -63,7 +59,6 @@ static const int gisb_offsets_bcm7400[] = {
[ARB_ERR_CAP_CLR] = 0x0c8,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0cc,
- [ARB_ERR_CAP_DATA] = 0x0d0,
[ARB_ERR_CAP_STATUS] = 0x0d4,
[ARB_ERR_CAP_MASTER] = 0x0d8,
};
@@ -73,7 +68,6 @@ static const int gisb_offsets_bcm7435[] = {
[ARB_ERR_CAP_CLR] = 0x168,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x16c,
- [ARB_ERR_CAP_DATA] = 0x170,
[ARB_ERR_CAP_STATUS] = 0x174,
[ARB_ERR_CAP_MASTER] = 0x178,
};
@@ -83,7 +77,6 @@ static const int gisb_offsets_bcm7445[] = {
[ARB_ERR_CAP_CLR] = 0x7e4,
[ARB_ERR_CAP_HI_ADDR] = 0x7e8,
[ARB_ERR_CAP_ADDR] = 0x7ec,
- [ARB_ERR_CAP_DATA] = 0x7f0,
[ARB_ERR_CAP_STATUS] = 0x7f4,
[ARB_ERR_CAP_MASTER] = 0x7f8,
};
@@ -105,9 +98,13 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
{
int offset = gdev->gisb_offsets[reg];
- /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
- if (offset == -1)
- return 1;
+ if (offset < 0) {
+ /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+ if (reg == ARB_ERR_CAP_MASTER)
+ return 1;
+ else
+ return 0;
+ }
if (gdev->big_endian)
return ioread32be(gdev->base + offset);
@@ -115,6 +112,16 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
return ioread32(gdev->base + offset);
}
+static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
+{
+ u64 value;
+
+ value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
+ value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
+
+ return value;
+}
+
static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
{
int offset = gdev->gisb_offsets[reg];
@@ -123,9 +130,9 @@ static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
return;
if (gdev->big_endian)
- iowrite32be(val, gdev->base + reg);
+ iowrite32be(val, gdev->base + offset);
else
- iowrite32(val, gdev->base + reg);
+ iowrite32(val, gdev->base + offset);
}
static ssize_t gisb_arb_get_timeout(struct device *dev,
@@ -181,7 +188,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
const char *reason)
{
u32 cap_status;
- unsigned long arb_addr;
+ u64 arb_addr;
u32 master;
const char *m_name;
char m_fmt[11];
@@ -193,10 +200,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
return 1;
/* Read the address and master */
- arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
-#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
- arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
-#endif
+ arb_addr = gisb_read_address(gdev);
master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
m_name = brcmstb_gisb_master_to_str(gdev, master);
@@ -205,7 +209,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
m_name = m_fmt;
}
- pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n",
+ pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
__func__, reason, arb_addr,
cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index c206ccda899b..b5f245d2875c 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2358,7 +2358,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);
- if ((unsigned int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 16ff781cde65..b0b36d00415d 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -689,7 +689,7 @@ int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
byte_mask = 0x01 << (item_num % 8);
offset = equip_id * 514;
- if (offset + byte_index > DCI_LOG_MASK_SIZE) {
+ if (offset + byte_index >= DCI_LOG_MASK_SIZE) {
pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
__func__, offset, log_code, byte_index);
return 0;
@@ -716,7 +716,7 @@ int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
bit_index = event_id % 8;
byte_mask = 0x1 << bit_index;
- if (byte_index > DCI_EVENT_MASK_SIZE) {
+ if (byte_index >= DCI_EVENT_MASK_SIZE) {
pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
__func__, event_id, byte_index);
return 0;
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index a752cdc675a3..001a1b367dc6 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -385,12 +385,12 @@ int diag_md_close_peripheral(int id, uint8_t peripheral)
return 0;
}
-int diag_md_init()
+int diag_md_init(void)
{
int i, j;
struct diag_md_info *ch = NULL;
- for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+ for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
ch = &diag_md[i];
ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
ch->tbl = kzalloc(ch->num_tbl_entries *
@@ -414,12 +414,53 @@ fail:
return -ENOMEM;
}
-void diag_md_exit()
+int diag_md_mdm_init(void)
+{
+ int i, j;
+ struct diag_md_info *ch = NULL;
+
+ for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
+ ch = &diag_md[i];
+ ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
+ ch->tbl = kcalloc(ch->num_tbl_entries, sizeof(*ch->tbl),
+ GFP_KERNEL);
+ if (!ch->tbl)
+ goto fail;
+
+ for (j = 0; j < ch->num_tbl_entries; j++) {
+ ch->tbl[j].buf = NULL;
+ ch->tbl[j].len = 0;
+ ch->tbl[j].ctx = 0;
+ }
+ spin_lock_init(&(ch->lock));
+ }
+
+ return 0;
+
+fail:
+ diag_md_mdm_exit();
+ return -ENOMEM;
+}
+
+void diag_md_exit(void)
{
int i;
struct diag_md_info *ch = NULL;
- for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+ for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
+ ch = &diag_md[i];
+ kfree(ch->tbl);
+ ch->num_tbl_entries = 0;
+ ch->ops = NULL;
+ }
+}
+
+void diag_md_mdm_exit(void)
+{
+ int i;
+ struct diag_md_info *ch = NULL;
+
+ for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
ch = &diag_md[i];
kfree(ch->tbl);
ch->num_tbl_entries = 0;
diff --git a/drivers/char/diag/diag_memorydevice.h b/drivers/char/diag/diag_memorydevice.h
index 35a1ee35a956..9b4aa392233d 100644
--- a/drivers/char/diag/diag_memorydevice.h
+++ b/drivers/char/diag/diag_memorydevice.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,7 +46,9 @@ struct diag_md_info {
extern struct diag_md_info diag_md[NUM_DIAG_MD_DEV];
int diag_md_init(void);
+int diag_md_mdm_init(void);
void diag_md_exit(void);
+void diag_md_mdm_exit(void);
void diag_md_open_all(void);
void diag_md_close_all(void);
int diag_md_register(int id, int ctx, struct diag_mux_ops *ops);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index be7a565ca04e..6df597dfa750 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1001,6 +1001,7 @@ static int diag_remote_init(void)
poolsize_mdm_dci_write);
diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
poolsize_qsc_usb);
+ diag_md_mdm_init();
driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
if (!driver->hdlc_encode_buf)
return -ENOMEM;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index f53e8ba2c718..83c206f0fc98 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -409,6 +409,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
msg = ipmi_alloc_smi_msg();
if (!msg) {
ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -431,6 +432,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
msg = ipmi_alloc_smi_msg();
if (!msg) {
ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1822472dffab..dffd06a3bb76 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -724,7 +724,7 @@ retry:
static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
- const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+ const int nbits_max = r->poolinfo->poolwords * 32;
if (nbits < 0)
return -EINVAL;
@@ -886,12 +886,16 @@ static void add_interrupt_bench(cycles_t start)
static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
__u32 *ptr = (__u32 *) regs;
+ unsigned int idx;
if (regs == NULL)
return 0;
- if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
- f->reg_idx = 0;
- return *(ptr + f->reg_idx++);
+ idx = READ_ONCE(f->reg_idx);
+ if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+ idx = 0;
+ ptr += idx++;
+ WRITE_ONCE(f->reg_idx, idx);
+ return *ptr;
}
void add_interrupt_randomness(int irq, int irq_flags)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index be0b09a0fb44..2aca689061e1 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1399,7 +1399,6 @@ static int add_port(struct ports_device *portdev, u32 id)
{
char debugfs_name[16];
struct port *port;
- struct port_buffer *buf;
dev_t devt;
unsigned int nr_added_bufs;
int err;
@@ -1510,8 +1509,6 @@ static int add_port(struct ports_device *portdev, u32 id)
return 0;
free_inbufs:
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
free_device:
device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
@@ -1536,34 +1533,14 @@ static void remove_port(struct kref *kref)
static void remove_port_data(struct port *port)
{
- struct port_buffer *buf;
-
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
spin_unlock_irq(&port->inbuf_lock);
- /* Remove buffers we queued up for the Host to send us data in. */
- do {
- spin_lock_irq(&port->inbuf_lock);
- buf = virtqueue_detach_unused_buf(port->in_vq);
- spin_unlock_irq(&port->inbuf_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
-
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
-
- /* Free pending buffers from the out-queue. */
- do {
- spin_lock_irq(&port->outvq_lock);
- buf = virtqueue_detach_unused_buf(port->out_vq);
- spin_unlock_irq(&port->outvq_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
}
/*
@@ -1788,13 +1765,24 @@ static void control_work_handler(struct work_struct *work)
spin_unlock(&portdev->c_ivq_lock);
}
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(vq, &len)))
+ free_buf(buf, can_sleep);
+}
+
static void out_intr(struct virtqueue *vq)
{
struct port *port;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
wake_up_interruptible(&port->waitqueue);
}
@@ -1805,8 +1793,10 @@ static void in_intr(struct virtqueue *vq)
unsigned long flags;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
spin_lock_irqsave(&port->inbuf_lock, flags);
port->inbuf = get_inbuf(port);
@@ -1981,6 +1971,15 @@ static const struct file_operations portdev_fops = {
static void remove_vqs(struct ports_device *portdev)
{
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq(portdev->vdev, vq) {
+ struct port_buffer *buf;
+
+ flush_bufs(vq, true);
+ while ((buf = virtqueue_detach_unused_buf(vq)))
+ free_buf(buf, true);
+ }
portdev->vdev->config->del_vqs(portdev->vdev);
kfree(portdev->in_vqs);
kfree(portdev->out_vqs);
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 7c4b1ffe874f..d56ba46e6b78 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -891,9 +891,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
const struct bcm2835_pll_data *data = pll->data;
spin_lock(&cprman->regs_lock);
- cprman_write(cprman, data->cm_ctrl_reg,
- cprman_read(cprman, data->cm_ctrl_reg) |
- CM_PLL_ANARST);
+ cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
cprman_write(cprman, data->a2w_ctrl_reg,
cprman_read(cprman, data->a2w_ctrl_reg) |
A2W_PLL_CTRL_PWRDN);
@@ -929,6 +927,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
cpu_relax();
}
+ cprman_write(cprman, data->a2w_ctrl_reg,
+ cprman_read(cprman, data->a2w_ctrl_reg) |
+ A2W_PLL_CTRL_PRST_DISABLE);
+
return 0;
}
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 43a218f35b19..4ad32ce428cf 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -106,7 +106,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
rc = clk_set_rate(clk, rate);
if (rc < 0)
- pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n",
+ pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
__clk_get_name(clk), rate, rc,
clk_get_rate(clk));
clk_put(clk);
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index cd0f2726f5e0..c40445488d3a 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -71,15 +71,15 @@ static const struct clk_ops scpi_clk_ops = {
};
/* find closest match to given frequency in OPP table */
-static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
+static long __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
{
int idx;
- u32 fmin = 0, fmax = ~0, ftmp;
+ unsigned long fmin = 0, fmax = ~0, ftmp;
const struct scpi_opp *opp = clk->info->opps;
for (idx = 0; idx < clk->info->count; idx++, opp++) {
ftmp = opp->freq;
- if (ftmp >= (u32)rate) {
+ if (ftmp >= rate) {
if (ftmp <= fmax)
fmax = ftmp;
break;
diff --git a/drivers/clk/msm/virtclk-front-8996.c b/drivers/clk/msm/virtclk-front-8996.c
index 68ef5967df58..f2a70f240984 100644
--- a/drivers/clk/msm/virtclk-front-8996.c
+++ b/drivers/clk/msm/virtclk-front-8996.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <dt-bindings/clock/msm-clocks-8996.h>
+#include "virtclk-front.h"
#include "virt-reset-front.h"
static struct virtclk_front gcc_blsp1_ahb_clk = {
@@ -498,6 +499,33 @@ static struct virtclk_front gcc_mss_mnoc_bimc_axi_clk = {
},
};
+static struct virtclk_front ipa_clk = {
+ .c = {
+ .dbg_name = "ipa",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(ipa_clk.c),
+ },
+ .flag = CLOCK_FLAG_NODE_TYPE_REMOTE,
+};
+
+static struct virtclk_front pnoc_clk = {
+ .c = {
+ .dbg_name = "pnoc",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(pnoc_clk.c),
+ },
+ .flag = CLOCK_FLAG_NODE_TYPE_REMOTE,
+};
+
+static struct virtclk_front qdss_clk = {
+ .c = {
+ .dbg_name = "qdss",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(qdss_clk.c),
+ },
+ .flag = CLOCK_FLAG_NODE_TYPE_REMOTE,
+};
+
static struct clk_lookup msm_clocks_8996[] = {
CLK_LIST(gcc_blsp1_ahb_clk),
CLK_LIST(gcc_blsp1_qup1_spi_apps_clk),
@@ -559,6 +587,9 @@ static struct clk_lookup msm_clocks_8996[] = {
CLK_LIST(gpll0_out_msscc),
CLK_LIST(gcc_mss_snoc_axi_clk),
CLK_LIST(gcc_mss_mnoc_bimc_axi_clk),
+ CLK_LIST(ipa_clk),
+ CLK_LIST(pnoc_clk),
+ CLK_LIST(qdss_clk),
};
static struct virt_reset_map msm_resets_8996[] = {
diff --git a/drivers/clk/msm/virtclk-front.c b/drivers/clk/msm/virtclk-front.c
index 2d8a9e8ec61c..ad89dda6514f 100644
--- a/drivers/clk/msm/virtclk-front.c
+++ b/drivers/clk/msm/virtclk-front.c
@@ -62,7 +62,7 @@ static int virtclk_front_get_id(struct clk *clk)
if (v->id)
return ret;
- msg.header.cmd = CLK_MSG_GETID;
+ msg.header.cmd = CLK_MSG_GETID | v->flag;
msg.header.len = sizeof(msg);
strlcpy(msg.name, clk->dbg_name, sizeof(msg.name));
@@ -119,7 +119,7 @@ static int virtclk_front_prepare(struct clk *clk)
return ret;
msg.clk_id = v->id;
- msg.cmd = CLK_MSG_ENABLE;
+ msg.cmd = CLK_MSG_ENABLE | v->flag;
msg.len = sizeof(struct clk_msg_header);
rt_mutex_lock(&virtclk_front_ctx.lock);
@@ -173,7 +173,7 @@ static void virtclk_front_unprepare(struct clk *clk)
return;
msg.clk_id = v->id;
- msg.cmd = CLK_MSG_DISABLE;
+ msg.cmd = CLK_MSG_DISABLE | v->flag;
msg.len = sizeof(struct clk_msg_header);
rt_mutex_lock(&virtclk_front_ctx.lock);
@@ -224,7 +224,7 @@ static int virtclk_front_reset(struct clk *clk, enum clk_reset_action action)
return ret;
msg.header.clk_id = v->id;
- msg.header.cmd = CLK_MSG_RESET;
+ msg.header.cmd = CLK_MSG_RESET | v->flag;
msg.header.len = sizeof(struct clk_msg_header);
msg.reset = action;
@@ -279,7 +279,7 @@ static int virtclk_front_set_rate(struct clk *clk, unsigned long rate)
return ret;
msg.header.clk_id = v->id;
- msg.header.cmd = CLK_MSG_SETFREQ;
+ msg.header.cmd = CLK_MSG_SETFREQ | v->flag;
msg.header.len = sizeof(msg);
msg.freq = (u32)rate;
@@ -352,7 +352,7 @@ static unsigned long virtclk_front_get_rate(struct clk *clk)
return 0;
msg.clk_id = v->id;
- msg.cmd = CLK_MSG_GETFREQ;
+ msg.cmd = CLK_MSG_GETFREQ | v->flag;
msg.len = sizeof(msg);
rt_mutex_lock(&virtclk_front_ctx.lock);
diff --git a/drivers/clk/msm/virtclk-front.h b/drivers/clk/msm/virtclk-front.h
index 60650f8d1ed1..4d3bdf7cd841 100644
--- a/drivers/clk/msm/virtclk-front.h
+++ b/drivers/clk/msm/virtclk-front.h
@@ -23,6 +23,8 @@ enum virtclk_cmd {
CLK_MSG_MAX
};
+#define CLOCK_FLAG_NODE_TYPE_REMOTE 0xff00
+
struct clk_msg_header {
u32 cmd;
u32 len;
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
index 8bccf4ecdab6..9ff4ea63932d 100644
--- a/drivers/clk/mvebu/armada-38x.c
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -46,10 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
}
static const u32 armada_38x_cpu_frequencies[] __initconst = {
- 0, 0, 0, 0,
- 1066 * 1000 * 1000, 0, 0, 0,
+ 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
+ 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
1332 * 1000 * 1000, 0, 0, 0,
- 1600 * 1000 * 1000,
+ 1600 * 1000 * 1000, 0, 0, 0,
+ 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
};
static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
@@ -75,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
};
static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
- {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {1, 2}, {0, 1},
+ {1, 2}, {0, 1}, {1, 2}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {1, 2},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
@@ -90,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {7, 15},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index ff0c8327fabe..3c3cf8e04eea 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -210,9 +210,11 @@ static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f_curr;
u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
- if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) {
+ if (rcg->enable_safe_config && (!clk_hw_is_prepared(hw)
+ || !clk_hw_is_enabled(hw))) {
if (!rcg->current_freq)
rcg->current_freq = cxo_f.freq;
return rcg->current_freq;
@@ -232,9 +234,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
mode >>= CFG_MODE_SHIFT;
}
- mask = BIT(rcg->hid_width) - 1;
- hid_div = cfg >> CFG_SRC_DIV_SHIFT;
- hid_div &= mask;
+ if (rcg->enable_safe_config) {
+ f_curr = qcom_find_freq(rcg->freq_tbl, rcg->current_freq);
+ if (!f_curr)
+ return -EINVAL;
+
+ hid_div = f_curr->pre_div;
+ } else {
+ mask = BIT(rcg->hid_width) - 1;
+ hid_div = cfg >> CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+ }
return calc_rate(parent_rate, m, n, mode, hid_div);
}
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index a5c111b67f37..ea11a33e7fff 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -174,8 +174,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
if (!state_node)
break;
- if (!of_device_is_available(state_node))
+ if (!of_device_is_available(state_node)) {
+ of_node_put(state_node);
continue;
+ }
if (!idle_state_valid(state_node, i, cpumask)) {
pr_warn("%s idle state not valid, bailing out\n",
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index 81a9f9763915..b4675df551b3 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -451,10 +451,6 @@ static int parse_legacy_cluster_params(struct device_node *node,
return 0;
failed:
pr_err("%s(): Failed reading %s\n", __func__, key);
- kfree(c->name);
- kfree(c->lpm_dev);
- c->name = NULL;
- c->lpm_dev = NULL;
return ret;
}
@@ -640,8 +636,6 @@ static int parse_cluster_level(struct device_node *node,
return 0;
failed:
pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
- kfree(level->mode);
- level->mode = NULL;
return ret;
}
@@ -836,19 +830,12 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
return 0;
failed:
- for (i = 0; i < c->cpu->nlevels; i++) {
- kfree(c->cpu->levels[i].name);
- c->cpu->levels[i].name = NULL;
- }
- kfree(c->cpu);
- c->cpu = NULL;
pr_err("%s(): Failed with error code:%d\n", __func__, ret);
return ret;
}
void free_cluster_node(struct lpm_cluster *cluster)
{
- int i;
struct lpm_cluster *cl, *m;
list_for_each_entry_safe(cl, m, &cluster->child, list) {
@@ -856,22 +843,6 @@ void free_cluster_node(struct lpm_cluster *cluster)
free_cluster_node(cl);
};
- if (cluster->cpu) {
- for (i = 0; i < cluster->cpu->nlevels; i++) {
- kfree(cluster->cpu->levels[i].name);
- cluster->cpu->levels[i].name = NULL;
- }
- }
- for (i = 0; i < cluster->nlevels; i++) {
- kfree(cluster->levels[i].mode);
- cluster->levels[i].mode = NULL;
- }
- kfree(cluster->cpu);
- kfree(cluster->name);
- kfree(cluster->lpm_dev);
- cluster->cpu = NULL;
- cluster->name = NULL;
- cluster->lpm_dev = NULL;
cluster->ndevices = 0;
}
@@ -989,9 +960,7 @@ failed_parse_cluster:
list_del(&c->list);
free_cluster_node(c);
failed_parse_params:
- c->parent = NULL;
pr_err("Failed parse params\n");
- kfree(c);
return NULL;
}
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 584a1857624a..324cce5d7354 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1706,7 +1706,8 @@ static int cluster_cpuidle_register(struct lpm_cluster *cl)
struct cpuidle_state *st = &cl->drv->states[i];
struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
- snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
+ snprintf(st->desc, CPUIDLE_DESC_LEN, "%s",
+ cpu_level->name);
st->flags = 0;
st->exit_latency = cpu_level->pwr.latency_us;
st->power_usage = cpu_level->pwr.ss_power;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 66c073fc8afc..82a7c89caae2 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1473,10 +1473,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
rmb();
- initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
- rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
rmb();
+ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+ rmb();
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
rmb();
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f8d740a6740d..48d4dddf4941 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1722,17 +1722,24 @@ static int sdma_probe(struct platform_device *pdev)
if (IS_ERR(sdma->clk_ahb))
return PTR_ERR(sdma->clk_ahb);
- clk_prepare(sdma->clk_ipg);
- clk_prepare(sdma->clk_ahb);
+ ret = clk_prepare(sdma->clk_ipg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare(sdma->clk_ahb);
+ if (ret)
+ goto err_clk;
ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
sdma);
if (ret)
- return ret;
+ goto err_irq;
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
- if (!sdma->script_addrs)
- return -ENOMEM;
+ if (!sdma->script_addrs) {
+ ret = -ENOMEM;
+ goto err_irq;
+ }
/* initially no scripts available */
saddr_arr = (s32 *)sdma->script_addrs;
@@ -1847,6 +1854,10 @@ err_register:
dma_async_device_unregister(&sdma->dma_device);
err_init:
kfree(sdma->script_addrs);
+err_irq:
+ clk_unprepare(sdma->clk_ahb);
+err_clk:
+ clk_unprepare(sdma->clk_ipg);
return ret;
}
@@ -1857,6 +1868,8 @@ static int sdma_remove(struct platform_device *pdev)
dma_async_device_unregister(&sdma->dma_device);
kfree(sdma->script_addrs);
+ clk_unprepare(sdma->clk_ahb);
+ clk_unprepare(sdma->clk_ipg);
/* Kill the tasklet */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 0574e1bbe45c..3ce5609b4611 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -763,7 +763,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
/* Non-ECC RAM? */
printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
- goto err2;
+ goto err;
}
edac_dbg(3, "init mci\n");
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index d071e89d3124..99fd598b5069 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -230,10 +230,15 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
}
msleep(100);
}
- if (status_down)
+ if (status_down) {
dev_dbg(dev, "shutdown successful\n");
- else
+ esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
+ } else {
dev_err(mdm->dev, "graceful poff ipc fail\n");
+ graceful_shutdown = false;
+ goto force_poff;
+ }
+ break;
force_poff:
case ESOC_FORCE_PWR_OFF:
if (!graceful_shutdown) {
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index 4be66a16a3a1..0288082cea00 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -55,7 +55,7 @@ static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
if (!atomic)
usleep_range(reset_time_us, reset_time_us + 100000);
else
- mdelay(mdm->reset_time_ms);
+ mdelay(DEF_MDM9X55_RESET_TIME);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction_de_assert);
return 0;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 06d345b087f8..fe89fd56eabf 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2117,6 +2117,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
struct gpio_desc *desc = NULL;
int status;
enum gpio_lookup_flags lookupflags = 0;
+ /* Maybe we have a device name, maybe not */
+ const char *devname = dev ? dev_name(dev) : "?";
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
@@ -2145,7 +2147,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}
- status = gpiod_request(desc, con_id);
+ /*
+ * If a connection label was passed use that, else attempt to use
+ * the device name as label
+ */
+ status = gpiod_request(desc, con_id ? con_id : devname);
if (status < 0)
return ERR_PTR(status);
diff --git a/drivers/gpu/drm/msm/dba_bridge.c b/drivers/gpu/drm/msm/dba_bridge.c
index 9144dfdf30c9..7887bda23df0 100644
--- a/drivers/gpu/drm/msm/dba_bridge.c
+++ b/drivers/gpu/drm/msm/dba_bridge.c
@@ -51,6 +51,7 @@ struct dba_bridge {
u32 num_of_input_lanes;
bool pluggable;
u32 panel_count;
+ bool cont_splash_enabled;
};
#define to_dba_bridge(x) container_of((x), struct dba_bridge, base)
@@ -324,6 +325,7 @@ struct drm_bridge *dba_bridge_init(struct drm_device *dev,
bridge->panel_count = data->panel_count;
bridge->base.funcs = &_dba_bridge_ops;
bridge->base.encoder = encoder;
+ bridge->cont_splash_enabled = data->cont_splash_enabled;
rc = drm_bridge_attach(dev, &bridge->base);
if (rc) {
@@ -339,7 +341,10 @@ struct drm_bridge *dba_bridge_init(struct drm_device *dev,
encoder->bridge = &bridge->base;
}
- if (!bridge->pluggable) {
+ /* If early splash has enabled the bridge chip in the bootloader,
+ * the call below should be skipped.
+ */
+ if (!bridge->pluggable && !bridge->cont_splash_enabled) {
if (bridge->ops.power_on)
bridge->ops.power_on(bridge->dba_ctx, true, 0);
if (bridge->ops.check_hpd)
diff --git a/drivers/gpu/drm/msm/dba_bridge.h b/drivers/gpu/drm/msm/dba_bridge.h
index 5562d2b2aef9..edc130f92257 100644
--- a/drivers/gpu/drm/msm/dba_bridge.h
+++ b/drivers/gpu/drm/msm/dba_bridge.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,7 @@ struct dba_bridge_init {
struct drm_bridge *precede_bridge;
bool pluggable;
u32 panel_count;
+ bool cont_splash_enabled;
};
/**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 6015cf35e030..7a90c7be4e5c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -1598,7 +1598,7 @@ exit:
*
* Return: error code.
*/
-int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl)
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool cont_splash_enabled)
{
int rc = 0;
@@ -1615,37 +1615,40 @@ int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl)
goto error;
}
- dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
+ if (!cont_splash_enabled) {
+ dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
&dsi_ctrl->host_config.lane_map);
- dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
+ dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config);
- if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
- dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
+ if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
+ dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config,
&dsi_ctrl->host_config.u.cmd_engine);
- dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
+ dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
dsi_ctrl->host_config.video_timing.h_active,
dsi_ctrl->host_config.video_timing.h_active * 3,
dsi_ctrl->host_config.video_timing.v_active,
0x0);
- } else {
- dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
+ } else {
+ dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config,
&dsi_ctrl->host_config.u.video_engine);
- dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
+ dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
&dsi_ctrl->host_config.video_timing);
+ }
}
-
-
dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
- /* Perform a soft reset before enabling dsi controller */
- dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
+ /* Perform a soft reset before enabling the DSI controller,
+ * but skip the reset if DSI was already enabled in the bootloader.
+ */
+ if (!cont_splash_enabled)
+ dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
pr_debug("[DSI_%d]Host initialization complete\n", dsi_ctrl->index);
dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
error:
@@ -1967,6 +1970,12 @@ error:
return rc;
}
+void dsi_ctrl_update_power_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_power_state state)
+{
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE, state);
+}
+
/**
* dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
* @dsi_ctrl: DSI controller handle.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 993a35cbf84a..c0ba532011b5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -331,6 +331,7 @@ int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_host_init() - Initialize DSI host hardware.
* @dsi_ctrl: DSI controller handle.
+ * @cont_splash_enabled: Flag indicating DSI splash was enabled in the bootloader.
*
* Initializes DSI controller hardware with host configuration provided by
* dsi_ctrl_update_host_config(). Initialization can be performed only during
@@ -339,7 +340,7 @@ int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl);
*
* Return: error code.
*/
-int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl);
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool cont_splash_enabled);
/**
* dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
@@ -404,6 +405,16 @@ int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl,
enum dsi_power_state state);
/**
+ * dsi_ctrl_update_power_state() - update power state for dsi controller
+ * @dsi_ctrl: DSI controller handle.
+ * @state: Power state.
+ *
+ * Update power state for DSI controller.
+ *
+ */
+void dsi_ctrl_update_power_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_power_state state);
+/**
* dsi_ctrl_set_cmd_engine_state() - set command engine state
* @dsi_ctrl: DSI Controller handle.
* @state: Engine state.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 09ab14cc4746..c468a6f5caa2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -174,6 +174,11 @@ static int dsi_display_ctrl_power_on(struct dsi_display *display)
int i;
struct dsi_display_ctrl *ctrl;
+ if (display->cont_splash_enabled) {
+ pr_debug("skip ctrl power on\n");
+ return rc;
+ }
+
/* Sequence does not matter for split dsi usecases */
for (i = 0; i < display->ctrl_count; i++) {
@@ -460,7 +465,8 @@ static int dsi_display_ctrl_init(struct dsi_display *display)
for (i = 0 ; i < display->ctrl_count; i++) {
ctrl = &display->ctrl[i];
- rc = dsi_ctrl_host_init(ctrl->ctrl);
+ rc = dsi_ctrl_host_init(ctrl->ctrl,
+ display->cont_splash_enabled);
if (rc) {
pr_err("[%s] failed to init host_%d, rc=%d\n",
display->name, i, rc);
@@ -720,7 +726,7 @@ static int dsi_display_phy_enable(struct dsi_display *display)
rc = dsi_phy_enable(m_ctrl->phy,
&display->config,
m_src,
- true);
+ true, display->cont_splash_enabled);
if (rc) {
pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
display->name, rc);
@@ -735,7 +741,7 @@ static int dsi_display_phy_enable(struct dsi_display *display)
rc = dsi_phy_enable(ctrl->phy,
&display->config,
DSI_PLL_SOURCE_NON_NATIVE,
- true);
+ true, display->cont_splash_enabled);
if (rc) {
pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
display->name, rc);
@@ -848,6 +854,11 @@ static int dsi_display_phy_sw_reset(struct dsi_display *display)
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
+ if (display->cont_splash_enabled) {
+ pr_debug("skip phy sw reset\n");
+ return 0;
+ }
+
m_ctrl = &display->ctrl[display->cmd_master_idx];
rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl);
@@ -1748,6 +1759,45 @@ static int _dsi_display_dev_deinit(struct dsi_display *display)
return rc;
}
+/*
+ * _dsi_display_config_ctrl_for_splash
+ *
+ * Configure the ctrl engine for the DSI display.
+ * @display: Handle to the display
+ * Returns: Zero on success
+ */
+static int _dsi_display_config_ctrl_for_splash(struct dsi_display *display)
+{
+ int rc = 0;
+
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
+ rc = dsi_display_vid_engine_enable(display);
+ if (rc) {
+ pr_err("[%s]failed to enable video engine, rc=%d\n",
+ display->name, rc);
+ goto error_out;
+ }
+ } else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
+ rc = dsi_display_cmd_engine_enable(display);
+ if (rc) {
+ pr_err("[%s]failed to enable cmd engine, rc=%d\n",
+ display->name, rc);
+ goto error_out;
+ }
+ } else {
+ pr_err("[%s] Invalid configuration\n", display->name);
+ rc = -EINVAL;
+ }
+
+error_out:
+ return rc;
+}
+
/**
* dsi_display_bind - bind dsi device with controlling device
* @dev: Pointer to base of platform device
@@ -2141,6 +2191,8 @@ int dsi_display_drm_bridge_init(struct dsi_display *display,
init_data.num_of_input_lanes = num_of_lanes;
init_data.precede_bridge = precede_bridge;
init_data.panel_count = display->panel_count;
+ init_data.cont_splash_enabled =
+ display->cont_splash_enabled;
dba_bridge = dba_bridge_init(display->drm_dev, enc,
&init_data);
if (IS_ERR_OR_NULL(dba_bridge)) {
@@ -2451,26 +2503,28 @@ int dsi_display_prepare(struct dsi_display *display)
mutex_lock(&display->display_lock);
- for (i = 0; i < display->panel_count; i++) {
- rc = dsi_panel_pre_prepare(display->panel[i]);
- if (rc) {
- SDE_ERROR("[%s] panel pre-prepare failed, rc=%d\n",
- display->name, rc);
- goto error_panel_post_unprep;
+ if (!display->cont_splash_enabled) {
+ for (i = 0; i < display->panel_count; i++) {
+ rc = dsi_panel_pre_prepare(display->panel[i]);
+ if (rc) {
+ SDE_ERROR("[%s]pre-prepare failed, rc=%d\n",
+ display->name, rc);
+ goto error_panel_post_unprep;
+ }
}
}
rc = dsi_display_ctrl_power_on(display);
if (rc) {
pr_err("[%s] failed to power on dsi controllers, rc=%d\n",
- display->name, rc);
+ display->name, rc);
goto error_panel_post_unprep;
}
rc = dsi_display_phy_power_on(display);
if (rc) {
pr_err("[%s] failed to power on dsi phy, rc = %d\n",
- display->name, rc);
+ display->name, rc);
goto error_ctrl_pwr_off;
}
@@ -2497,21 +2551,21 @@ int dsi_display_prepare(struct dsi_display *display)
rc = dsi_display_ctrl_init(display);
if (rc) {
pr_err("[%s] failed to setup DSI controller, rc=%d\n",
- display->name, rc);
+ display->name, rc);
goto error_phy_disable;
}
rc = dsi_display_ctrl_link_clk_on(display);
if (rc) {
pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
- display->name, rc);
+ display->name, rc);
goto error_ctrl_deinit;
}
rc = dsi_display_ctrl_host_enable(display);
if (rc) {
pr_err("[%s] failed to enable DSI host, rc=%d\n",
- display->name, rc);
+ display->name, rc);
goto error_ctrl_link_off;
}
@@ -2519,11 +2573,10 @@ int dsi_display_prepare(struct dsi_display *display)
rc = dsi_panel_prepare(display->panel[j]);
if (rc) {
SDE_ERROR("[%s] panel prepare failed, rc=%d\n",
- display->name, rc);
+ display->name, rc);
goto error_panel_unprep;
}
}
-
goto error;
error_panel_unprep:
@@ -2559,6 +2612,12 @@ int dsi_display_enable(struct dsi_display *display)
return -EINVAL;
}
+ if (display->cont_splash_enabled) {
+ _dsi_display_config_ctrl_for_splash(display);
+ display->cont_splash_enabled = false;
+ return 0;
+ }
+
mutex_lock(&display->display_lock);
for (i = 0; i < display->panel_count; i++) {
@@ -2755,6 +2814,46 @@ int dsi_display_unprepare(struct dsi_display *display)
return rc;
}
+int dsi_dsiplay_setup_splash_resource(struct dsi_display *display)
+{
+ int ret = 0, i = 0;
+ struct dsi_display_ctrl *ctrl;
+
+ if (!display)
+ return -EINVAL;
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl)
+ return -EINVAL;
+
+ dsi_pwr_enable_regulator(&ctrl->ctrl->pwr_info.host_pwr, true);
+ dsi_pwr_enable_regulator(&ctrl->ctrl->pwr_info.digital, true);
+ dsi_pwr_enable_regulator(&ctrl->phy->pwr_info.phy_pwr, true);
+
+ ret = dsi_clk_enable_core_clks(&ctrl->ctrl->clk_info.core_clks,
+ true);
+ if (ret) {
+ SDE_ERROR("failed to set core clk for dsi, ret = %d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ ret = dsi_clk_enable_link_clks(&ctrl->ctrl->clk_info.link_clks,
+ true);
+ if (ret) {
+ SDE_ERROR("failed to set link clk for dsi, ret = %d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ dsi_ctrl_update_power_state(ctrl->ctrl,
+ DSI_CTRL_POWER_LINK_CLK_ON);
+ }
+
+ return ret;
+}
+
static int __init dsi_display_register(void)
{
dsi_phy_drv_register();
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 210b8d00850b..3723f19fd0e7 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -158,6 +158,8 @@ struct dsi_display {
/* DEBUG FS */
struct dentry *root;
+
+ bool cont_splash_enabled;
};
int dsi_display_dev_probe(struct platform_device *pdev);
@@ -338,4 +340,15 @@ int dsi_display_clock_gate(struct dsi_display *display, bool enable);
int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
int dsi_display_set_backlight(void *display, u32 bl_lvl);
+
+/**
+ * dsi_dsiplay_setup_splash_resource
+ * @display: Handle to display.
+ *
+ * Set up the DSI splash resource to avoid a reset and glitches if DSI is
+ * enabled in the bootloader.
+ *
+ * Return: error code.
+ */
+int dsi_dsiplay_setup_splash_resource(struct dsi_display *display);
#endif /* _DSI_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 1ccbbe7df573..da3b3b548e5f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -721,9 +721,10 @@ error:
* Return: error code.
*/
int dsi_phy_enable(struct msm_dsi_phy *phy,
- struct dsi_host_config *config,
- enum dsi_phy_pll_source pll_source,
- bool skip_validation)
+ struct dsi_host_config *config,
+ enum dsi_phy_pll_source pll_source,
+ bool skip_validation,
+ bool cont_splash_enabled)
{
int rc = 0;
@@ -758,7 +759,8 @@ int dsi_phy_enable(struct msm_dsi_phy *phy,
goto error_disable_clks;
}
- dsi_phy_enable_hw(phy);
+ if (!cont_splash_enabled)
+ dsi_phy_enable_hw(phy);
error_disable_clks:
rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, false);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
index 6c31bfa3ea00..aa21d0b347e8 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -157,9 +157,10 @@ int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable);
* Return: error code.
*/
int dsi_phy_enable(struct msm_dsi_phy *dsi_phy,
- struct dsi_host_config *config,
- enum dsi_phy_pll_source pll_source,
- bool skip_validation);
+ struct dsi_host_config *config,
+ enum dsi_phy_pll_source pll_source,
+ bool skip_validation,
+ bool cont_splash_enabled);
/**
* dsi_phy_disable() - disable DSI PHY hardware.
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index 21b89663a9c3..46cc521a09f3 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -1311,7 +1311,7 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
}
}
- if (!sde_kms->splash_info.handoff) {
+ if (!sde_hdmi->cont_splash_enabled) {
sde_hdmi_set_mode(hdmi, false);
_sde_hdmi_phy_reset(hdmi);
sde_hdmi_set_mode(hdmi, true);
@@ -1353,23 +1353,28 @@ int sde_hdmi_core_enable(struct sde_hdmi *sde_hdmi)
struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
- int i, ret;
+ int i, ret = 0;
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
if (ret) {
SDE_ERROR("failed to enable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
+ goto err_regulator_enable;
}
}
ret = pinctrl_pm_select_default_state(dev);
- if (ret)
+ if (ret) {
SDE_ERROR("pinctrl state chg failed: %d\n", ret);
+ goto err_pinctrl_state;
+ }
ret = _sde_hdmi_gpio_config(hdmi, true);
- if (ret)
+ if (ret) {
SDE_ERROR("failed to configure GPIOs: %d\n", ret);
+ goto err_gpio_config;
+ }
for (i = 0; i < config->hpd_clk_cnt; i++) {
if (config->hpd_freq && config->hpd_freq[i]) {
@@ -1384,13 +1389,23 @@ int sde_hdmi_core_enable(struct sde_hdmi *sde_hdmi)
if (ret) {
SDE_ERROR("failed to enable hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
+ goto err_clk_prepare_enable;
}
}
sde_hdmi_set_mode(hdmi, true);
+ goto exit;
- /* Wait for vsync */
- msleep(20);
-
+err_clk_prepare_enable:
+ for (i = 0; i < config->hpd_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
+err_gpio_config:
+ _sde_hdmi_gpio_config(hdmi, false);
+err_pinctrl_state:
+ pinctrl_pm_select_sleep_state(dev);
+err_regulator_enable:
+ for (i = 0; i < config->hpd_reg_cnt; i++)
+ regulator_disable(hdmi->hpd_regs[i]);
+exit:
return ret;
}
@@ -3186,7 +3201,6 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
struct msm_drm_private *priv = NULL;
struct hdmi *hdmi;
struct platform_device *pdev;
- struct sde_kms *sde_kms;
DBG("");
if (!display || !display->drm_dev || !enc) {
@@ -3252,12 +3266,9 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
* clocks. This can skip the clock disabling operation in
* clock_late_init when finding clk.count == 1.
*/
- sde_kms = to_sde_kms(priv->kms);
- if (sde_kms->splash_info.handoff) {
+ if (display->cont_splash_enabled) {
sde_hdmi_bridge_power_on(hdmi->bridge);
hdmi->power_on = true;
- } else {
- hdmi->power_on = false;
}
mutex_unlock(&display->display_lock);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index 607d2cf3c7b7..9cf807e829c7 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -196,6 +196,8 @@ struct sde_hdmi {
struct dss_io_data io[HDMI_TX_MAX_IO];
/* DEBUG FS */
struct dentry *root;
+
+ bool cont_splash_enabled;
};
/**
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 8b399e02ec0c..bae6b1c84420 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -125,20 +125,20 @@ static void sde_hdmi_clear_hdr_info(struct drm_bridge *bridge)
connector->hdr_supported = false;
}
-static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+static int _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
- int i, ret;
+ int i, ret = 0;
struct sde_hdmi *display = sde_hdmi_bridge->display;
if ((display->non_pluggable) && (!hdmi->power_on)) {
ret = sde_hdmi_core_enable(display);
- if (ret)
+ if (ret) {
SDE_ERROR("failed to enable HDMI core (%d)\n", ret);
- else
- hdmi->power_on = true;
+ goto err_core_enable;
+ }
}
for (i = 0; i < config->pwr_reg_cnt; i++) {
@@ -146,15 +146,17 @@ static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
if (ret) {
SDE_ERROR("failed to enable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
+ goto err_regulator_enable;
}
}
- if (config->pwr_clk_cnt > 0) {
+ if (config->pwr_clk_cnt > 0 && hdmi->pixclock) {
DRM_DEBUG("pixclock: %lu", hdmi->pixclock);
ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
if (ret) {
- SDE_ERROR("failed to set pixel clk: %s (%d)\n",
- config->pwr_clk_names[0], ret);
+ pr_warn("failed to set pixclock: %s %ld (%d)\n",
+ config->pwr_clk_names[0],
+ hdmi->pixclock, ret);
}
}
@@ -163,17 +165,31 @@ static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
if (ret) {
SDE_ERROR("failed to enable pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
+ goto err_prepare_enable;
}
}
+ goto exit;
+
+err_prepare_enable:
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->pwr_clks[i]);
+err_regulator_enable:
+ for (i = 0; i < config->pwr_reg_cnt; i++)
+ regulator_disable(hdmi->pwr_regs[i]);
+err_core_enable:
+ if (display->non_pluggable)
+ sde_hdmi_core_disable(display);
+exit:
+ return ret;
}
-static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
+static int _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct sde_hdmi *display = sde_hdmi_bridge->display;
- int i, ret;
+ int i, ret = 0;
/* Wait for vsync */
msleep(20);
@@ -183,14 +199,15 @@ static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_disable(hdmi->pwr_regs[i]);
- if (ret) {
+ if (ret)
SDE_ERROR("failed to disable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
- }
}
if (display->non_pluggable)
sde_hdmi_core_disable(display);
+
+ return ret;
}
static int _sde_hdmi_bridge_ddc_clear_irq(struct hdmi *hdmi,
@@ -489,9 +506,15 @@ static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
DRM_DEBUG("power up");
if (!hdmi->power_on) {
- _sde_hdmi_bridge_power_on(bridge);
+ if (_sde_hdmi_bridge_power_on(bridge)) {
+ DEV_ERR("failed to power on bridge\n");
+ return;
+ }
+ hdmi->power_on = true;
}
+ _sde_hdmi_bridge_setup_scrambler(hdmi, &display->mode);
+
if (phy)
phy->funcs->powerup(phy, hdmi->pixclock);
@@ -830,10 +853,6 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
mode = adjusted_mode;
- if (display->non_pluggable && !hdmi->power_on)
- if (sde_hdmi_core_enable(display))
- pr_err("mode set enable core failed\n");
-
display->dc_enable = mode->private_flags &
(MSM_MODE_FLAG_RGB444_DC_ENABLE |
MSM_MODE_FLAG_YUV420_DC_ENABLE);
@@ -908,10 +927,7 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
}
_sde_hdmi_save_mode(hdmi, mode);
- _sde_hdmi_bridge_setup_scrambler(hdmi, mode);
_sde_hdmi_bridge_setup_deep_color(hdmi);
- if (display->non_pluggable && !hdmi->power_on)
- sde_hdmi_core_disable(display);
}
static bool _sde_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index e8dfd1f08236..f74a682c5f04 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -572,8 +572,6 @@ void sde_connector_complete_commit(struct drm_connector *connector)
{
struct drm_device *dev;
struct msm_drm_private *priv;
- struct sde_connector *c_conn;
- struct sde_kms *sde_kms;
if (!connector) {
SDE_ERROR("invalid connector\n");
@@ -582,7 +580,6 @@ void sde_connector_complete_commit(struct drm_connector *connector)
dev = connector->dev;
priv = dev->dev_private;
- sde_kms = to_sde_kms(priv->kms);
/* signal connector's retire fence */
sde_fence_signal(&to_sde_connector(connector)->retire_fence, 0);
@@ -590,14 +587,8 @@ void sde_connector_complete_commit(struct drm_connector *connector)
/* after first vsync comes,
* early splash resource should start to be released.
*/
- if (sde_splash_get_lk_complete_status(&sde_kms->splash_info)) {
- c_conn = to_sde_connector(connector);
-
- sde_splash_clean_up_free_resource(priv->kms,
- &priv->phandle,
- c_conn->connector_type,
- c_conn->display);
- }
+ if (sde_splash_get_lk_complete_status(priv->kms))
+ sde_splash_free_resource(priv->kms, &priv->phandle);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index ea3f138ee461..6ad1ce16c20a 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -39,10 +39,10 @@
#include "sde_trace.h"
/* default input fence timeout, in ms */
-#define SDE_CRTC_INPUT_FENCE_TIMEOUT 2000
+#define SDE_CRTC_INPUT_FENCE_TIMEOUT 10000
/*
- * The default input fence timeout is 2 seconds while max allowed
+ * The default input fence timeout is 10 seconds while max allowed
* range is 10 seconds. Any value above 10 seconds adds glitches beyond
* tolerance limit.
*/
@@ -304,6 +304,8 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
struct sde_crtc_mixer *mixer = sde_crtc->mixers;
struct sde_hw_ctl *ctl;
struct sde_hw_mixer *lm;
+ struct sde_splash_info *sinfo;
+ struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
int i;
@@ -314,6 +316,17 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
return;
}
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ }
+
+ sinfo = &sde_kms->splash_info;
+ if (!sinfo) {
+ SDE_ERROR("invalid splash info\n");
+ return;
+ }
+
for (i = 0; i < sde_crtc->num_mixers; i++) {
if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
SDE_ERROR("invalid lm or ctl assigned to mixer\n");
@@ -323,7 +336,10 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].flush_mask = 0;
if (mixer[i].hw_ctl->ops.clear_all_blendstages)
mixer[i].hw_ctl->ops.clear_all_blendstages(
- mixer[i].hw_ctl);
+ mixer[i].hw_ctl,
+ sinfo->handoff,
+ sinfo->reserved_pipe_info,
+ MAX_BLOCKS);
}
/* initialize stage cfg */
@@ -350,7 +366,8 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].flush_mask);
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
- &sde_crtc->stage_cfg, i);
+ &sde_crtc->stage_cfg, i,
+ sinfo->handoff, sinfo->reserved_pipe_info, MAX_BLOCKS);
}
}
@@ -1625,8 +1642,18 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
sde_kms_info_add_keyint(info, "max_linewidth",
catalog->max_mixer_width);
- sde_kms_info_add_keyint(info, "max_blendstages",
- catalog->max_mixer_blendstages);
+
+ /* At this point we cannot know which display early RVC will run on.
+ * To avoid impacting early RVC's layer, decrease every LM's blend stage
+ * count. This should be restored once handoff is done.
+ */
+ if (sde_kms->splash_info.handoff)
+ sde_kms_info_add_keyint(info, "max_blendstages",
+ catalog->max_mixer_blendstages - 1);
+ else
+ sde_kms_info_add_keyint(info, "max_blendstages",
+ catalog->max_mixer_blendstages);
+
if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED2)
sde_kms_info_add_keystr(info, "qseed_type", "qseed2");
if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3)
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index ed9a6ea37397..9e0bf09bff0a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1205,7 +1205,7 @@ static int sde_mixer_parse_dt(struct device_node *np,
if (!prop_exists[MIXER_LEN])
mixer->len = DEFAULT_SDE_HW_BLOCK_LEN;
- if (lm_pair_mask[i])
+ if ((i < ARRAY_SIZE(lm_pair_mask)) && lm_pair_mask[i])
mixer->lm_pair_mask = 1 << lm_pair_mask[i];
sblk->maxblendstages = max_blendstages;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 46e2a13cecc4..341738f624db 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -258,6 +258,35 @@ static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
return 0;
}
+static inline int sde_hw_ctl_get_splash_mixercfg(const u32 *resv_pipes,
+ u32 length)
+{
+ int i = 0;
+ u32 mixercfg = 0;
+
+ for (i = 0; i < length; i++) {
+ /* LK's splash VIG layer always stays on top */
+ switch (resv_pipes[i]) {
+ case SSPP_VIG0:
+ mixercfg |= 0x7 << 0;
+ break;
+ case SSPP_VIG1:
+ mixercfg |= 0x7 << 3;
+ break;
+ case SSPP_VIG2:
+ mixercfg |= 0x7 << 6;
+ break;
+ case SSPP_VIG3:
+ mixercfg |= 0x7 << 26;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return mixercfg;
+}
+
static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 count)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
@@ -312,15 +341,29 @@ static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx)
return 0;
}
-static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
+static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx,
+ bool handoff, const u32 *resv_pipes, u32 resv_pipes_length)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
int i;
for (i = 0; i < ctx->mixer_count; i++) {
int mixer_id = ctx->mixer_hw_caps[i].id;
+ u32 mixercfg = 0;
+
+ /*
+ * If the bootloader still has early RVC running, the mixer status
+ * can't be directly cleared.
+ */
+ if (handoff) {
+ mixercfg =
+ sde_hw_ctl_get_splash_mixercfg(resv_pipes,
+ resv_pipes_length);
+
+ mixercfg &= SDE_REG_READ(c, CTL_LAYER(mixer_id));
+ }
- SDE_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+ SDE_REG_WRITE(c, CTL_LAYER(mixer_id), mixercfg);
SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
SDE_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
SDE_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
@@ -328,7 +371,8 @@ static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
}
static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
- enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index)
+ enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index,
+ bool handoff, const u32 *resv_pipes, u32 resv_pipes_length)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
u32 mixercfg, mixercfg_ext, mix, ext, mixercfg_ext2;
@@ -353,6 +397,20 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
mixercfg_ext = 0;
mixercfg_ext2 = 0;
+ /*
+ * If the bootloader still has RVC running, its mixer status
+ * should be folded into the kernel's mixer setup.
+ */
+ if (handoff) {
+ mixercfg =
+ sde_hw_ctl_get_splash_mixercfg(resv_pipes,
+ resv_pipes_length);
+
+ mixercfg &= SDE_REG_READ(c, CTL_LAYER(lm));
+ mixercfg |= BIT(24);
+ stages--;
+ }
+
for (i = 0; i <= stages; i++) {
/* overflow to ext register if 'i + 1 > 7' */
mix = (i + 1) & 0x7;
@@ -458,6 +516,38 @@ static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
}
+static inline u32 sde_hw_ctl_read_ctl_top_for_splash(struct sde_hw_ctl *ctx)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 ctl_top;
+
+ if (!ctx) {
+ pr_err("Invalid ctx\n");
+ return 0;
+ }
+
+ c = &ctx->hw;
+ ctl_top = SDE_REG_READ(c, CTL_TOP);
+ return ctl_top;
+}
+
+static inline u32 sde_hw_ctl_read_ctl_layers_for_splash(struct sde_hw_ctl *ctx,
+ int index)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 ctl_top;
+
+ if (!ctx) {
+ pr_err("Invalid ctx\n");
+ return 0;
+ }
+
+ c = &ctx->hw;
+ ctl_top = SDE_REG_READ(c, CTL_LAYER(index));
+
+ return ctl_top;
+}
+
static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
unsigned long cap)
{
@@ -478,6 +568,8 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
+ ops->read_ctl_top_for_splash = sde_hw_ctl_read_ctl_top_for_splash;
+ ops->read_ctl_layers_for_splash = sde_hw_ctl_read_ctl_layers_for_splash;
};
struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 74dbde92639a..a008ecf4a11d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -146,17 +146,40 @@ struct sde_hw_ctl_ops {
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
+ * @handoff : handoff flag
+ * @resv_pipes : reserved pipes in DT
+ * @resv_pipes_length: number of entries in resv_pipes
*/
- void (*clear_all_blendstages)(struct sde_hw_ctl *ctx);
+ void (*clear_all_blendstages)(struct sde_hw_ctl *ctx,
+ bool handoff, const u32 *resv_pipes, u32 resv_pipes_length);
/**
* Configure layer mixer to pipe configuration
* @ctx : ctl path ctx pointer
* @lm : layer mixer enumeration
* @cfg : blend stage configuration
+ * @handoff : handoff flag
+ * @resv_pipes : reserved pipes in DT
+ * @resv_pipes_length: number of entries in resv_pipes
*/
void (*setup_blendstage)(struct sde_hw_ctl *ctx,
- enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index);
+ enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index,
+ bool handoff, const u32 *resv_pipes, u32 resv_pipes_length);
+
+ /**
+ * read CTL_TOP register value for splash case
+ * @ctx : ctl path ctx pointer
+ * @Return : CTL top register value
+ */
+ u32 (*read_ctl_top_for_splash)(struct sde_hw_ctl *ctx);
+
+ /**
+ * read CTL layers register value for splash case
+ * @ctx : ctl path ctx pointer
+ * @index : layer index for this ctl path
+ * @Return : CTL layers register value
+ */
+ u32 (*read_ctl_layers_for_splash)(struct sde_hw_ctl *ctx, int index);
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index a94de553c855..1da8b5b4ff10 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -343,8 +343,9 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
struct drm_device *dev = sde_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
- if (sde_kms->splash_info.handoff)
- sde_splash_clean_up_exit_lk(kms);
+ if (sde_kms->splash_info.handoff &&
+ sde_kms->splash_info.display_splash_enabled)
+ sde_splash_lk_stop_splash(kms);
else
sde_power_resource_enable(&priv->phandle,
sde_kms->core_client, true);
@@ -639,6 +640,15 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
continue;
}
+ rc = sde_splash_setup_display_resource(&sde_kms->splash_info,
+ display, DRM_MODE_CONNECTOR_DSI);
+ if (rc) {
+ SDE_ERROR("dsi %d splash resource setup failed %d\n",
+ i, rc);
+ sde_encoder_destroy(encoder);
+ continue;
+ }
+
rc = dsi_display_drm_bridge_init(display, encoder);
if (rc) {
SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
@@ -731,6 +741,15 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
continue;
}
+ rc = sde_splash_setup_display_resource(&sde_kms->splash_info,
+ display, DRM_MODE_CONNECTOR_HDMIA);
+ if (rc) {
+ SDE_ERROR("hdmi %d splash resource setup failed %d\n",
+ i, rc);
+ sde_encoder_destroy(encoder);
+ continue;
+ }
+
rc = sde_hdmi_drm_init(display, encoder);
if (rc) {
SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc);
@@ -812,6 +831,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
struct msm_drm_private *priv;
struct sde_mdss_cfg *catalog;
+ struct sde_splash_info *sinfo;
int primary_planes_idx, i, ret;
int max_crtc_count, max_plane_count;
@@ -824,6 +844,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
dev = sde_kms->dev;
priv = dev->dev_private;
catalog = sde_kms->catalog;
+ sinfo = &sde_kms->splash_info;
ret = sde_core_irq_domain_add(sde_kms);
if (ret)
@@ -851,7 +872,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
primary = false;
plane = sde_plane_init(dev, catalog->vp[i].id,
- primary, 1UL << crtc_id, true);
+ primary, 1UL << crtc_id, true, false);
if (IS_ERR(plane)) {
SDE_ERROR("sde_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -869,14 +890,22 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
for (i = 0; i < max_plane_count; i++) {
bool primary = true;
+ bool resv_plane = false;
if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
|| primary_planes_idx >= max_crtc_count)
primary = false;
+ if (sde_splash_query_plane_is_reserved(sinfo,
+ catalog->sspp[i].id)) {
+ resv_plane = true;
+ DRM_INFO("pipe%d is reserved\n",
+ catalog->sspp[i].id);
+ }
+
plane = sde_plane_init(dev, catalog->sspp[i].id,
primary, (1UL << max_crtc_count) - 1,
- false);
+ false, resv_plane);
if (IS_ERR(plane)) {
SDE_ERROR("sde_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -1337,12 +1366,17 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
sinfo = &sde_kms->splash_info;
if (sinfo->handoff) {
- rc = sde_splash_parse_dt(dev);
+ rc = sde_splash_parse_memory_dt(dev);
if (rc) {
- SDE_ERROR("parse dt for splash info failed: %d\n", rc);
+ SDE_ERROR("parse memory dt failed: %d\n", rc);
goto power_error;
}
+ rc = sde_splash_parse_reserved_plane_dt(sinfo,
+ sde_kms->catalog);
+ if (rc)
+ SDE_ERROR("parse reserved plane dt failed: %d\n", rc);
+
sde_splash_init(&priv->phandle, kms);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index acd5687f6d11..ceac5a931e7e 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1798,7 +1798,7 @@ static void sde_plane_atomic_update(struct drm_plane *plane,
/* helper to install properties which are common to planes and crtcs */
static void _sde_plane_install_properties(struct drm_plane *plane,
- struct sde_mdss_cfg *catalog)
+ struct sde_mdss_cfg *catalog, bool plane_reserved)
{
static const struct drm_prop_enum_list e_blend_op[] = {
{SDE_DRM_BLEND_OP_NOT_DEFINED, "not_defined"},
@@ -1994,6 +1994,16 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
sde_kms_info_add_keyint(info, "max_downscale", maxdwnscale);
sde_kms_info_add_keyint(info, "max_horizontal_deci", maxhdeciexp);
sde_kms_info_add_keyint(info, "max_vertical_deci", maxvdeciexp);
+
+ /* When early RVC is enabled in the bootloader and has not exited,
+ * user space must not touch the pipe that RVC is running on.
+ * So publish "plane_unavailability" in that pipe's property blob;
+ * user space can parse the property and skip allocating the pipe.
+ * plane_reserved == 1 means the pipe is occupied by the bootloader.
+ * plane_reserved == 0 means the pipe is not used by the bootloader.
+ */
+ sde_kms_info_add_keyint(info, "plane_unavailability", plane_reserved);
msm_property_set_blob(&psde->property_info, &psde->blob_info,
info->data, info->len, PLANE_PROP_INFO);
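For context, a minimal userspace-side sketch of how a compositor could honor this hint. The helper below is illustrative only; it assumes the plane info blob is exposed as newline-separated "key=value" text (the formatting used by sde_kms_info_add_keyint is an assumption here):

	#include <stdbool.h>
	#include <string.h>

	/* Illustrative: true when the plane info blob marks the pipe reserved by the bootloader. */
	static bool plane_reserved_by_bootloader(const char *info_blob)
	{
		const char *key = strstr(info_blob, "plane_unavailability=");

		return key && key[strlen("plane_unavailability=")] == '1';
	}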
@@ -2731,7 +2741,8 @@ end:
/* initialize plane */
struct drm_plane *sde_plane_init(struct drm_device *dev,
uint32_t pipe, bool primary_plane,
- unsigned long possible_crtcs, bool vp_enabled)
+ unsigned long possible_crtcs,
+ bool vp_enabled, bool plane_reserved)
{
struct drm_plane *plane = NULL;
struct sde_plane *psde;
@@ -2856,7 +2867,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
PLANE_PROP_COUNT, PLANE_PROP_BLOBCOUNT,
sizeof(struct sde_plane_state));
- _sde_plane_install_properties(plane, kms->catalog);
+ _sde_plane_install_properties(plane, kms->catalog, plane_reserved);
/* save user friendly pipe name for later */
snprintf(psde->pipe_name, SDE_NAME_SIZE, "plane%u", plane->base.id);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 7b91822d4cde..8ac582643926 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -77,10 +77,12 @@ void sde_plane_flush(struct drm_plane *plane);
* @primary_plane: true if this pipe is primary plane for crtc
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
* @vp_enabled: Flag indicating if virtual planes enabled
+ * @plane_reserved: Flag indicating the plane is occupied by the bootloader
*/
struct drm_plane *sde_plane_init(struct drm_device *dev,
uint32_t pipe, bool primary_plane,
- unsigned long possible_crtcs, bool vp_enabled);
+ unsigned long possible_crtcs,
+ bool vp_enabled, bool plane_reserved);
/**
* sde_plane_wait_input_fence - wait for input fence object
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index de0551b22d2e..6055dc861c72 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
#include "sde_encoder.h"
#include "sde_connector.h"
#include "sde_hw_sspp.h"
+#include "sde_splash.h"
#define RESERVED_BY_OTHER(h, r) \
((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
@@ -417,6 +418,8 @@ int sde_rm_init(struct sde_rm *rm,
mutex_init(&rm->rm_lock);
+ rm->dev = dev;
+
INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < SDE_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
@@ -652,7 +655,8 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
static int _sde_rm_reserve_lms(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
- struct sde_rm_requirements *reqs)
+ struct sde_rm_requirements *reqs,
+ uint32_t prefer_lm_id)
{
struct sde_rm_hw_blk *lm[MAX_BLOCKS];
@@ -678,6 +682,10 @@ static int _sde_rm_reserve_lms(
lm_count = 0;
lm[lm_count] = iter_i.blk;
+ /* when a preferred lm id is given, skip mixers that do not match it */
+ if ((prefer_lm_id > 0) && (iter_i.blk->id != prefer_lm_id))
+ continue;
+
if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp, reqs,
lm[lm_count], &dspp[lm_count], &pp[lm_count],
NULL))
@@ -699,6 +707,7 @@ static int _sde_rm_reserve_lms(
continue;
lm[lm_count] = iter_j.blk;
+
++lm_count;
}
}
@@ -747,7 +756,8 @@ static int _sde_rm_reserve_lms(
static int _sde_rm_reserve_ctls(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
- struct sde_rm_requirements *reqs)
+ struct sde_rm_requirements *reqs,
+ uint32_t prefer_ctl_id)
{
struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
struct sde_rm_hw_iter iter;
@@ -769,6 +779,14 @@ static int _sde_rm_reserve_ctls(
SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, caps);
+ /* grab the preferred ctl as soon as it is found */
+ if ((prefer_ctl_id > 0) && (iter.blk->id == prefer_ctl_id)) {
+ ctls[i] = iter.blk;
+
+ if (++i == reqs->num_ctl)
+ break;
+ }
+
if (reqs->needs_split_display != has_split_display)
continue;
@@ -928,10 +946,10 @@ static int _sde_rm_make_next_rsvp(
* - Check mixers without DSPPs
* - Only then allow to grab from mixers with DSPP capability
*/
- ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, 0);
if (ret && !RM_RQ_DSPP(reqs)) {
reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
- ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, 0);
}
if (ret) {
@@ -944,10 +962,10 @@ static int _sde_rm_make_next_rsvp(
* - Check mixers without Split Display
* - Only then allow to grab from CTLs with split display capability
*/
- _sde_rm_reserve_ctls(rm, rsvp, reqs);
+ _sde_rm_reserve_ctls(rm, rsvp, reqs, 0);
if (ret && !reqs->needs_split_display) {
reqs->needs_split_display = true;
- _sde_rm_reserve_ctls(rm, rsvp, reqs);
+ _sde_rm_reserve_ctls(rm, rsvp, reqs, 0);
}
if (ret) {
SDE_ERROR("unable to find appropriate CTL\n");
@@ -962,6 +980,109 @@ static int _sde_rm_make_next_rsvp(
return ret;
}
+static int _sde_rm_make_next_rsvp_for_splash(
+ struct sde_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs)
+{
+ int ret;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+ int i;
+ int intf_id = INTF_0;
+ u32 prefer_lm_id = 0;
+ u32 prefer_ctl_id = 0;
+
+ if (!enc->dev || !enc->dev->dev_private) {
+ SDE_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+
+ priv = enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+ sinfo = &sde_kms->splash_info;
+
+ /* Get the intf id first, then reserve the same lm and ctl
+ * that the bootloader used, for the kernel resource manager
+ */
+ for (i = 0; i < ARRAY_SIZE(reqs->hw_res.intfs); i++) {
+ if (reqs->hw_res.intfs[i] == INTF_MODE_NONE)
+ continue;
+ intf_id = i + INTF_0;
+ break;
+ }
+
+ /* get preferred lm id and ctl id */
+ for (i = 0; i < CTL_MAX - 1; i++) {
+ if (sinfo->res.top[i].intf_sel != intf_id)
+ continue;
+
+ prefer_lm_id = sinfo->res.top[i].lm[0].lm_id;
+ prefer_ctl_id = sinfo->res.top[i].lm[0].ctl_id;
+ break;
+ }
+
+ SDE_DEBUG("intf_id %d, prefer lm_id %d, ctl_id %d\n",
+ intf_id, prefer_lm_id, prefer_ctl_id);
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ /*
+ * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
+ * Do assignment preferring to give away low-resource mixers first:
+ * - Check mixers without DSPPs
+ * - Only then allow to grab from mixers with DSPP capability
+ */
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, prefer_lm_id);
+ if (ret && !RM_RQ_DSPP(reqs)) {
+ reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, prefer_lm_id);
+ }
+
+ if (ret) {
+ SDE_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check mixers without Split Display
+ * - Only then allow to grab from CTLs with split display capability
+ */
+ for (i = 0; i < sinfo->res.ctl_top_cnt; i++)
+ SDE_DEBUG("splash_info ctl_ids[%d] = %d\n",
+ i, sinfo->res.ctl_ids[i]);
+
+ ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, prefer_ctl_id);
+ if (ret && !reqs->needs_split_display) {
+ reqs->needs_split_display = true;
+ _sde_rm_reserve_ctls(rm, rsvp, reqs, prefer_ctl_id);
+ }
+
+ if (ret) {
+ SDE_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
+ ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+
+ return ret;
+}
+
static int _sde_rm_populate_requirements(
struct sde_rm *rm,
struct drm_encoder *enc,
@@ -1253,6 +1374,8 @@ int sde_rm_reserve(
{
struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct sde_rm_requirements reqs;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
int ret;
if (!rm || !enc || !crtc_state || !conn_state) {
@@ -1260,6 +1383,19 @@ int sde_rm_reserve(
return -EINVAL;
}
+ if (!enc->dev || !enc->dev->dev_private) {
+ SDE_ERROR("invalid drm device\n");
+ return -EINVAL;
+ }
+
+ priv = enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invald kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+
/* Check if this is just a page-flip */
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
@@ -1318,8 +1454,13 @@ int sde_rm_reserve(
}
/* Check the proposed reservation, store it in hw's "next" field */
- ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
- rsvp_nxt, &reqs);
+ if (sde_kms->splash_info.handoff) {
+ SDE_DEBUG("Reserve resource for splash\n");
+ ret = _sde_rm_make_next_rsvp_for_splash
+ (rm, enc, crtc_state, conn_state, rsvp_nxt, &reqs);
+ } else
+ ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+ rsvp_nxt, &reqs);
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_RSVPNEXT);
@@ -1352,3 +1493,92 @@ end:
return ret;
}
+
+static int _sde_rm_get_ctl_lm_for_splash(struct sde_hw_ctl *ctl,
+ int max_lm_cnt, u8 lm_cnt, u8 *lm_ids,
+ struct splash_ctl_top *top, int index)
+{
+ int j;
+ struct splash_lm_hw *lm;
+
+ if (!ctl || !top) {
+ SDE_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ lm = top->lm;
+ for (j = 0; j < max_lm_cnt; j++) {
+ lm[top->ctl_lm_cnt].lm_reg_value =
+ ctl->ops.read_ctl_layers_for_splash(ctl, j + LM_0);
+
+ if (lm[top->ctl_lm_cnt].lm_reg_value) {
+ lm[top->ctl_lm_cnt].ctl_id = index + CTL_0;
+ lm_ids[lm_cnt++] = j + LM_0;
+ lm[top->ctl_lm_cnt].lm_id = j + LM_0;
+ top->ctl_lm_cnt++;
+ }
+ }
+
+ return top->ctl_lm_cnt;
+}
+
+static void _sde_rm_get_ctl_top_for_splash(struct sde_hw_ctl *ctl,
+ struct splash_ctl_top *top)
+{
+ if (!ctl || !top) {
+ SDE_ERROR("invalid ctl or top\n");
+ return;
+ }
+
+ if (!ctl->ops.read_ctl_top_for_splash) {
+ SDE_ERROR("read_ctl_top not initialized\n");
+ return;
+ }
+
+ top->value = ctl->ops.read_ctl_top_for_splash(ctl);
+ top->intf_sel = (top->value >> 4) & 0xf;
+}
+
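A quick worked read of the decode above; the field layout is inferred only from the shift and mask in this patch, not quoted from hardware documentation:

	u32 value   = 0x00000020;          /* example CTL_TOP readback */
	u8 intf_sel = (value >> 4) & 0xf;  /* = 2; later matched against intf_id
					    * in _sde_rm_make_next_rsvp_for_splash() */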
+int sde_rm_read_resource_for_splash(struct sde_rm *rm,
+ void *splash_info,
+ struct sde_mdss_cfg *cat)
+{
+ struct sde_rm_hw_iter ctl_iter;
+ int index = 0;
+ struct sde_splash_info *sinfo;
+ struct sde_hw_ctl *ctl;
+
+ if (!rm || !splash_info || !cat)
+ return -EINVAL;
+
+ sinfo = (struct sde_splash_info *)splash_info;
+
+ sde_rm_init_hw_iter(&ctl_iter, 0, SDE_HW_BLK_CTL);
+
+ while (_sde_rm_get_hw_locked(rm, &ctl_iter)) {
+ ctl = (struct sde_hw_ctl *)ctl_iter.hw;
+
+ _sde_rm_get_ctl_top_for_splash(ctl,
+ &sinfo->res.top[index]);
+
+ if (sinfo->res.top[index].intf_sel) {
+ sinfo->res.lm_cnt +=
+ _sde_rm_get_ctl_lm_for_splash(ctl,
+ cat->mixer_count,
+ sinfo->res.lm_cnt,
+ sinfo->res.lm_ids,
+ &sinfo->res.top[index], index);
+
+ sinfo->res.ctl_ids[sinfo->res.ctl_top_cnt] =
+ index + CTL_0;
+
+ sinfo->res.ctl_top_cnt++;
+ }
+ index++;
+ }
+
+ SDE_DEBUG("%s: ctl_top_cnt=%d, lm_cnt=%d\n", __func__,
+ sinfo->res.ctl_top_cnt, sinfo->res.lm_cnt);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 87e95bfebe98..bec398a3b996 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -212,4 +212,14 @@ int sde_rm_check_property_topctl(uint64_t val);
*/
int sde_rm_check_property_topctl(uint64_t val);
+/**
+ * sde_rm_read_resource_for_splash - read splash resource used in bootloader
+ * @rm: SDE Resource Manager handle
+ * @sinfo: handle for splash info
+ * @cat: Pointer to hardware catalog
+ */
+int sde_rm_read_resource_for_splash(struct sde_rm *rm,
+ void *sinfo,
+ struct sde_mdss_cfg *cat);
+
#endif /* __SDE_RM_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c
index 2789ae053663..f124bd7d5904 100644
--- a/drivers/gpu/drm/msm/sde/sde_splash.c
+++ b/drivers/gpu/drm/msm/sde/sde_splash.c
@@ -22,7 +22,9 @@
#include "sde_hw_util.h"
#include "sde_hw_intf.h"
#include "sde_hw_catalog.h"
+#include "sde_rm.h"
#include "dsi_display.h"
+#include "sde_hdmi.h"
#define MDP_SSPP_TOP0_OFF 0x1000
#define DISP_INTF_SEL 0x004
@@ -34,10 +36,12 @@
#define SCRATCH_REGISTER_2 0x01C
#define SDE_LK_RUNNING_VALUE 0xC001CAFE
-#define SDE_LK_SHUT_DOWN_VALUE 0xDEADDEAD
+#define SDE_LK_STOP_SPLASH_VALUE 0xDEADDEAD
#define SDE_LK_EXIT_VALUE 0xDEADBEEF
-#define SDE_LK_EXIT_MAX_LOOP 20
+#define INTF_HDMI_SEL (BIT(25) | BIT(24))
+#define INTF_DSI0_SEL BIT(8)
+#define INTF_DSI1_SEL BIT(16)
static DEFINE_MUTEX(sde_splash_lock);
@@ -184,26 +188,14 @@ static bool _sde_splash_lk_check(struct sde_hw_intr *intr)
}
/**
- * _sde_splash_notify_lk_to_exit.
+ * _sde_splash_notify_lk_stop_splash.
*
- * Function to monitor LK's status and tell it to exit.
+ * Function to stop early splash in LK.
*/
-static void _sde_splash_notify_lk_exit(struct sde_hw_intr *intr)
+static inline void _sde_splash_notify_lk_stop_splash(struct sde_hw_intr *intr)
{
- int i = 0;
-
- /* first is to write exit signal to scratch register*/
- SDE_REG_WRITE(&intr->hw, SCRATCH_REGISTER_1, SDE_LK_SHUT_DOWN_VALUE);
-
- while ((SDE_LK_EXIT_VALUE !=
- SDE_REG_READ(&intr->hw, SCRATCH_REGISTER_1)) &&
- (++i < SDE_LK_EXIT_MAX_LOOP)) {
- DRM_INFO("wait for LK's exit");
- msleep(20);
- }
-
- if (i == SDE_LK_EXIT_MAX_LOOP)
- SDE_ERROR("Loop LK's exit failed\n");
+ /* write splash stop signal to scratch register */
+ SDE_REG_WRITE(&intr->hw, SCRATCH_REGISTER_1, SDE_LK_STOP_SPLASH_VALUE);
}
static int _sde_splash_gem_new(struct drm_device *dev,
@@ -283,31 +275,66 @@ static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo)
sinfo->splash_mem_size = NULL;
}
-static void _sde_splash_get_connector_ref_cnt(struct sde_splash_info *sinfo,
- u32 *hdmi_cnt, u32 *dsi_cnt)
+static void _sde_splash_sent_pipe_update_uevent(struct sde_kms *sde_kms)
{
- mutex_lock(&sde_splash_lock);
- *hdmi_cnt = sinfo->hdmi_connector_cnt;
- *dsi_cnt = sinfo->dsi_connector_cnt;
- mutex_unlock(&sde_splash_lock);
+ char *event_string;
+ char *envp[2];
+ struct drm_device *dev;
+ struct device *kdev;
+ int i = 0;
+
+ if (!sde_kms || !sde_kms->dev) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ dev = sde_kms->dev;
+ kdev = dev->primary->kdev;
+
+ event_string = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!event_string) {
+ SDE_ERROR("failed to allocate event string\n");
+ return;
+ }
+
+ for (i = 0; i < MAX_BLOCKS; i++) {
+ if (sde_kms->splash_info.reserved_pipe_info[i] != 0xFFFFFFFF)
+ snprintf(event_string, SZ_4K, "pipe%d available",
+ sde_kms->splash_info.reserved_pipe_info[i]);
+ }
+
+ DRM_INFO("generating pipe update event[%s]", event_string);
+
+ envp[0] = event_string;
+ envp[1] = NULL;
+
+ kobject_uevent_env(&kdev->kobj, KOBJ_CHANGE, envp);
+
+ kfree(event_string);
}
-static int _sde_splash_free_resource(struct msm_mmu *mmu,
- struct sde_splash_info *sinfo, enum splash_connector_type conn)
+static int _sde_splash_free_module_resource(struct msm_mmu *mmu,
+ struct sde_splash_info *sinfo)
{
- struct msm_gem_object *msm_obj = to_msm_bo(sinfo->obj[conn]);
+ int i = 0;
+ struct msm_gem_object *msm_obj;
- if (!msm_obj)
- return -EINVAL;
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ msm_obj = to_msm_bo(sinfo->obj[i]);
- if (mmu->funcs && mmu->funcs->unmap)
- mmu->funcs->early_splash_unmap(mmu,
- sinfo->splash_mem_paddr[conn], msm_obj->sgt);
+ if (!msm_obj)
+ return -EINVAL;
- _sde_splash_free_bootup_memory_to_system(sinfo->splash_mem_paddr[conn],
- sinfo->splash_mem_size[conn]);
+ if (mmu->funcs && mmu->funcs->unmap)
+ mmu->funcs->early_splash_unmap(mmu,
+ sinfo->splash_mem_paddr[i], msm_obj->sgt);
- _sde_splash_destroy_gem_object(msm_obj);
+ _sde_splash_free_bootup_memory_to_system(
+ sinfo->splash_mem_paddr[i],
+ sinfo->splash_mem_size[i]);
+
+ _sde_splash_destroy_gem_object(msm_obj);
+ }
return 0;
}
@@ -316,6 +343,7 @@ __ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms)
{
struct sde_kms *sde_kms;
struct sde_splash_info *sinfo;
+ int ret = 0;
int i = 0;
if (!phandle || !kms) {
@@ -329,22 +357,27 @@ __ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms)
sinfo->dsi_connector_cnt = 0;
sinfo->hdmi_connector_cnt = 0;
+ /* Vote for data bus bandwidth since splash was enabled in the bootloader */
sde_power_data_bus_bandwidth_ctrl(phandle,
sde_kms->core_client, true);
for (i = 0; i < sinfo->splash_mem_num; i++) {
if (!memblock_is_reserved(sinfo->splash_mem_paddr[i])) {
- SDE_ERROR("failed to reserve memory\n");
+ SDE_ERROR("LK's splash memory is not reserved\n");
/* withdraw the vote when failed. */
sde_power_data_bus_bandwidth_ctrl(phandle,
sde_kms->core_client, false);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
}
- return 0;
+ ret = sde_rm_read_resource_for_splash(&sde_kms->rm,
+ (void *)sinfo, sde_kms->catalog);
+
+ return ret;
}
void sde_splash_destroy(struct sde_splash_info *sinfo,
@@ -372,12 +405,12 @@ void sde_splash_destroy(struct sde_splash_info *sinfo,
}
/*
- * sde_splash_parse_dt.
+ * sde_splash_parse_memory_dt.
* In the function, it will parse and reserve two kinds of memory node.
* First is to get the reserved memory for display buffers.
- * Second is to get the memory node LK's code stack is running on.
+ * Second is to get the memory node that LK's heap memory resides in.
*/
-int sde_splash_parse_dt(struct drm_device *dev)
+int sde_splash_parse_memory_dt(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct sde_kms *sde_kms;
@@ -404,6 +437,79 @@ int sde_splash_parse_dt(struct drm_device *dev)
return 0;
}
+static inline u32 _sde_splash_parse_sspp_id(struct sde_mdss_cfg *cfg,
+ const char *name)
+{
+ int i;
+
+ for (i = 0; i < cfg->sspp_count; i++) {
+ if (!strcmp(cfg->sspp[i].name, name))
+ return cfg->sspp[i].id;
+ }
+
+ return 0;
+}
+
+int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info,
+ struct sde_mdss_cfg *cfg)
+{
+ struct device_node *parent, *node;
+ struct property *prop;
+ const char *cname;
+ int ret = 0, i = 0;
+
+ if (!splash_info || !cfg)
+ return -EINVAL;
+
+ parent = of_find_node_by_path("/qcom,sde-reserved-plane");
+ if (!parent)
+ return -EINVAL;
+
+ for (i = 0; i < MAX_BLOCKS; i++)
+ splash_info->reserved_pipe_info[i] = 0xFFFFFFFF;
+
+ i = 0;
+ for_each_child_of_node(parent, node) {
+ if (i >= MAX_BLOCKS) {
+ SDE_ERROR("num of nodes(%d) is bigger than max(%d)\n",
+ i, MAX_BLOCKS);
+ ret = -EINVAL;
+ goto parent_node_err;
+ }
+
+ of_property_for_each_string(node, "qcom,plane-name",
+ prop, cname)
+ splash_info->reserved_pipe_info[i] =
+ _sde_splash_parse_sspp_id(cfg, cname);
+ i++;
+ }
+
+parent_node_err:
+ of_node_put(parent);
+
+ return ret;
+}
+
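For reference, an illustrative device-tree fragment in the shape this parser walks; only the node path and the "qcom,plane-name" property come from the code above, while the child-node and plane names are placeholders:

	/*
	 *	qcom,sde-reserved-plane {
	 *		reserved-plane-rvc {
	 *			qcom,plane-name = "vig0";
	 *		};
	 *	};
	 */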
+bool sde_splash_query_plane_is_reserved(struct sde_splash_info *sinfo,
+ uint32_t pipe)
+{
+ int i = 0;
+
+ if (!sinfo)
+ return false;
+
+ /* early return if no splash is enabled */
+ if (!sinfo->handoff)
+ return false;
+
+ for (i = 0; i < MAX_BLOCKS; i++) {
+ if (sinfo->reserved_pipe_info[i] == pipe)
+ return true;
+ }
+
+ return false;
+}
+
int sde_splash_get_handoff_status(struct msm_kms *kms)
{
uint32_t intf_sel = 0;
@@ -448,17 +554,20 @@ int sde_splash_get_handoff_status(struct msm_kms *kms)
* considered as single display. So decrement
* 'num_of_display_on' by 1
*/
- if (split_display)
+ if (split_display) {
num_of_display_on--;
+ sinfo->split_is_enabled = true;
+ }
}
if (num_of_display_on) {
sinfo->handoff = true;
- sinfo->program_scratch_regs = true;
+ sinfo->display_splash_enabled = true;
sinfo->lk_is_exited = false;
+ sinfo->intf_sel_status = intf_sel;
} else {
sinfo->handoff = false;
- sinfo->program_scratch_regs = false;
+ sinfo->display_splash_enabled = false;
sinfo->lk_is_exited = true;
}
@@ -504,6 +613,71 @@ int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
return ret ? 0 : -ENOMEM;
}
+static bool _sde_splash_get_panel_intf_status(struct sde_splash_info *sinfo,
+ const char *display_name, int connector_type)
+{
+ bool ret = false;
+ int intf_status = 0;
+
+ if (sinfo && sinfo->handoff) {
+ if (connector_type == DRM_MODE_CONNECTOR_DSI) {
+ if (!strcmp(display_name, "dsi_adv_7533_1")) {
+ if (sinfo->intf_sel_status & INTF_DSI0_SEL)
+ ret = true;
+ } else if (!strcmp(display_name, "dsi_adv_7533_2")) {
+ if (sinfo->intf_sel_status & INTF_DSI1_SEL)
+ ret = true;
+ } else
+ DRM_INFO("wrong display name %s\n",
+ display_name);
+ } else if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ intf_status = sinfo->intf_sel_status & INTF_HDMI_SEL;
+ ret = (intf_status == INTF_HDMI_SEL);
+ }
+ }
+
+ return ret;
+}
+
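A small worked example of the bit test above, using the INTF_*_SEL masks defined near the top of this file (the sample value is illustrative):

	u32 status = BIT(8) | BIT(24) | BIT(25);   /* sample DISP_INTF_SEL snapshot */

	bool dsi0_lit = status & INTF_DSI0_SEL;                     /* true: DSI0 panel lit by LK */
	bool hdmi_lit = (status & INTF_HDMI_SEL) == INTF_HDMI_SEL;  /* true: both HDMI bits set */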
+int sde_splash_setup_display_resource(struct sde_splash_info *sinfo,
+ void *disp, int connector_type)
+{
+ if (!sinfo || !disp)
+ return -EINVAL;
+
+ /* early return if splash is not enabled in bootloader */
+ if (!sinfo->handoff)
+ return 0;
+
+ if (connector_type == DRM_MODE_CONNECTOR_DSI) {
+ struct dsi_display *display = (struct dsi_display *)disp;
+
+ display->cont_splash_enabled =
+ _sde_splash_get_panel_intf_status(sinfo,
+ display->name,
+ connector_type);
+
+ DRM_INFO("DSI splash %s\n",
+ display->cont_splash_enabled ? "enabled" : "disabled");
+
+ if (display->cont_splash_enabled) {
+ if (dsi_dsiplay_setup_splash_resource(display))
+ return -EINVAL;
+ }
+ } else if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)disp;
+
+ sde_hdmi->cont_splash_enabled =
+ _sde_splash_get_panel_intf_status(sinfo,
+ NULL, connector_type);
+
+ DRM_INFO("HDMI splash %s\n",
+ sde_hdmi->cont_splash_enabled ? "enabled" : "disabled");
+ }
+
+ return 0;
+}
+
void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
int connector_type)
{
@@ -519,29 +693,34 @@ void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
}
}
-bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo)
+bool sde_splash_get_lk_complete_status(struct msm_kms *kms)
{
- bool ret = 0;
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ struct sde_hw_intr *intr;
- mutex_lock(&sde_splash_lock);
- ret = !sinfo->handoff && !sinfo->lk_is_exited;
- mutex_unlock(&sde_splash_lock);
+ if (!sde_kms || !sde_kms->hw_intr) {
+ SDE_ERROR("invalid kms\n");
+ return false;
+ }
- return ret;
+ intr = sde_kms->hw_intr;
+
+ if (sde_kms->splash_info.handoff &&
+ SDE_LK_EXIT_VALUE == SDE_REG_READ(&intr->hw,
+ SCRATCH_REGISTER_1)) {
+ SDE_DEBUG("LK totoally exits\n");
+ return true;
+ }
+
+ return false;
}
-int sde_splash_clean_up_free_resource(struct msm_kms *kms,
- struct sde_power_handle *phandle,
- int connector_type, void *display)
+int sde_splash_free_resource(struct msm_kms *kms,
+ struct sde_power_handle *phandle)
{
struct sde_kms *sde_kms;
struct sde_splash_info *sinfo;
struct msm_mmu *mmu;
- struct dsi_display *dsi_display = display;
- int ret = 0;
- int hdmi_conn_count = 0;
- int dsi_conn_count = 0;
- static const char *last_commit_display_type = "unknown";
if (!phandle || !kms) {
SDE_ERROR("invalid phandle/kms.\n");
@@ -555,86 +734,49 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms,
return -EINVAL;
}
- _sde_splash_get_connector_ref_cnt(sinfo, &hdmi_conn_count,
- &dsi_conn_count);
-
mutex_lock(&sde_splash_lock);
- if (hdmi_conn_count == 0 && dsi_conn_count == 0 &&
- !sinfo->lk_is_exited) {
- /* When both hdmi's and dsi's handoff are finished,
- * 1. Destroy splash node objects.
- * 2. Release the memory which LK's stack is running on.
- * 3. Withdraw AHB data bus bandwidth voting.
- */
- DRM_INFO("HDMI and DSI resource handoff is completed\n");
-
- sinfo->lk_is_exited = true;
-
- _sde_splash_destroy_splash_node(sinfo);
-
- _sde_splash_free_bootup_memory_to_system(sinfo->lk_pool_paddr,
- sinfo->lk_pool_size);
-
- sde_power_data_bus_bandwidth_ctrl(phandle,
- sde_kms->core_client, false);
-
+ if (!sinfo->handoff) {
mutex_unlock(&sde_splash_lock);
return 0;
}
mmu = sde_kms->aspace[0]->mmu;
+ if (!mmu) {
+ mutex_unlock(&sde_splash_lock);
+ return -EINVAL;
+ }
- switch (connector_type) {
- case DRM_MODE_CONNECTOR_HDMIA:
- if (sinfo->hdmi_connector_cnt == 1) {
- sinfo->hdmi_connector_cnt--;
+ /* free HDMI's, DSI's and early camera's reserved memory */
+ _sde_splash_free_module_resource(mmu, sinfo);
- ret = _sde_splash_free_resource(mmu,
- sinfo, SPLASH_HDMI);
- }
- break;
- case DRM_MODE_CONNECTOR_DSI:
- /*
- * Basically, we have commits coming on two DSI connectors.
- * So when releasing DSI resource, it's ensured that the
- * coming commits should happen on different DSIs, to promise
- * the handoff has finished on the two DSIs, then it's safe
- * to release DSI resource, otherwise, problem happens when
- * freeing memory, while DSI0 or DSI1 is still visiting
- * the memory.
- */
- if (strcmp(dsi_display->display_type, "unknown") &&
- strcmp(last_commit_display_type,
- dsi_display->display_type)) {
- if (sinfo->dsi_connector_cnt > 1)
- sinfo->dsi_connector_cnt--;
- else if (sinfo->dsi_connector_cnt == 1) {
- ret = _sde_splash_free_resource(mmu,
- sinfo, SPLASH_DSI);
-
- sinfo->dsi_connector_cnt--;
- }
+ _sde_splash_destroy_splash_node(sinfo);
- last_commit_display_type = dsi_display->display_type;
- }
- break;
- default:
- ret = -EINVAL;
- SDE_ERROR("%s: invalid connector_type %d\n",
- __func__, connector_type);
- }
+ /* free lk_pool heap memory */
+ _sde_splash_free_bootup_memory_to_system(sinfo->lk_pool_paddr,
+ sinfo->lk_pool_size);
- mutex_unlock(&sde_splash_lock);
+ /* withdraw data bus vote */
+ sde_power_data_bus_bandwidth_ctrl(phandle,
+ sde_kms->core_client, false);
- return ret;
+ /* send uevent to notify user to recycle resource */
+ _sde_splash_sent_pipe_update_uevent(sde_kms);
+
+ /* Finally clear the handoff flag to indicate handoff is complete */
+ sinfo->handoff = false;
+
+ DRM_INFO("HDMI and DSI resource handoff is completed\n");
+
+ mutex_unlock(&sde_splash_lock);
+ return 0;
}
/*
* In below function, it will
- * 1. Notify LK to exit and wait for exiting is done.
+ * 1. Notify LK to stop display splash.
* 2. Set DOMAIN_ATTR_EARLY_MAP to 1 to enable stage 1 translation in iommu.
*/
-int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
+int sde_splash_lk_stop_splash(struct msm_kms *kms)
{
struct sde_splash_info *sinfo;
struct msm_mmu *mmu;
@@ -650,12 +792,11 @@ int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
/* Monitor LK's status and tell it to exit. */
mutex_lock(&sde_splash_lock);
- if (sinfo->program_scratch_regs) {
+ if (sinfo->display_splash_enabled) {
if (_sde_splash_lk_check(sde_kms->hw_intr))
- _sde_splash_notify_lk_exit(sde_kms->hw_intr);
+ _sde_splash_notify_lk_stop_splash(sde_kms->hw_intr);
- sinfo->handoff = false;
- sinfo->program_scratch_regs = false;
+ sinfo->display_splash_enabled = false;
}
mutex_unlock(&sde_splash_lock);
@@ -672,7 +813,8 @@ int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
*/
if (mmu->funcs && mmu->funcs->set_property) {
ret = mmu->funcs->set_property(mmu,
- DOMAIN_ATTR_EARLY_MAP, &sinfo->handoff);
+ DOMAIN_ATTR_EARLY_MAP,
+ &sinfo->display_splash_enabled);
if (ret)
SDE_ERROR("set_property failed\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h
index babf88335e49..2fd8ba03112f 100644
--- a/drivers/gpu/drm/msm/sde/sde_splash.h
+++ b/drivers/gpu/drm/msm/sde/sde_splash.h
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,18 +15,46 @@
#include "msm_kms.h"
#include "msm_mmu.h"
+#include "sde_hw_mdss.h"
+
+#define SPLASH_CTL_MAX 5
+#define SPLASH_LM_MAX 7
enum splash_connector_type {
SPLASH_DSI = 0,
SPLASH_HDMI,
};
+struct splash_lm_hw {
+ u8 lm_id;
+ u8 ctl_id;
+ u32 lm_reg_value;
+};
+
+struct splash_ctl_top {
+ u32 value;
+ u8 intf_sel;
+ u8 ctl_lm_cnt;
+ struct splash_lm_hw lm[SPLASH_LM_MAX];
+};
+
+struct sde_res_data {
+ struct splash_ctl_top top[SPLASH_CTL_MAX];
+ u8 ctl_ids[SPLASH_CTL_MAX];
+ u8 lm_ids[SPLASH_LM_MAX];
+ u8 ctl_top_cnt;
+ u8 lm_cnt;
+};
+
struct sde_splash_info {
/* handoff flag */
bool handoff;
- /* flag of display scratch registers */
- bool program_scratch_regs;
+ /* current hw configuration */
+ struct sde_res_data res;
+
+ /* flag of display splash status */
+ bool display_splash_enabled;
/* to indicate LK is totally exited */
bool lk_is_exited;
@@ -49,11 +77,20 @@ struct sde_splash_info {
/* memory size of lk pool */
size_t lk_pool_size;
+ /* enabled status of displays */
+ uint32_t intf_sel_status;
+
+ /* DSI split enabled flag */
+ bool split_is_enabled;
+
/* registered hdmi connector count */
uint32_t hdmi_connector_cnt;
/* registered dsi connector count */
uint32_t dsi_connector_cnt;
+
+ /* reserved pipe info for early RVC */
+ uint32_t reserved_pipe_info[MAX_BLOCKS];
};
/* APIs for early splash handoff functions */
@@ -82,28 +119,43 @@ void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
int connector_type);
/**
- * sde_splash_clean_up_exit_lk.
+ * sde_splash_lk_stop_splash.
*
- * Tell LK to exit, and clean up the resource.
+ * Tell LK to stop display splash.
*/
-int sde_splash_clean_up_exit_lk(struct msm_kms *kms);
+int sde_splash_lk_stop_splash(struct msm_kms *kms);
/**
- * sde_splash_clean_up_free_resource.
+ * sde_splash_free_resource.
*
* According to input connector_type, free
* HDMI's and DSI's resource respectively.
*/
-int sde_splash_clean_up_free_resource(struct msm_kms *kms,
- struct sde_power_handle *phandle,
- int connector_type, void *display);
+int sde_splash_free_resource(struct msm_kms *kms,
+ struct sde_power_handle *phandle);
/**
- * sde_splash_parse_dt.
+ * sde_splash_parse_memory_dt.
*
* Parse reserved memory block from DT for early splash.
*/
-int sde_splash_parse_dt(struct drm_device *dev);
+int sde_splash_parse_memory_dt(struct drm_device *dev);
+
+/**
+ * sde_splash_parse_reserved_plane_dt
+ *
+ * Parse reserved plane information from DT for early RVC case.
+ */
+int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info,
+ struct sde_mdss_cfg *cfg);
+
+/*
+ * sde_splash_query_plane_is_reserved
+ *
+ * Query whether the plane is reserved in DT.
+ */
+bool sde_splash_query_plane_is_reserved(struct sde_splash_info *sinfo,
+ uint32_t pipe);
/**
* sde_splash_smmu_map.
@@ -127,6 +179,13 @@ void sde_splash_destroy(struct sde_splash_info *sinfo,
*
* Get LK's status to check if it has been stopped.
*/
-bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo);
+bool sde_splash_get_lk_complete_status(struct msm_kms *kms);
+/**
+ * sde_splash_setup_display_resource
+ *
+ * Setup display resource based on connector type.
+ */
+int sde_splash_setup_display_resource(struct sde_splash_info *sinfo,
+ void *disp, int connector_type);
#endif
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index c886950e5212..93c4c1e27b0d 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -1643,6 +1643,11 @@ static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff,
len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, evtlog_buf,
SDE_EVTLOG_BUF_MAX, true);
+ if (len < 0 || len > count) {
+ pr_err("len is more than user buffer size");
+ return 0;
+ }
+
if (copy_to_user(buff, evtlog_buf, len))
return -EFAULT;
*ppos += len;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 7ed08fdc4c42..393e5335e33b 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -158,7 +158,7 @@ static void evict_entry(struct drm_gem_object *obj,
size_t size = PAGE_SIZE * n;
loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
if (m > 1) {
int i;
@@ -415,7 +415,7 @@ static int fault_2d(struct drm_gem_object *obj,
* into account in some of the math, so figure out virtual stride
* in pages
*/
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = ((unsigned long)vmf->virtual_address -
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index fb6ad143873f..83aee9e814ba 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -238,9 +238,10 @@ int radeon_bo_create(struct radeon_device *rdev,
* may be slow
* See https://bugs.freedesktop.org/show_bug.cgi?id=88758
*/
-
+#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
thanks to write-combining
+#endif
if (bo->flags & RADEON_GEM_GTT_WC)
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index d9007cc37be1..892d0a71d766 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5964,9 +5964,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
{
u32 lane_width;
u32 new_lane_width =
- (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
- (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
radeon_set_pcie_lanes(rdev, new_lane_width);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a745b9d..52436b3c01bb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -324,7 +324,7 @@ retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
spin_lock(&vgdev->ctrlq.qlock);
goto retry;
} else {
@@ -399,7 +399,7 @@ retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->cursorq.qlock);
- wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+ wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index bbb3b473c185..d1d399cce06a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1331,7 +1331,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
* of implement() working on 8 byte chunks
*/
- int len = hid_report_len(report) + 7;
+ u32 len = hid_report_len(report) + 7;
return kmalloc(len, flags);
}
@@ -1396,7 +1396,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
{
char *buf;
int ret;
- int len;
+ u32 len;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
@@ -1422,14 +1422,14 @@ out:
}
EXPORT_SYMBOL_GPL(__hid_request);
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt)
{
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
struct hid_driver *hdrv;
unsigned int a;
- int rsize, csize = size;
+ u32 rsize, csize = size;
u8 *cdata = data;
int ret = 0;
@@ -1487,7 +1487,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
*
* This is data entry for lower layers.
*/
-int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
+int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
{
struct hid_report_enum *report_enum;
struct hid_driver *hdrv;
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 53e54855c366..8d74e691ac90 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1258,7 +1258,8 @@ static void hidinput_led_worker(struct work_struct *work)
led_work);
struct hid_field *field;
struct hid_report *report;
- int len, ret;
+ int ret;
+ u32 len;
__u8 *buf;
field = hidinput_get_led_field(hid);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f62a9d6601cc..9de379c1b3fd 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -314,7 +314,8 @@ static struct attribute_group mt_attribute_group = {
static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hdev);
- int ret, size = hid_report_len(report);
+ int ret;
+ u32 size = hid_report_len(report);
u8 *buf;
/*
@@ -919,7 +920,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
struct hid_report_enum *re;
struct mt_class *cls = &td->mtclass;
char *buf;
- int report_len;
+ u32 report_len;
if (td->inputmode < 0)
return;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 67cd059a8f46..41a4a2af9db1 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -110,8 +110,8 @@ struct rmi_data {
u8 *writeReport;
u8 *readReport;
- int input_report_size;
- int output_report_size;
+ u32 input_report_size;
+ u32 output_report_size;
unsigned long flags;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 9c2d7c23f296..c0c4df198725 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -197,6 +197,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
int ret = 0, len;
unsigned char report_number;
+ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ ret = -ENODEV;
+ goto out;
+ }
+
dev = hidraw_table[minor]->hid;
if (!dev->ll_driver->raw_request) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 312aa1e33fb2..4c3ed078c6b9 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -137,10 +137,10 @@ struct i2c_hid {
* register of the HID
* descriptor. */
unsigned int bufsize; /* i2c buffer size */
- char *inbuf; /* Input buffer */
- char *rawbuf; /* Raw Input buffer */
- char *cmdbuf; /* Command buffer */
- char *argsbuf; /* Command arguments buffer */
+ u8 *inbuf; /* Input buffer */
+ u8 *rawbuf; /* Raw Input buffer */
+ u8 *cmdbuf; /* Command buffer */
+ u8 *argsbuf; /* Command arguments buffer */
unsigned long flags; /* device flags */
@@ -387,7 +387,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
static void i2c_hid_get_input(struct i2c_hid *ihid)
{
- int ret, ret_size;
+ int ret;
+ u32 ret_size;
int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
if (size > ihid->bufsize)
@@ -412,7 +413,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
return;
}
- if (ret_size > size) {
+ if ((ret_size > size) || (ret_size <= 2)) {
dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
__func__, size, ret_size);
return;
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index b24f1d3045f0..ac63e562071f 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -94,18 +94,20 @@ enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
u16 config_default;
- int calibration_factor;
+ int calibration_value;
int registers;
int shunt_div;
int bus_voltage_shift;
int bus_voltage_lsb; /* uV */
- int power_lsb; /* uW */
+ int power_lsb_factor;
};
struct ina2xx_data {
const struct ina2xx_config *config;
long rshunt;
+ long current_lsb_uA;
+ long power_lsb_uW;
struct mutex config_lock;
struct regmap *regmap;
@@ -115,21 +117,21 @@ struct ina2xx_data {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
.config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
+ .calibration_value = 4096,
.registers = INA219_REGISTERS,
.shunt_div = 100,
.bus_voltage_shift = 3,
.bus_voltage_lsb = 4000,
- .power_lsb = 20000,
+ .power_lsb_factor = 20,
},
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
+ .calibration_value = 2048,
.registers = INA226_REGISTERS,
.shunt_div = 400,
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
- .power_lsb = 25000,
+ .power_lsb_factor = 25,
},
};
@@ -168,12 +170,16 @@ static u16 ina226_interval_to_reg(int interval)
return INA226_SHIFT_AVG(avg_bits);
}
+/*
+ * The calibration register is set to the best value, which eliminates
+ * truncation errors when the hardware computes the current register.
+ * According to the datasheet (eq. 3) the best values are 2048 for
+ * the ina226 and 4096 for the ina219; they are hardcoded as
+ * calibration_value.
+ */
static int ina2xx_calibrate(struct ina2xx_data *data)
{
- u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- data->rshunt);
-
- return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
+ return regmap_write(data->regmap, INA2XX_CALIBRATION,
+ data->config->calibration_value);
}
/*
@@ -186,10 +192,6 @@ static int ina2xx_init(struct ina2xx_data *data)
if (ret < 0)
return ret;
- /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet).
- */
return ina2xx_calibrate(data);
}
@@ -267,15 +269,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_POWER:
- val = regval * data->config->power_lsb;
+ val = regval * data->power_lsb_uW;
break;
case INA2XX_CURRENT:
- /* signed register, LSB=1mA (selected), in mA */
- val = (s16)regval;
+ /* signed register, result in mA */
+ val = regval * data->current_lsb_uA;
+ val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_CALIBRATION:
- val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- regval);
+ val = regval;
break;
default:
/* programmer goofed */
@@ -303,9 +305,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
ina2xx_get_value(data, attr->index, regval));
}
-static ssize_t ina2xx_set_shunt(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+/*
+ * To keep the calibration register value fixed, the product of
+ * current_lsb and shunt_resistor must also stay fixed and equal to
+ * shunt_voltage_lsb = 1 / shunt_div, multiplied by 10^9 to keep
+ * the scale.
+ */
+static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
+{
+ unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
+ data->config->shunt_div);
+ if (val <= 0 || val > dividend)
+ return -EINVAL;
+
+ mutex_lock(&data->config_lock);
+ data->rshunt = val;
+ data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
+ data->power_lsb_uW = data->config->power_lsb_factor *
+ data->current_lsb_uA;
+ mutex_unlock(&data->config_lock);
+
+ return 0;
+}
+
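Worked numbers for the helper above, assuming an ina226 (shunt_div = 400, power_lsb_factor = 25) and a 10 mOhm shunt passed in as 10000 micro-ohms:

	dividend       = DIV_ROUND_CLOSEST(1000000000, 400);   /* 2500000, the fixed LSB product */
	current_lsb_uA = DIV_ROUND_CLOSEST(2500000, 10000);    /* 250 uA per current-register LSB */
	power_lsb_uW   = 25 * 250;                              /* 6250 uW per power-register LSB */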
+static ssize_t ina2xx_store_shunt(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
unsigned long val;
int status;
@@ -315,18 +340,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
if (status < 0)
return status;
- if (val == 0 ||
- /* Values greater than the calibration factor make no sense. */
- val > data->config->calibration_factor)
- return -EINVAL;
-
- mutex_lock(&data->config_lock);
- data->rshunt = val;
- status = ina2xx_calibrate(data);
- mutex_unlock(&data->config_lock);
+ status = ina2xx_set_shunt(data, val);
if (status < 0)
return status;
-
return count;
}
@@ -386,7 +402,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_set_shunt,
+ ina2xx_show_value, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
@@ -431,6 +447,7 @@ static int ina2xx_probe(struct i2c_client *client,
/* set the device type */
data->config = &ina2xx_config[id->driver_data];
+ mutex_init(&data->config_lock);
if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@@ -441,10 +458,7 @@ static int ina2xx_probe(struct i2c_client *client,
val = INA2XX_RSHUNT_DEFAULT;
}
- if (val <= 0 || val > data->config->calibration_factor)
- return -ENODEV;
-
- data->rshunt = val;
+ ina2xx_set_shunt(data, val);
ina2xx_regmap_config.max_register = data->config->registers;
@@ -460,8 +474,6 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENODEV;
}
- mutex_init(&data->config_lock);
-
data->groups[group++] = &ina2xx_group;
if (id->driver_data == ina226)
data->groups[group++] = &ina226_group;
diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c
index c0d962212720..67261bc10e80 100644
--- a/drivers/i2c/busses/i2c-msm-v2.c
+++ b/drivers/i2c/busses/i2c-msm-v2.c
@@ -2848,8 +2848,8 @@ static void i2c_msm_pm_rt_init(struct device *dev) {}
static const struct dev_pm_ops i2c_msm_pm_ops = {
#ifdef CONFIG_PM_SLEEP
- .suspend_noirq = i2c_msm_pm_sys_suspend_noirq,
- .resume_noirq = i2c_msm_pm_sys_resume_noirq,
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(i2c_msm_pm_sys_suspend_noirq,
+ i2c_msm_pm_sys_resume_noirq)
#endif
SET_RUNTIME_PM_OPS(i2c_msm_pm_rt_suspend,
i2c_msm_pm_rt_resume,
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index c73c6c62a6ac..7401f102dff4 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -121,10 +121,21 @@ static int hi8435_write_event_config(struct iio_dev *idev,
enum iio_event_direction dir, int state)
{
struct hi8435_priv *priv = iio_priv(idev);
+ int ret;
+ u32 tmp;
+
+ if (state) {
+ ret = hi8435_readl(priv, HI8435_SO31_0_REG, &tmp);
+ if (ret < 0)
+ return ret;
+ if (tmp & BIT(chan->channel))
+ priv->event_prev_val |= BIT(chan->channel);
+ else
+ priv->event_prev_val &= ~BIT(chan->channel);
- priv->event_scan_mask &= ~BIT(chan->channel);
- if (state)
priv->event_scan_mask |= BIT(chan->channel);
+ } else
+ priv->event_scan_mask &= ~BIT(chan->channel);
return 0;
}
@@ -442,13 +453,15 @@ static int hi8435_probe(struct spi_device *spi)
priv->spi = spi;
reset_gpio = devm_gpiod_get(&spi->dev, NULL, GPIOD_OUT_LOW);
- if (IS_ERR(reset_gpio)) {
- /* chip s/w reset if h/w reset failed */
+ if (!IS_ERR(reset_gpio)) {
+ /* need >=100ns low pulse to reset chip */
+ gpiod_set_raw_value_cansleep(reset_gpio, 0);
+ udelay(1);
+ gpiod_set_raw_value_cansleep(reset_gpio, 1);
+ } else {
+ /* s/w reset chip if h/w reset is not available */
hi8435_writeb(priv, HI8435_CTRL_REG, HI8435_CTRL_SRST);
hi8435_writeb(priv, HI8435_CTRL_REG, 0);
- } else {
- udelay(5);
- gpiod_set_value(reset_gpio, 1);
}
spi_set_drvdata(spi, idev);
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 6325e7dc8e03..f3cb4dc05391 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -48,8 +48,6 @@ static int st_magn_spi_remove(struct spi_device *spi)
}
static const struct spi_device_id st_magn_id_table[] = {
- { LSM303DLHC_MAGN_DEV_NAME },
- { LSM303DLM_MAGN_DEV_NAME },
{ LIS3MDL_MAGN_DEV_NAME },
{ LSM303AGR_MAGN_DEV_NAME },
{},
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 960fcb613198..ea3bc9bb1b7a 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1230,6 +1230,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
if (!optlen)
return -EINVAL;
+ if (!ctx->cm_id->device)
+ return -EINVAL;
+
memset(&sa_path, 0, sizeof(sa_path));
ib_sa_unpack_path(path_data->path_rec, &sa_path);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9a99cee2665a..4fd2892613dd 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2581,9 +2581,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
ret = FAST_IO_FAIL;
else
ret = FAILED;
- srp_free_req(ch, req, scmnd, 0);
- scmnd->result = DID_ABORT << 16;
- scmnd->scsi_done(scmnd);
+ if (ret == SUCCESS) {
+ srp_free_req(ch, req, scmnd, 0);
+ scmnd->result = DID_ABORT << 16;
+ scmnd->scsi_done(scmnd);
+ }
return ret;
}
@@ -3309,12 +3311,10 @@ static ssize_t srp_create_target(struct device *dev,
num_online_nodes());
const int ch_end = ((node_idx + 1) * target->ch_count /
num_online_nodes());
- const int cv_start = (node_idx * ibdev->num_comp_vectors /
- num_online_nodes() + target->comp_vector)
- % ibdev->num_comp_vectors;
- const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
- num_online_nodes() + target->comp_vector)
- % ibdev->num_comp_vectors;
+ const int cv_start = node_idx * ibdev->num_comp_vectors /
+ num_online_nodes();
+ const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
+ num_online_nodes();
int cpu_idx = 0;
for_each_online_cpu(cpu) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index a73874508c3a..cb3a8623ff54 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2974,12 +2974,8 @@ static void srpt_queue_response(struct se_cmd *cmd)
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);
- if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
- || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
- atomic_inc(&ch->req_lim_delta);
- srpt_abort_cmd(ioctx);
+ if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
return;
- }
dir = ioctx->cmd.data_direction;
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 930424e55439..251d64ca41ce 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
if (!haptics)
return -ENOMEM;
- haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+ haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
if (pdata) {
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index c9d491bc85e0..3851d5715772 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1082,6 +1082,13 @@ static int elan_probe(struct i2c_client *client,
return error;
}
+ /* Make sure there is something at this address */
+ error = i2c_smbus_read_byte(client);
+ if (error < 0) {
+ dev_dbg(&client->dev, "nothing at this address: %d\n", error);
+ return -ENXIO;
+ }
+
/* Initialize the touchpad. */
error = elan_initialize(data);
if (error)
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index a679e56c44cd..765879dcaf85 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -557,7 +557,14 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
long ret;
int error;
int len;
- u8 buffer[ETP_I2C_INF_LENGTH];
+ u8 buffer[ETP_I2C_REPORT_LEN];
+
+ len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
+ if (len != ETP_I2C_REPORT_LEN) {
+ error = len < 0 ? len : -EIO;
+ dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
+ error, len);
+ }
reinit_completion(completion);
enable_irq(client->irq);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 51b96e9bf793..06ea28e5d7b4 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1715,6 +1715,17 @@ int elantech_init(struct psmouse *psmouse)
etd->samples[0], etd->samples[1], etd->samples[2]);
}
+ if (etd->samples[1] == 0x74 && etd->hw_version == 0x03) {
+ /*
+ * This module has a bug which makes absolute mode
+ * unusable, so let's abort so we'll be using standard
+ * PS/2 protocol.
+ */
+ psmouse_info(psmouse,
+ "absolute mode broken, forcing standard PS/2 protocol\n");
+ goto init_fail;
+ }
+
if (elantech_set_absolute_mode(psmouse)) {
psmouse_err(psmouse,
"failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
index 4787f2bcd768..13680130c2de 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
@@ -681,10 +681,12 @@ static enum flash_area fwu_go_nogo(struct image_header_data *header)
goto exit;
}
- while (strptr[index] >= '0' && strptr[index] <= '9') {
+ while ((index < MAX_FIRMWARE_ID_LEN - 1) && strptr[index] >= '0'
+ && strptr[index] <= '9') {
firmware_id[index] = strptr[index];
index++;
}
+ firmware_id[index] = '\0';
retval = sstrtoul(firmware_id, 10, &image_fw_id);
kfree(firmware_id);
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index a7d516f973dd..10068a481e22 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -389,6 +389,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
pasid_max - 1, GFP_KERNEL);
if (ret < 0) {
kfree(svm);
+ kfree(sdev);
goto out;
}
svm->pasid = ret;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 10b73d9bea78..b0b534622734 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -41,6 +41,7 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
+#include <linux/syscore_ops.h>
#include <asm/cputype.h>
#include <asm/irq.h>
@@ -69,6 +70,7 @@ union gic_base {
};
struct gic_chip_data {
+ unsigned int irq_offset;
union gic_base dist_base;
union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
@@ -85,6 +87,10 @@ struct gic_chip_data {
#ifdef CONFIG_GIC_NON_BANKED
void __iomem *(*get_base)(union gic_base *);
#endif
+#ifdef CONFIG_PM
+ unsigned int wakeup_irqs[32];
+ unsigned int enabled_irqs[32];
+#endif
};
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
@@ -222,6 +228,109 @@ static void gic_unmask_irq(struct irq_data *d)
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
+#ifdef CONFIG_PM
+static int gic_suspend_one(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ void __iomem *base = gic_data_dist_base(gic);
+
+ for (i = 0; i * 32 < gic->gic_irqs; i++) {
+ gic->enabled_irqs[i]
+ = readl_relaxed(base + GIC_DIST_ENABLE_SET + i * 4);
+ /* disable all of them */
+ writel_relaxed(0xffffffff,
+ base + GIC_DIST_ENABLE_CLEAR + i * 4);
+ /* enable the wakeup set */
+ writel_relaxed(gic->wakeup_irqs[i],
+ base + GIC_DIST_ENABLE_SET + i * 4);
+ }
+ /* make sure all gic setting finished */
+ mb();
+ return 0;
+}
+
+static int gic_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_GIC_NR; i++)
+ gic_suspend_one(&gic_data[i]);
+ return 0;
+}
+
+static void gic_show_resume_irq(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ u32 enabled;
+ u32 pending[32];
+ void __iomem *base = gic_data_dist_base(gic);
+
+ raw_spin_lock(&irq_controller_lock);
+ for (i = 0; i * 32 < gic->gic_irqs; i++) {
+ enabled = readl_relaxed(base + GIC_DIST_ENABLE_CLEAR + i * 4);
+ pending[i] = readl_relaxed(base + GIC_DIST_PENDING_SET + i * 4);
+ pending[i] &= enabled;
+ }
+ raw_spin_unlock(&irq_controller_lock);
+
+ for (i = find_first_bit((unsigned long *)pending, gic->gic_irqs);
+ i < gic->gic_irqs;
+ i = find_next_bit((unsigned long *)pending,
+ gic->gic_irqs, i+1)) {
+ unsigned int irq = irq_find_mapping(gic->domain,
+ i + gic->irq_offset);
+ struct irq_desc *desc = irq_to_desc(irq);
+ const char *name = "null";
+
+ if (desc == NULL)
+ name = "stray irq";
+ else if (desc->action && desc->action->name)
+ name = desc->action->name;
+
+ pr_warn("%s: %d triggered %s\n", __func__,
+ i + gic->irq_offset, name);
+ }
+}
+
+static void gic_resume_one(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ void __iomem *base = gic_data_dist_base(gic);
+
+ gic_show_resume_irq(gic);
+ for (i = 0; i * 32 < gic->gic_irqs; i++) {
+ /* disable all of them */
+ writel_relaxed(0xffffffff,
+ base + GIC_DIST_ENABLE_CLEAR + i * 4);
+ /* enable the enabled set */
+ writel_relaxed(gic->enabled_irqs[i],
+ base + GIC_DIST_ENABLE_SET + i * 4);
+ }
+ /* make sure all gic setting finished */
+ mb();
+}
+
+static void gic_resume(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_GIC_NR; i++)
+ gic_resume_one(&gic_data[i]);
+}
+
+static struct syscore_ops gic_syscore_ops = {
+ .suspend = gic_suspend,
+ .resume = gic_resume,
+};
+
+static int __init gic_init_sys(void)
+{
+ register_syscore_ops(&gic_syscore_ops);
+ return 0;
+}
+arch_initcall(gic_init_sys);
+#endif
+
static void gic_eoi_irq(struct irq_data *d)
{
if (gic_arch_extn.irq_eoi) {
@@ -373,6 +482,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
int ret = -ENXIO;
+ unsigned int reg_offset, bit_offset;
+ unsigned int gicirq = gic_irq(d);
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+
+ /* per-cpu interrupts cannot be wakeup interrupts */
+ WARN_ON(gicirq < 32);
+
+ reg_offset = gicirq / 32;
+ bit_offset = gicirq % 32;
+
+ if (on)
+ gic_data->wakeup_irqs[reg_offset] |= 1 << bit_offset;
+ else
+ gic_data->wakeup_irqs[reg_offset] &= ~(1 << bit_offset);
if (gic_arch_extn.irq_set_wake)
ret = gic_arch_extn.irq_set_wake(d, on);
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 9cb4b621fbc3..b92a19a594a1 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -72,7 +72,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
if (sk->sk_state != MISDN_BOUND)
continue;
if (!cskb)
- cskb = skb_copy(skb, GFP_KERNEL);
+ cskb = skb_copy(skb, GFP_ATOMIC);
if (!cskb) {
printk(KERN_WARNING "%s no skb\n", __func__);
break;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index b775e1efecd3..b9f71a87b7e1 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -281,7 +281,7 @@ static int pca955x_probe(struct i2c_client *client,
"slave address 0x%02x\n",
id->name, chip->bits, client->addr);
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
if (pdata) {
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index a58775953242..c90633b16fad 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -319,7 +319,7 @@ static inline int get_current_reg_code(int target_curr_ma, int ires_ua)
if (!ires_ua || !target_curr_ma || (target_curr_ma < (ires_ua / 1000)))
return 0;
- return DIV_ROUND_UP(target_curr_ma * 1000, ires_ua) - 1;
+ return DIV_ROUND_CLOSEST(target_curr_ma * 1000, ires_ua) - 1;
}
static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 4d46f2ce606f..aa84fcfd59fc 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -514,15 +514,21 @@ struct open_bucket {
/*
* We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
+ * write streams for better cache utilization: first we try to segregate flash
+ * only volume write streams from cached devices, secondly we look for a bucket
+ * where the last write to it was sequential with the current write, and
+ * failing that we look for a bucket that was last used by the same task.
*
 * The idea is if you've got multiple tasks pulling data into the cache at the
* same time, you'll get better cache utilization if you try to segregate their
* data and preserve locality.
*
- * For example, say you've starting Firefox at the same time you're copying a
+ * For example, dirty sectors of a flash-only volume are not reclaimable; if
+ * they get mixed into buckets with dirty sectors of a cached device, those
+ * buckets stay marked dirty and won't be reclaimed, even after the cached
+ * device's dirty data has been written back to the backing device.
+ *
+ * And say you've started Firefox at the same time you're copying a
* bunch of files. Firefox will likely end up being fairly hot and stay in the
* cache awhile, but the data you copied might not be; if you wrote all that
* data to the same buckets it'd get invalidated at the same time.
@@ -539,7 +545,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket *ret, *ret_task = NULL;
list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
+ if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+ UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+ continue;
+ else if (!bkey_cmp(&ret->key, search))
goto found;
else if (ret->last_write_point == write_point)
ret_task = ret;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 02257cb19c0b..b9a526271f02 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -890,6 +890,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ kthread_stop(dc->writeback_thread);
+ dc->writeback_thread = NULL;
+ }
+
memset(&dc->sb.set_uuid, 0, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index e34cf53bd068..ceff074b3b74 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/reboot.h>
+#include <linux/vmalloc.h>
#define DM_MSG_PREFIX "verity"
@@ -32,6 +33,7 @@
#define DM_VERITY_OPT_LOGGING "ignore_corruption"
#define DM_VERITY_OPT_RESTART "restart_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
+#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC)
@@ -399,6 +401,18 @@ static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
}
/*
+ * Moves the bio iter one data block forward.
+ */
+static inline void verity_bv_skip_block(struct dm_verity *v,
+ struct dm_verity_io *io,
+ struct bvec_iter *iter)
+{
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+
+ bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits);
+}
+
+/*
* Verify one "dm_verity_io" structure.
*/
static int verity_verify_io(struct dm_verity_io *io)
@@ -410,9 +424,16 @@ static int verity_verify_io(struct dm_verity_io *io)
for (b = 0; b < io->n_blocks; b++) {
int r;
+ sector_t cur_block = io->block + b;
struct shash_desc *desc = verity_io_hash_desc(v, io);
- r = verity_hash_for_block(v, io, io->block + b,
+ if (v->validated_blocks &&
+ likely(test_bit(cur_block, v->validated_blocks))) {
+ verity_bv_skip_block(v, io, &io->iter);
+ continue;
+ }
+
+ r = verity_hash_for_block(v, io, cur_block,
verity_io_want_digest(v, io),
&is_zero);
if (unlikely(r < 0))
@@ -445,13 +466,16 @@ static int verity_verify_io(struct dm_verity_io *io)
return r;
if (likely(memcmp(verity_io_real_digest(v, io),
- verity_io_want_digest(v, io), v->digest_size) == 0))
+ verity_io_want_digest(v, io), v->digest_size) == 0)) {
+ if (v->validated_blocks)
+ set_bit(cur_block, v->validated_blocks);
continue;
+ }
else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
- io->block + b, NULL, &start) == 0)
+ cur_block, NULL, &start) == 0)
continue;
else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
- io->block + b))
+ cur_block))
return -EIO;
}
@@ -645,6 +669,8 @@ void verity_status(struct dm_target *ti, status_type_t type,
args += DM_VERITY_OPTS_FEC;
if (v->zero_digest)
args++;
+ if (v->validated_blocks)
+ args++;
if (!args)
return;
DMEMIT(" %u", args);
@@ -663,6 +689,8 @@ void verity_status(struct dm_target *ti, status_type_t type,
}
if (v->zero_digest)
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
+ if (v->validated_blocks)
+ DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
sz = verity_fec_status_table(v, sz, result, maxlen);
break;
}
@@ -716,6 +744,7 @@ void verity_dtr(struct dm_target *ti)
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
+ vfree(v->validated_blocks);
kfree(v->salt);
kfree(v->root_digest);
kfree(v->zero_digest);
@@ -737,6 +766,26 @@ void verity_dtr(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(verity_dtr);
+static int verity_alloc_most_once(struct dm_verity *v)
+{
+ struct dm_target *ti = v->ti;
+
+ /* the bitset can only handle INT_MAX blocks */
+ if (v->data_blocks > INT_MAX) {
+ ti->error = "device too large to use check_at_most_once";
+ return -E2BIG;
+ }
+
+ v->validated_blocks = vzalloc(BITS_TO_LONGS(v->data_blocks) *
+ sizeof(unsigned long));
+ if (!v->validated_blocks) {
+ ti->error = "failed to allocate bitset for check_at_most_once";
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int verity_alloc_zero_digest(struct dm_verity *v)
{
int r = -ENOMEM;
@@ -806,6 +855,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
}
continue;
+ } else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
+ r = verity_alloc_most_once(v);
+ if (r)
+ return r;
+ continue;
+
} else if (verity_is_fec_opt_arg(arg_name)) {
r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
if (r)
@@ -1074,7 +1129,7 @@ EXPORT_SYMBOL_GPL(verity_ctr);
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index a90d1d416107..d216fc76d350 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -63,6 +63,7 @@ struct dm_verity {
sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
struct dm_verity_fec *fec; /* forward error correction */
+ unsigned long *validated_blocks; /* bitset blocks validated */
};
struct dm_verity_io {
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 494d01d0e92a..a7a561af05c9 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -945,8 +945,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
lock_comm(cinfo);
ret = __sendmsg(cinfo, &cmsg);
- if (ret)
+ if (ret) {
+ unlock_comm(cinfo);
return ret;
+ }
cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5a334b947a16..9284acea4f7b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -110,8 +110,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
int i;
- local_irq_disable();
- spin_lock(conf->hash_locks);
+ spin_lock_irq(conf->hash_locks);
for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
spin_lock(&conf->device_lock);
@@ -121,9 +120,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
int i;
spin_unlock(&conf->device_lock);
- for (i = NR_STRIPE_HASH_LOCKS; i; i--)
- spin_unlock(conf->hash_locks + i - 1);
- local_irq_enable();
+ for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
+ spin_unlock(conf->hash_locks + i);
+ spin_unlock_irq(conf->hash_locks);
}
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
@@ -726,12 +725,11 @@ static bool is_full_stripe_write(struct stripe_head *sh)
static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{
- local_irq_disable();
if (sh1 > sh2) {
- spin_lock(&sh2->stripe_lock);
+ spin_lock_irq(&sh2->stripe_lock);
spin_lock_nested(&sh1->stripe_lock, 1);
} else {
- spin_lock(&sh1->stripe_lock);
+ spin_lock_irq(&sh1->stripe_lock);
spin_lock_nested(&sh2->stripe_lock, 1);
}
}
@@ -739,8 +737,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{
spin_unlock(&sh1->stripe_lock);
- spin_unlock(&sh2->stripe_lock);
- local_irq_enable();
+ spin_unlock_irq(&sh2->stripe_lock);
}
/* Only freshly new full stripe normal write stripe can be added to a batch list */
diff --git a/drivers/media/i2c/adv7481.c b/drivers/media/i2c/adv7481.c
index 9c8159cc737a..43a5f3da5ac4 100644
--- a/drivers/media/i2c/adv7481.c
+++ b/drivers/media/i2c/adv7481.c
@@ -55,6 +55,8 @@
#define LOCK_MAX_SLEEP 6000
#define LOCK_NUM_TRIES 200
+#define MAX_DEFAULT_WIDTH 1280
+#define MAX_DEFAULT_HEIGHT 720
#define MAX_DEFAULT_FRAME_RATE 60
#define MAX_DEFAULT_PIX_CLK_HZ 74240000
@@ -1705,7 +1707,8 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state,
} else {
pr_err("%s(%d): PLL not locked return EBUSY\n",
__func__, __LINE__);
- return -EBUSY;
+ ret = -EBUSY;
+ goto set_default;
}
/* Check Timing Lock */
@@ -1825,6 +1828,17 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state,
(hdmi_params->pix_rep + 1));
}
+set_default:
+ if (ret) {
+ pr_debug("%s(%d), error %d resort to default fmt\n",
+ __func__, __LINE__, ret);
+ vid_params->act_pix = MAX_DEFAULT_WIDTH;
+ vid_params->act_lines = MAX_DEFAULT_HEIGHT;
+ vid_params->fr_rate = MAX_DEFAULT_FRAME_RATE;
+ vid_params->pix_clk = MAX_DEFAULT_PIX_CLK_HZ;
+ vid_params->intrlcd = 0;
+ ret = 0;
+ }
pr_debug("%s(%d), adv7481 TMDS Resolution: %d x %d @ %d fps\n",
__func__, __LINE__,
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index fe6eb78b6914..a47ab1947cc4 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -420,11 +420,13 @@ static void cx25840_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
/* 6. */
cx25840_write(client, 0x115, 0x8c);
@@ -631,11 +633,13 @@ static void cx23885_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
/* Call the cx23888 specific std setup func, we no longer rely on
* the generic cx24840 func.
@@ -746,11 +750,13 @@ static void cx231xx_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
cx25840_std_setup(client);
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c
index 22820a0b8e79..77f2ab5e7c3d 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c
@@ -2809,9 +2809,11 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
vfe_dev->axi_data.src_info[VFE_PIX_0].eof_id = 0;
}
+ mutex_lock(&vfe_dev->buf_mgr->lock);
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
VFE_AXI_SRC_MAX) {
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
stream_info = &axi_data->stream_info[
@@ -2821,6 +2823,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
SRC_TO_INTF(stream_info->stream_src)].active;
else {
ISP_DBG("%s: invalid src info index\n", __func__);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
@@ -2835,6 +2838,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
HANDLE_TO_IDX(
stream_cfg_cmd->stream_handle[i]));
spin_unlock_irqrestore(&stream_info->lock, flags);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return rc;
}
@@ -2893,6 +2897,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
}
}
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
msm_isp_update_stream_bandwidth(vfe_dev, stream_cfg_cmd->hw_state);
vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
vfe_dev->vfe_base, wm_reload_mask);
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
index 0d08cffda25c..360eb8eca8d7 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -684,18 +684,23 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
stream_cfg_cmd->num_streams);
return -EINVAL;
}
+ mutex_lock(&vfe_dev->buf_mgr->lock);
+
num_stats_comp_mask =
vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams(
stats_data->stream_info);
- if (rc < 0)
+ if (rc < 0) {
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return rc;
+ }
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
pr_err("%s Invalid stats index %d", __func__, idx);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
@@ -711,11 +716,13 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
pr_err("%s: comp grp %d exceed max %d\n",
__func__, stream_info->composite_flag,
num_stats_comp_mask);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
if (rc < 0) {
pr_err("%s: No buffer for stream%d\n", __func__, idx);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return rc;
}
if (!stream_info->composite_flag)
@@ -740,6 +747,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
stats_data->num_active_stream);
}
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_util.c
index e4e368424007..2ba19b13535b 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_util.c
@@ -919,9 +919,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
break;
case VIDIOC_MSM_ISP_CFG_STREAM:
mutex_lock(&vfe_dev->core_mutex);
- mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_CFG_HW_STATE:
@@ -1022,9 +1020,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
break;
case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
mutex_lock(&vfe_dev->core_mutex);
- mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
index d881b4aea48f..8e0a7443f98c 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -442,7 +442,7 @@ static int msm_fd_open(struct file *file)
}
ctx->mem_pool.fd_device = ctx->fd_device;
- ctx->stats = vmalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS);
+ ctx->stats = vzalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS);
if (!ctx->stats) {
dev_err(device->dev, "No memory for face statistics\n");
ret = -ENOMEM;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
index 6ed5c5c7dbce..4f55f4bc7c4a 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
@@ -14,7 +14,6 @@
#define MSM_CSIPHY_3_5_HWREG_H
#define ULPM_WAKE_UP_TIMER_MODE 2
-#define GLITCH_ELIMINATION_NUM 0x12 /* bit [6:4] */
#include <sensor/csiphy/msm_csiphy.h>
@@ -47,13 +46,13 @@ struct csiphy_reg_3ph_parms_t csiphy_v3_5_3ph = {
{0x138, 0x0},
{0x13C, 0x10},
{0x140, 0x1},
- {0x144, GLITCH_ELIMINATION_NUM},
+ {0x144, 0x32},
{0x148, 0xFE},
{0x14C, 0x1},
{0x154, 0x0},
{0x15C, 0x23},
{0x160, ULPM_WAKE_UP_TIMER_MODE},
- {0x164, 0x48},
+ {0x164, 0x50},
{0x168, 0x70},
{0x16C, 0x17},
{0x170, 0x41},
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index 5fc2aed87e19..4f7a62716810 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -135,8 +135,10 @@ static int msm_csiphy_3phase_lane_config(
uint8_t i = 0;
uint16_t lane_mask = 0, lane_enable = 0, temp;
void __iomem *csiphybase;
+ uint64_t two_gbps = 0;
csiphybase = csiphy_dev->base;
+ two_gbps = 2 * (uint64_t)csiphy_params->lane_cnt * GBPS;
lane_mask = csiphy_params->lane_mask & 0x7;
while (lane_mask != 0) {
temp = (i << 1)+1;
@@ -282,9 +284,9 @@ static int msm_csiphy_3phase_lane_config(
csiphy_3ph_reg.mipi_csiphy_3ph_lnn_ctrl51.addr +
0x200*i);
}
+
if ((csiphy_dev->hw_version == CSIPHY_VERSION_V35) &&
- ((csiphy_params->data_rate /
- csiphy_params->lane_cnt) > 2 * GBPS)) {
+ (csiphy_params->data_rate > two_gbps)) {
msm_camera_io_w(0x40,
csiphybase +
csiphy_dev->ctrl_reg->csiphy_3ph_reg.
@@ -807,7 +809,7 @@ static int msm_csiphy_lane_config(struct csiphy_device *csiphy_dev,
ratio = csiphy_dev->csiphy_max_clk/clk_rate;
csiphy_params->settle_cnt = csiphy_params->settle_cnt/ratio;
}
- CDBG("%s csiphy_params, mask = 0x%x cnt = %d, data rate = %lu\n",
+ CDBG("%s csiphy_params, mask = 0x%x cnt = %d, data rate = %llu\n",
__func__,
csiphy_params->lane_mask,
csiphy_params->lane_cnt, csiphy_params->data_rate);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index f838d9c7ed12..0fba4a2c1602 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1370,8 +1370,13 @@ static int mceusb_dev_probe(struct usb_interface *intf,
goto rc_dev_fail;
/* wire up inbound data handler */
- usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
- mceusb_dev_recv, ir, ep_in->bInterval);
+ if (usb_endpoint_xfer_int(ep_in))
+ usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir, ep_in->bInterval);
+ else
+ usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir);
+
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index dc3dd13db1be..5cbd15742050 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -101,7 +101,7 @@ static int get_v4l2_window32(struct v4l2_window __user *kp,
static int put_v4l2_window32(struct v4l2_window __user *kp,
struct v4l2_window32 __user *up)
{
- struct v4l2_clip __user *kclips = kp->clips;
+ struct v4l2_clip __user *kclips;
struct v4l2_clip32 __user *uclips;
compat_caddr_t p;
u32 clipcount;
@@ -116,6 +116,8 @@ static int put_v4l2_window32(struct v4l2_window __user *kp,
if (!clipcount)
return 0;
+ if (get_user(kclips, &kp->clips))
+ return -EFAULT;
if (get_user(p, &up->clips))
return -EFAULT;
uclips = compat_ptr(p);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 3dc9ed2e0774..bb1e19f7ed5a 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -205,6 +205,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
struct vb2_buffer *vb;
int ret;
+ /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */
+ num_buffers = min_t(unsigned int, num_buffers,
+ VB2_MAX_FRAME - q->num_buffers);
+
for (buffer = 0; buffer < num_buffers; ++buffer) {
/* Allocate videobuf buffer structures */
vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ebccfa8072a..cb790b68920f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
+ .no_write_same = 1,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
index 42b45ec7d9d9..92faa1b899c9 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -58,7 +58,7 @@ static long audio_ioctl_shared(struct file *file, unsigned int cmd,
audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
- rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+ rc = q6asm_enc_cfg_blk_pcm_native(audio->ac,
audio->pcm_cfg.sample_rate,
audio->pcm_cfg.channel_count);
if (rc < 0) {
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4e4561..cc277f7849b0 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -298,8 +298,11 @@ static void *qp_alloc_queue(u64 size, u32 flags)
size_t pas_size;
size_t vas_size;
size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
- const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ u64 num_pages;
+ if (size > SIZE_MAX - PAGE_SIZE)
+ return NULL;
+ num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages >
(SIZE_MAX - queue_size) /
(sizeof(*queue->kernel_if->u.g.pas) +
@@ -624,9 +627,12 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
struct vmci_queue *queue;
size_t queue_page_size;
- const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ u64 num_pages;
const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
+ if (size > SIZE_MAX - PAGE_SIZE)
+ return NULL;
+ num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages > (SIZE_MAX - queue_size) /
sizeof(*queue->kernel_if->u.h.page))
return NULL;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index de7def1f4f1c..35be47dafda2 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3614,7 +3614,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
* or disable state so cannot receive any completion of
* other requests.
*/
- BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+ WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
/* clear pending request */
BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3648,7 +3648,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
out:
mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
- if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+ if (!(err || cmdq_req->resp_err)) {
mmc_host_clk_release(host);
wake_up(&ctx_info->wait);
mmc_put_card(host->card);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 2af0e819d0cb..547d18c9feef 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3011,8 +3011,16 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
*/
mmc_host_clk_hold(host);
err = mmc_wait_for_cmd(host, &cmd, 0);
- if (err)
- goto err_command;
+ if (err) {
+ if (err == -ETIMEDOUT) {
+ pr_debug("%s: voltage switching failed with err %d\n",
+ mmc_hostname(host), err);
+ err = -EAGAIN;
+ goto power_cycle;
+ } else {
+ goto err_command;
+ }
+ }
if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
err = -EIO;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 76e8bce6f46e..ad572a0f2124 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -368,9 +368,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
host->irq_mask &= ~irq;
else
host->irq_mask |= irq;
- spin_unlock_irqrestore(&host->lock, flags);
writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
+ spin_unlock_irqrestore(&host->lock, flags);
}
static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 286b97a304cf..4509ee0b294a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
+#define M28F00AP30 0x8963
/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
extp->MinorVersion = '1';
}
+static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
+{
+ /*
+ * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy w.r.t.
+ * Erase Suspend for their small (0x8000) erase blocks
+ */
+ if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
+ return 1;
+ return 0;
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
@@ -825,21 +837,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
(mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
goto sleep;
+ /* Do not allow suspend if the read/write targets the erase-block address */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
+
+ /* do not suspend small erase blocks on buggy Micron chips */
+ if (cfi_is_micron_28F00AP30(cfi, chip) &&
+ (chip->in_progress_block_mask == ~(0x8000-1)))
+ goto sleep;
/* Erase suspend */
- map_write(map, CMD(0xB0), adr);
+ map_write(map, CMD(0xB0), chip->in_progress_block_addr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
- status = map_read(map, adr);
+ status = map_read(map, chip->in_progress_block_addr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
@@ -1035,8 +1056,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
- map_write(map, CMD(0xd0), adr);
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0xd0), chip->in_progress_block_addr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
@@ -1927,6 +1948,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index c3624eb571d1..31448a2b39ae 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -814,9 +814,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;
- /* We could check to see if we're trying to access the sector
- * that is currently being erased. However, no user will try
- * anything like that so we just wait for the timeout. */
+ /* Do not allow suspend if the read/write targets the erase-block address */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
/* Erase suspend */
/* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2265,6 +2266,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(map->size - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map->size,
@@ -2354,6 +2356,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, len,
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index b2fb0528c092..07ad86759d92 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -244,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
* in any case.
*/
if (mode & FMODE_WRITE) {
- ret = -EPERM;
+ ret = -EROFS;
goto out_unlock;
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 9b7bc6326fa2..9556a4de159c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -951,6 +951,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
+ /*
+ * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
+ * MLC NAND is different and needs special care, otherwise UBI or UBIFS
+ * will die soon and you will lose all your data.
+ */
+ if (mtd->type == MTD_MLCNANDFLASH) {
+ pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 30d3999dddba..ed62f1efe6eb 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -360,7 +360,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
{
int i;
- flush_work(&ubi->fm_work);
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index eadccf498589..339118f3c718 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1490,39 +1490,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_close;
}
- /* If the mode uses primary, then the following is handled by
- * bond_change_active_slave().
- */
- if (!bond_uses_primary(bond)) {
- /* set promiscuity level to new slave */
- if (bond_dev->flags & IFF_PROMISC) {
- res = dev_set_promiscuity(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- /* set allmulti level to new slave */
- if (bond_dev->flags & IFF_ALLMULTI) {
- res = dev_set_allmulti(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- netif_addr_lock_bh(bond_dev);
-
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
-
- netif_addr_unlock_bh(bond_dev);
- }
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_add(slave_dev, lacpdu_multicast);
- }
-
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
if (res) {
netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
@@ -1647,8 +1614,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
} /* switch(bond_mode) */
#ifdef CONFIG_NET_POLL_CONTROLLER
- slave_dev->npinfo = bond->dev->npinfo;
- if (slave_dev->npinfo) {
+ if (bond->dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
@@ -1679,6 +1645,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_upper_unlink;
}
+ /* If the mode uses primary, then the following is handled by
+ * bond_change_active_slave().
+ */
+ if (!bond_uses_primary(bond)) {
+ /* set promiscuity level to new slave */
+ if (bond_dev->flags & IFF_PROMISC) {
+ res = dev_set_promiscuity(slave_dev, 1);
+ if (res)
+ goto err_sysfs_del;
+ }
+
+ /* set allmulti level to new slave */
+ if (bond_dev->flags & IFF_ALLMULTI) {
+ res = dev_set_allmulti(slave_dev, 1);
+ if (res) {
+ if (bond_dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(slave_dev, -1);
+ goto err_sysfs_del;
+ }
+ }
+
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ /* add lacpdu mc addr to mc list */
+ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+
+ dev_mc_add(slave_dev, lacpdu_multicast);
+ }
+ }
+
bond->slave_cnt++;
bond_compute_features(bond);
bond_set_carrier(bond);
@@ -1702,6 +1702,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;
/* Undo stages on error */
+err_sysfs_del:
+ bond_sysfs_slave_del(new_slave);
+
err_upper_unlink:
bond_upper_dev_unlink(bond_dev, slave_dev);
@@ -1709,9 +1712,6 @@ err_unregister:
netdev_rx_handler_unregister(slave_dev);
err_detach:
- if (!bond_uses_primary(bond))
- bond_hw_addr_flush(bond_dev, slave_dev);
-
vlan_vids_del_by_dev(slave_dev, bond_dev);
if (rcu_access_pointer(bond->primary_slave) == new_slave)
RCU_INIT_POINTER(bond->primary_slave, NULL);
@@ -2555,11 +2555,13 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
+ slave->new_link = BOND_LINK_NOCHANGE;
+
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
- slave->link = BOND_LINK_UP;
+ slave->new_link = BOND_LINK_UP;
slave_state_changed = 1;
/* primary_slave has no meaning in round-robin
@@ -2586,7 +2588,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, slave->last_rx, 2)) {
- slave->link = BOND_LINK_DOWN;
+ slave->new_link = BOND_LINK_DOWN;
slave_state_changed = 1;
if (slave->link_failure_count < UINT_MAX)
@@ -2617,6 +2619,11 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (!rtnl_trylock())
goto re_arm;
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->new_link != BOND_LINK_NOCHANGE)
+ slave->link = slave->new_link;
+ }
+
if (slave_state_changed) {
bond_slave_state_change(bond);
if (BOND_MODE(bond) == BOND_MODE_XOR)
diff --git a/drivers/net/can/spi/rh850.c b/drivers/net/can/spi/rh850.c
index d2b6e8caa112..b32ae2ddd41b 100644
--- a/drivers/net/can/spi/rh850.c
+++ b/drivers/net/can/spi/rh850.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/completion.h>
+#include <linux/irq.h>
#define DEBUG_RH850 0
#if DEBUG_RH850 == 1
@@ -1103,6 +1104,7 @@ static int rh850_probe(struct spi_device *spi)
int err, i;
struct rh850_can *priv_data;
struct device *dev;
+ u32 irq_type;
dev = &spi->dev;
dev_info(dev, "rh850_probe");
@@ -1134,8 +1136,11 @@ static int rh850_probe(struct spi_device *spi)
}
}
+ irq_type = irq_get_trigger_type(spi->irq);
+ if (irq_type == IRQ_TYPE_NONE)
+ irq_type = IRQ_TYPE_EDGE_FALLING;
err = request_threaded_irq(spi->irq, NULL, rh850_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ irq_type | IRQF_ONESHOT,
"rh850", priv_data);
if (err) {
dev_err(dev, "Failed to request irq: %d", err);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d1103d612d8b..949a82458a29 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3943,15 +3943,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
+ u16 vlan_tci = 0;
#ifndef BNX2X_STOP_ON_ERROR
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
#endif
- tx_start_bd->vlan_or_ethertype =
- cpu_to_le16(ntohs(eth->h_proto));
+ /* Still need to consider inband vlan for enforced */
+ if (__vlan_get_tag(skb, &vlan_tci)) {
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(ntohs(eth->h_proto));
+ } else {
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_INBAND_VLAN <<
+ ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(vlan_tci);
+ }
#ifndef BNX2X_STOP_ON_ERROR
- else
+ } else {
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ }
#endif
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0f6811860ad5..a36e38676640 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2845,7 +2845,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
- memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
static void
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cf61a5869c6e..de23f23b41de 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -6076,13 +6076,18 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
if (!t4_fw_matches_chip(adap, fw_hdr))
return -EINVAL;
+ /* Disable the FW_OK flag so that mbox commands with the FW_OK flag set
+ * won't be sent while we are flashing the firmware.
+ */
+ adap->flags &= ~FW_OK;
+
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
- return ret;
+ goto out;
ret = t4_load_fw(adap, fw_data, size);
if (ret < 0)
- return ret;
+ goto out;
/*
* Older versions of the firmware don't understand the new
@@ -6093,7 +6098,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
* its header flags to see if it advertises the capability.
*/
reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
- return t4_fw_restart(adap, mbox, reset);
+ ret = t4_fw_restart(adap, mbox, reset);
+
+ /* Grab potentially new Firmware Device Log parameters so we can see
+ * how healthy the new Firmware is. It's okay to contact the new
+ * Firmware for these parameters even though, as far as it's
+ * concerned, we've never said "HELLO" to it ...
+ */
+ (void)t4_init_devlog_params(adap);
+out:
+ adap->flags |= FW_OK;
+ return ret;
}
/**
@@ -7696,7 +7711,16 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
if (ret)
break;
- idx = (idx + 1) & UPDBGLARDPTR_M;
+
+ /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
+ * identify the 32-bit portion of the full 312-bit data
+ */
+ if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
+ idx = (idx & 0xff0) + 0x10;
+ else
+ idx++;
+ /* address can't exceed 0xfff */
+ idx &= UPDBGLARDPTR_M;
}
restart:
if (cfg & UPDBGLAEN_F) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index fa3786a9d30e..ec8ffd7eae33 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2604,8 +2604,8 @@ void t4vf_sge_stop(struct adapter *adapter)
int t4vf_sge_init(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
- u32 fl0 = sge_params->sge_fl_buffer_size[0];
- u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+ u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
unsigned int ingpadboundary, ingpackboundary;
@@ -2614,9 +2614,20 @@ int t4vf_sge_init(struct adapter *adapter)
* the Physical Function Driver. Ideally we should be able to deal
* with _any_ configuration. Practice is different ...
*/
- if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
- fl0, fl1);
+ fl_small_pg, fl_large_pg);
return -EINVAL;
}
if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
@@ -2627,8 +2638,8 @@ int t4vf_sge_init(struct adapter *adapter)
/*
* Now translate the adapter parameters into our internal forms.
*/
- if (fl1)
- s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
s->pktshift = PKTSHIFT_G(sge_params->sge_control);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 40071dad1c57..9c76f1a2f57b 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -382,7 +382,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
const struct of_device_id *id =
of_match_device(fsl_pq_mdio_match, &pdev->dev);
- const struct fsl_pq_mdio_data *data = id->data;
+ const struct fsl_pq_mdio_data *data;
struct device_node *np = pdev->dev.of_node;
struct resource res;
struct device_node *tbi;
@@ -390,6 +390,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
struct mii_bus *new_bus;
int err;
+ if (!id) {
+ dev_err(&pdev->dev, "Failed to match device\n");
+ return -ENODEV;
+ }
+
+ data = id->data;
+
dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
new_bus = mdiobus_alloc_size(sizeof(*priv));
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 5d7db6c01c46..f301c03c527b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -342,6 +342,7 @@ static int emac_reset(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
int n = 20;
+ bool __maybe_unused try_internal_clock = false;
DBG(dev, "reset" NL);
@@ -354,6 +355,7 @@ static int emac_reset(struct emac_instance *dev)
}
#ifdef CONFIG_PPC_DCR_NATIVE
+do_retry:
/*
* PPC460EX/GT Embedded Processor Advanced User's Manual
* section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -361,10 +363,19 @@ static int emac_reset(struct emac_instance *dev)
* of the EMAC. If none is present, select the internal clock
* (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
* After a soft reset, select the external clock.
+ *
+ * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk if the
+ * ethernet cable is not attached. This causes the reset to time out
+ * and the PHY detection code in emac_init_phy() is unable to
+ * communicate and detect the AR8035-A PHY. As a result, the emac
+ * driver bails out early and the user has no ethernet.
+ * In order to stay compatible with existing configurations, the
+ * driver will temporarily switch to the internal clock, after
+ * the first reset fails.
*/
if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
- if (dev->phy_address == 0xffffffff &&
- dev->phy_map == 0xffffffff) {
+ if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff)) {
/* No PHY: select internal loop clock before reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -382,8 +393,15 @@ static int emac_reset(struct emac_instance *dev)
#ifdef CONFIG_PPC_DCR_NATIVE
if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
- if (dev->phy_address == 0xffffffff &&
- dev->phy_map == 0xffffffff) {
+ if (!n && !try_internal_clock) {
+ /* first attempt has timed out. */
+ n = 20;
+ try_internal_clock = true;
+ goto do_retry;
+ }
+
+ if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff)) {
/* No PHY: restore external clock source after reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
SDR0_ETH_CFG_ECS << dev->cell_index, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e356e9187e84..20d8806d2bff 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1182,6 +1182,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
struct e1000_hw *hw = &adapter->hw;
if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+ struct sk_buff *skb = adapter->tx_hwtstamp_skb;
struct skb_shared_hwtstamps shhwtstamps;
u64 txstmp;
@@ -1190,9 +1191,14 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
- skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
- dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ /* Clear the global tx_hwtstamp_skb pointer and force writes
+ * prior to notifying the stack of a Tx timestamp.
+ */
adapter->tx_hwtstamp_skb = NULL;
+ wmb(); /* force write prior to skb_tstamp_tx */
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
} else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ adapter->tx_timeout_factor * HZ)) {
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
@@ -6589,12 +6595,17 @@ static int e1000e_pm_thaw(struct device *dev)
static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ int rc;
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
- return __e1000_shutdown(pdev, false);
+ rc = __e1000_shutdown(pdev, false);
+ if (rc)
+ e1000e_pm_thaw(dev);
+
+ return rc;
}
static int e1000e_pm_resume(struct device *dev)
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 4b62aa1f9ff8..6e5065f0907b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
- pdev->d3_delay = 150;
+ pdev->d3_delay = 200;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1d4e2e054647..897d061e4f03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
#include <linux/export.h>
#include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+ if (!mlx4_qp_lookup(dev, rule->qpn)) {
+ mlx4_err_rule(dev, "QP doesn't exist\n", rule);
+ ret = -EINVAL;
+ goto out;
+ }
+
trans_rule_ctrl_to_hw(rule, mailbox->buf);
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
list_for_each_entry(cur, &rule->list, list) {
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
- if (ret < 0) {
- mlx4_free_cmd_mailbox(dev, mailbox);
- return ret;
- }
+ if (ret < 0)
+ goto out;
+
size += ret;
}
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
}
}
+out:
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
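
The mcg.c change above is the usual single-exit cleanup idiom: rather than freeing the command mailbox on each error path, every failure jumps to one out: label that frees it and returns ret. The generic shape, with step_one/step_two as placeholders rather than real mlx4 calls:

        ret = step_one(dev);
        if (ret)
                goto out;

        ret = step_two(dev);
        if (ret)
                goto out;
        /* ... further steps sharing the same cleanup ... */
out:
        mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
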
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index d8359ffba026..62f1a3433a62 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -381,6 +381,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
__mlx4_qp_free_icm(dev, qpn);
}
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ struct mlx4_qp *qp;
+
+ spin_lock(&qp_table->lock);
+
+ qp = __mlx4_qp_lookup(dev, qpn);
+
+ spin_unlock(&qp_table->lock);
+ return qp;
+}
+
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -468,6 +481,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
}
if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
+ mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
cmd->qp_context.qos_vport = params->qos_vport;
}
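
mlx4_qp_lookup() above simply takes the qp_table spinlock around the existing unlocked lookup so that mcg.c can verify a QP exists before attaching a steering rule to it. Because the lock is dropped before returning, the pointer is only an existence check at that instant, not a reference; a generic sketch of the pattern (the obj/table names are hypothetical):

        static struct obj *obj_lookup(struct obj_table *t, u32 id)
        {
                struct obj *o;

                spin_lock(&t->lock);
                o = __obj_lookup(t, id);        /* unlocked lookup helper */
                spin_unlock(&t->lock);

                /* 'o' can go stale the moment the lock is dropped; treat it
                 * as a yes/no answer unless a refcount is taken under the lock.
                 */
                return o;
        }
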
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index d1fc7fa87b05..e3080fbd9d00 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5040,6 +5040,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
+static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
+ struct mlx4_vf_immed_vlan_work *work)
+{
+ ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
+ ctx->qp_context.qos_vport = work->qos_vport;
+}
+
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
struct mlx4_vf_immed_vlan_work *work =
@@ -5144,11 +5151,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
qp->sched_queue & 0xC7;
upd_context->qp_context.pri_path.sched_queue |=
((work->qos & 0x7) << 3);
- upd_context->qp_mask |=
- cpu_to_be64(1ULL <<
- MLX4_UPD_QP_MASK_QOS_VPP);
- upd_context->qp_context.qos_vport =
- work->qos_vport;
+
+ if (dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_QOS_VPP)
+ update_qos_vpp(upd_context, work);
}
err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f5c1f4acc57b..7c42be586be8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -513,7 +513,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
- int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@@ -523,18 +522,11 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
priv->irq_info[i].mask);
- err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
- if (err) {
- mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
- irq);
- goto err_clear_mask;
- }
+ if (IS_ENABLED(CONFIG_SMP) &&
+ irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
return 0;
-
-err_clear_mask:
- free_cpumask_var(priv->irq_info[i].mask);
- return err;
}
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index b8d5270359cd..e30676515529 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
cmd.req.arg3 = 0;
if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
- netxen_issue_cmd(adapter, &cmd);
+ rcode = netxen_issue_cmd(adapter, &cmd);
if (rcode != NX_RCODE_SUCCESS)
return -EIO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 509b596cf1e8..bd1ec70fb736 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
}
return -EIO;
}
- usleep_range(1000, 1500);
+ udelay(1200);
}
if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index be258d90de9e..e3223f2fe2ff 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.imageSize =
sizeof(struct ql_mpi_coredump);
- memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.idString));
/* Get generic NIC reg dump */
@@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.imageSize =
sizeof(struct ql_reg_dump);
- memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.idString));
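
The two qlge_dbg.c changes above fix an out-of-bounds read rather than a write: idString is longer than the 13-byte "MPI Coredump" literal, so memcpy() with sizeof(idString) kept reading past the end of the literal, whereas strncpy() stops at the terminating NUL and zero-fills the remainder of the destination. In isolation (the 16-byte size below is a stand-in, not the real field width):

        char id[16];

        /* memcpy(id, "MPI Coredump", sizeof(id)) would read sizeof(id) bytes
         * from a 13-byte literal; strncpy() stops at the NUL and pads with
         * zeroes instead. */
        strncpy(id, "MPI Coredump", sizeof(id));
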
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 1ef03939d25f..c90ae4d4be7d 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca)
/* Allocate rx SKB if we don't have one available. */
if (!qca->rx_skb) {
- qca->rx_skb = netdev_alloc_skb(net_dev,
- net_dev->mtu + VLAN_ETH_HLEN);
+ qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
+ net_dev->mtu +
+ VLAN_ETH_HLEN);
if (!qca->rx_skb) {
netdev_dbg(net_dev, "out of RX resources\n");
qca->stats.out_of_mem++;
@@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca)
qca->rx_skb, qca->rx_skb->dev);
qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_rx_ni(qca->rx_skb);
- qca->rx_skb = netdev_alloc_skb(net_dev,
+ qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
net_dev->mtu + VLAN_ETH_HLEN);
if (!qca->rx_skb) {
netdev_dbg(net_dev, "out of RX resources\n");
@@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev)
if (!qca->rx_buffer)
return -ENOBUFS;
- qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN);
+ qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
+ VLAN_ETH_HLEN);
if (!qca->rx_skb) {
kfree(qca->rx_buffer);
netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
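
All three qca_spi allocation sites above move to netdev_alloc_skb_ip_align(), which is essentially netdev_alloc_skb() of length + NET_IP_ALIGN followed by skb_reserve(skb, NET_IP_ALIGN), so the IP header that follows the 14-byte Ethernet header lands 4-byte aligned on architectures that need it (NET_IP_ALIGN is 0 where unaligned access is cheap). The long-hand equivalent, as a sketch:

        skb = netdev_alloc_skb(net_dev,
                               net_dev->mtu + VLAN_ETH_HLEN + NET_IP_ALIGN);
        if (skb)
                skb_reserve(skb, NET_IP_ALIGN); /* shift data so IP hdr aligns */
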
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3783c40f568b..a82c89af7124 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8411,12 +8411,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_msi_4;
}
+ pci_set_drvdata(pdev, dev);
+
rc = register_netdev(dev);
if (rc < 0)
goto err_out_cnt_5;
- pci_set_drvdata(pdev, dev);
-
netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
(u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 424d1dee55c9..afaf79b8761f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3222,7 +3222,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* MDIO bus init */
ret = sh_mdio_init(mdp, pd);
if (ret) {
- dev_err(&ndev->dev, "failed to initialise MDIO\n");
+ dev_err(&pdev->dev, "failed to initialise MDIO\n");
goto out_release;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 435466c17852..c69b0bdd891d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -280,6 +280,10 @@ struct cpsw_ss_regs {
/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
+#define CPSW_MAX_BLKS_TX 15
+#define CPSW_MAX_BLKS_TX_SHIFT 4
+#define CPSW_MAX_BLKS_RX 5
+
struct cpsw_host_regs {
u32 max_blks;
u32 blk_cnt;
@@ -1127,11 +1131,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
switch (priv->version) {
case CPSW_VERSION_1:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting fullduplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
case CPSW_VERSION_4:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting fullduplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
break;
}
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 49fe59b180a8..a75ce9051a7f 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -574,6 +574,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (s->par.bitrate <= 0)
+ return -EINVAL;
if (bi.data.calibrate > INT_MAX / s->par.bitrate)
return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
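
The HDLCDRVCTL_CALIBRATE hunk above guards the divisor first: with a zero or negative bitrate, the existing overflow check calibrate > INT_MAX / bitrate would itself divide by zero or pass nonsense through. The same guard as a standalone sketch (function and parameter names are hypothetical):

        #include <limits.h>

        /* Return cal * rate / 16, or -1 if the rate is unusable or the
         * multiplication would overflow a signed int. */
        static int scale_calibrate(int cal, int rate)
        {
                if (rate <= 0)          /* would divide by zero below */
                        return -1;
                if (cal < 0 || cal > INT_MAX / rate)
                        return -1;      /* cal * rate would overflow */
                return cal * rate / 16;
        }
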
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7d0690433ee0..7d2cf015c5e7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -148,6 +148,12 @@ static inline int phy_aneg_done(struct phy_device *phydev)
if (phydev->drv->aneg_done)
return phydev->drv->aneg_done(phydev);
+ /* Avoid genphy_aneg_done() if the Clause 45 PHY does not
+ * implement Clause 22 registers
+ */
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+ return -EINVAL;
+
return genphy_aneg_done(phydev);
}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index b7b859c3a0c7..583d50f80b24 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -638,6 +638,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppox))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index f7e8c79349ad..12a627fcc02c 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -501,7 +501,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu = dst_mtu(&rt->dst);
if (!po->chan.mtu)
po->chan.mtu = PPP_MRU;
- ip_rt_put(rt);
po->chan.mtu -= PPTP_HEADER_OVERHEAD;
po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 27ed25252aac..cfd81eb1b532 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
if(x < 0 || x > comp->rslot_limit)
goto bad;
+ /* Check if the cstate is initialized */
+ if (!comp->rstate[x].initialized)
+ goto bad;
+
comp->flags &=~ SLF_TOSS;
comp->recv_current = x;
} else {
@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
if (cs->cs_tcp.doff > 5)
memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
+ cs->initialized = true;
/* Put headers back on packet
* Neither header checksum is recalculated
*/
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 9bca36e1fefd..e74709e4b5dd 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -247,6 +247,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
+static bool __team_option_inst_tmp_find(const struct list_head *opts,
+ const struct team_option_inst *needle)
+{
+ struct team_option_inst *opt_inst;
+
+ list_for_each_entry(opt_inst, opts, tmp_list)
+ if (opt_inst == needle)
+ return true;
+ return false;
+}
+
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -1039,14 +1050,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int __team_port_enable_netpoll(struct team_port *port)
{
struct netpoll *np;
int err;
- if (!team->dev->npinfo)
- return 0;
-
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
@@ -1060,6 +1068,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
return err;
}
+static int team_port_enable_netpoll(struct team_port *port)
+{
+ if (!port->team->dev->npinfo)
+ return 0;
+
+ return __team_port_enable_netpoll(port);
+}
+
static void team_port_disable_netpoll(struct team_port *port)
{
struct netpoll *np = port->np;
@@ -1074,7 +1090,7 @@ static void team_port_disable_netpoll(struct team_port *port)
kfree(np);
}
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team_port *port)
{
return 0;
}
@@ -1181,7 +1197,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
- err = team_port_enable_netpoll(team, port);
+ err = team_port_enable_netpoll(port);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);
@@ -1889,7 +1905,7 @@ static int team_netpoll_setup(struct net_device *dev,
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port);
+ err = __team_port_enable_netpoll(port);
if (err) {
__team_netpoll_cleanup(team);
break;
@@ -2544,6 +2560,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
+
+ /* dumb/evil user-space can send us duplicate opt,
+ * keep only the last one
+ */
+ if (__team_option_inst_tmp_find(&opt_inst_list,
+ opt_inst))
+ continue;
+
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index f9343bee1de3..f71abe50ea6f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -461,6 +461,7 @@ static const struct driver_info wwan_info = {
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define LINKSYS_VENDOR_ID 0x13b1
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
@@ -650,6 +651,15 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+#endif
+
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
@@ -705,6 +715,12 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Cinterion AHS3 modem by GEMALTO */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
/* Telit modules */
USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 72cb30828a12..c8e98c8e29fa 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1069,6 +1069,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
u16 n = 0, index, ndplen;
u8 ready2send = 0;
u32 delayed_ndp_size;
+ size_t padding_count;
/* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated
* accordingly. Otherwise, we should check here.
@@ -1225,11 +1226,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* a ZLP after full sized NTBs.
*/
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
- skb_out->len > ctx->min_tx_pkt)
- memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
- ctx->tx_max - skb_out->len);
- else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
+ skb_out->len > ctx->min_tx_pkt) {
+ padding_count = ctx->tx_max - skb_out->len;
+ memset(skb_put(skb_out, padding_count), 0, padding_count);
+ } else if (skb_out->len < ctx->tx_max &&
+ (skb_out->len % dev->maxpacket) == 0) {
*skb_put(skb_out, 1) = 0; /* force short packet */
+ }
/* set final frame length */
nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index ebdee8f01f65..a6d429950cb0 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -618,7 +618,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
offset += 0x100;
else
ret = -EINVAL;
- ret = lan78xx_read_raw_otp(dev, offset, length, data);
+ if (!ret)
+ ret = lan78xx_read_raw_otp(dev, offset, length, data);
}
return ret;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 89950f5cea71..b2c1a435357f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -506,6 +506,7 @@ enum rtl8152_flags {
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define MCU_TYPE_PLA 0x0100
@@ -4376,6 +4377,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8dfc75250583..d01285250204 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -556,7 +556,12 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
hdr = skb_vnet_hdr(skb);
sg_init_table(rq->sg, 2);
sg_set_buf(rq->sg, hdr, vi->hdr_len);
- skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+
+ err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+ if (unlikely(err < 0)) {
+ dev_kfree_skb(skb);
+ return err;
+ }
err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
if (err < 0)
@@ -858,7 +863,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
struct virtio_net_hdr_mrg_rxbuf *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
- unsigned num_sg;
+ int num_sg;
unsigned hdr_len = vi->hdr_len;
bool can_push;
@@ -911,11 +916,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
if (can_push) {
__skb_push(skb, hdr_len);
num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+ if (unlikely(num_sg < 0))
+ return num_sg;
/* Pull header back to avoid skew in tx bytes calculations. */
__skb_pull(skb, hdr_len);
} else {
sg_set_buf(sq->sg, hdr, hdr_len);
- num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+ if (unlikely(num_sg < 0))
+ return num_sg;
+ num_sg++;
}
return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
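
Beyond checking the return value, note the declaration change from unsigned num_sg to int num_sg in the hunk above: skb_to_sgvec() can now report a negative error (for instance when the skb needs more scatterlist slots than were supplied), and a < 0 test on an unsigned variable would never fire. Minimal shape of the check:

        int num_sg;     /* must be signed: skb_to_sgvec() may return -errno */

        num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
        if (unlikely(num_sg < 0))
                return num_sg;
        num_sg++;       /* one extra scatterlist entry for the virtio header */
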
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 82bf85ae5d08..419c045d0752 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2789,6 +2789,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
/* we need to enable NAPI, otherwise dev_close will deadlock */
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
+ /*
+ * Need to clear the quiesce bit to ensure that vmxnet3_close
+ * can quiesce the device properly
+ */
+ clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
dev_close(adapter->netdev);
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ac945f8781ac..d3d59122a357 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -550,13 +550,15 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
- if (!IS_ERR(neigh))
+ if (!IS_ERR(neigh)) {
ret = dst_neigh_output(dst, neigh, skb);
+ rcu_read_unlock_bh();
+ return ret;
+ }
rcu_read_unlock_bh();
err:
- if (unlikely(ret < 0))
- vrf_tx_error(skb->dev, skb);
+ vrf_tx_error(skb->dev, skb);
return ret;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e4ff1e45c02e..c41378214ede 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -962,7 +962,7 @@ static bool vxlan_snoop(struct net_device *dev,
return false;
/* Don't migrate static entries, drop packets */
- if (f->state & NUD_NOARP)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
return true;
if (net_ratelimit())
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index fe86dc84d9c3..f02c1b148545 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5004,6 +5004,15 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err;
}
+ if ((arvif->vdev_type == WMI_VDEV_TYPE_STA) && QCA_REV_WCN3990(ar)) {
+ ret = ath10k_wmi_csa_offload(ar, arvif->vdev_id, true);
+ if (ret) {
+ ath10k_err(ar, "CSA offload failed for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+ }
+
ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
list_add(&arvif->list, &ar->arvifs);
@@ -5216,6 +5225,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
kfree(arvif->u.ap.noa_data);
}
+ if ((arvif->vdev_type == WMI_VDEV_TYPE_STA) && QCA_REV_WCN3990(ar))
+ ath10k_wmi_csa_offload(ar, arvif->vdev_id, false);
+
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
arvif->vdev_id);
@@ -5665,6 +5677,22 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
arvif->vdev_id, ret);
}
+static void ath10k_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ mutex_lock(&ar->conf_mutex);
+ memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN);
+ memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN);
+ arvif->gtk_rekey_data.replay_ctr =
+ be64_to_cpup((__be64 *)data->replay_ctr);
+ arvif->gtk_rekey_data.valid = true;
+ mutex_unlock(&ar->conf_mutex);
+}
+
static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -5913,9 +5941,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps, err);
}
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
- changed & IEEE80211_RC_NSS_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
sta->addr);
err = ath10k_station_assoc(ar, arvif->vif, sta, true);
@@ -7609,6 +7636,7 @@ static const struct ieee80211_ops ath10k_ops = {
.bss_info_changed = ath10k_bss_info_changed,
.hw_scan = ath10k_hw_scan,
.cancel_hw_scan = ath10k_cancel_hw_scan,
+ .set_rekey_data = ath10k_set_rekey_data,
.set_key = ath10k_set_key,
.set_default_unicast_key = ath10k_set_default_unicast_key,
.sta_state = ath10k_sta_state,
@@ -7644,7 +7672,6 @@ static const struct ieee80211_ops ath10k_ops = {
.suspend = ath10k_wow_op_suspend,
.resume = ath10k_wow_op_resume,
.set_wakeup = ath10k_wow_op_set_wakeup,
- .set_rekey_data = ath10k_wow_op_set_rekey_data,
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 06fb7596988d..468ad47f0298 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -211,6 +211,8 @@ struct wmi_ops {
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
+ struct sk_buff *(*gen_csa_offload)(struct ath10k *ar,
+ u32 vdev_id, bool enable);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1493,6 +1495,23 @@ ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
}
static inline int
+ath10k_wmi_csa_offload(struct ath10k *ar, u32 vdev_id, bool enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_csa_offload)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_csa_offload(ar, vdev_id, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->csa_offload_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
struct ath10k_wmi *wmi = &ar->wmi;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index d938ca951aee..3d323f3e73af 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3079,6 +3079,37 @@ ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
}
static struct sk_buff *
+ath10k_wmi_tlv_op_gen_csa_offload(struct ath10k *ar, u32 vdev_id, bool enable)
+{
+ struct wmi_csa_offload_enable_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd) + sizeof(*tlv);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ if (enable)
+ cmd->csa_offload_enable |=
+ __cpu_to_le32(WMI_CSA_OFFLOAD_ENABLE);
+ else
+ cmd->csa_offload_enable |=
+ __cpu_to_le32(WMI_CSA_OFFLOAD_DISABLE);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi CSA offload for vdev: %d\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_op_gen_gtk_offload(struct ath10k *ar, struct ath10k_vif *arvif)
{
struct wmi_tlv_gtk_offload_cmd *cmd;
@@ -3895,6 +3926,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
.gen_set_arp_ns_offload = ath10k_wmi_tlv_op_gen_set_arp_ns_offload,
.gen_gtk_offload = ath10k_wmi_op_gen_gtk_offload,
+ .gen_csa_offload = ath10k_wmi_tlv_op_gen_csa_offload,
.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 69066efbb2a8..2280f47dc227 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -445,22 +445,6 @@ static int ath10k_config_wow_listen_interval(struct ath10k *ar)
return 0;
}
-void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data)
-{
- struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
-
- mutex_lock(&ar->conf_mutex);
- memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN);
- memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN);
- arvif->gtk_rekey_data.replay_ctr =
- cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
- arvif->gtk_rekey_data.valid = true;
- mutex_unlock(&ar->conf_mutex);
-}
-
static int ath10k_wow_config_gtk_offload(struct ath10k *ar, bool gtk_offload)
{
struct ath10k_vif *arvif;
@@ -509,6 +493,13 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
goto exit;
}
+ ret = ath10k_wow_cleanup(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
ret = ath10k_wow_config_gtk_offload(ar, true);
if (ret) {
ath10k_warn(ar, "failed to enable GTK offload: %d\n", ret);
@@ -521,18 +512,11 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
goto disable_gtk_offload;
}
- ret = ath10k_wow_cleanup(ar);
- if (ret) {
- ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
- ret);
- goto disable_ns_arp_offload;
- }
-
ret = ath10k_wow_set_wakeups(ar, wowlan);
if (ret) {
ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
ret);
- goto cleanup;
+ goto disable_ns_arp_offload;
}
ret = ath10k_config_wow_listen_interval(ar);
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
index a33881739138..2ca4ba4848c9 100644
--- a/drivers/net/wireless/ath/ath10k/wow.h
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -32,9 +32,6 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
int ath10k_wow_op_resume(struct ieee80211_hw *hw);
void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
-void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data);
#else
static inline int ath10k_wow_init(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 654a1e33f827..7c5f189cace7 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -939,7 +939,10 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
}
for (i = 0; i < eesize; ++i) {
- AR5K_EEPROM_READ(i, val);
+ if (!ath5k_hw_nvram_read(ah, i, &val)) {
+ ret = -EIO;
+ goto freebuf;
+ }
buf[i] = val;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 41382f89abe1..4435c7bbb625 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1595,6 +1595,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
int count = 50;
u32 reg, last_val;
+ /* Check if chip failed to wake up */
+ if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
+ return false;
+
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0881ba8535f4..c78abfc7bd96 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -247,7 +247,10 @@ static const UCHAR b4_default_startup_parms[] = {
0x04, 0x08, /* Noise gain, limit offset */
0x28, 0x28, /* det rssi, med busy offsets */
7, /* det sync thresh */
- 0, 2, 2 /* test mode, min, max */
+ 0, 2, 2, /* test mode, min, max */
+ 0, /* rx/tx delay */
+ 0, 0, 0, 0, 0, 0, /* current BSS id */
+ 0 /* hop set */
};
/*===========================================================================*/
@@ -598,7 +601,7 @@ static void init_startup_params(ray_dev_t *local)
* a_beacon_period = hops a_beacon_period = KuS
*//* 64ms = 010000 */
if (local->fw_ver == 0x55) {
- memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms,
+ memcpy(&local->sparm.b4, b4_default_startup_parms,
sizeof(struct b4_startup_params));
/* Translate sane kus input values to old build 4/5 format */
/* i = hop time in uS truncated to 3 bytes */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index b7f72f9c7988..b3691712df61 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1454,6 +1454,7 @@ static int rtl8187_probe(struct usb_interface *intf,
goto err_free_dev;
}
mutex_init(&priv->io_mutex);
+ mutex_init(&priv->conf_mutex);
SET_IEEE80211_DEV(dev, &intf->dev);
usb_set_intfdata(intf, dev);
@@ -1627,7 +1628,6 @@ static int rtl8187_probe(struct usb_interface *intf,
printk(KERN_ERR "rtl8187: Cannot register device\n");
goto err_free_dmabuf;
}
- mutex_init(&priv->conf_mutex);
skb_queue_head_init(&priv->b_tx_status.queue);
wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 9bee3f11898a..869411f55d88 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1196,8 +1196,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
- wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+ ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 0b8d2655985f..fee4c01fbdfd 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2024,7 +2024,10 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
+ break;
+
case XenbusStateUnknown:
+ wake_up_all(&module_unload_q);
break;
case XenbusStateInitWait:
@@ -2155,7 +2158,9 @@ static int xennet_remove(struct xenbus_device *dev)
xenbus_switch_state(dev, XenbusStateClosing);
wait_event(module_unload_q,
xenbus_read_driver_state(dev->otherend) ==
- XenbusStateClosing);
+ XenbusStateClosing ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateUnknown);
xenbus_switch_state(dev, XenbusStateClosed);
wait_event(module_unload_q,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0b3e0bfa7be5..572ca192cb1f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -587,6 +587,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
{
unsigned long long sta = 0;
struct acpiphp_func *func;
+ u32 dvid;
list_for_each_entry(func, &slot->funcs, sibling) {
if (func->flags & FUNC_HAS_STA) {
@@ -597,19 +598,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
if (ACPI_SUCCESS(status) && sta)
break;
} else {
- u32 dvid;
-
- pci_bus_read_config_dword(slot->bus,
- PCI_DEVFN(slot->device,
- func->function),
- PCI_VENDOR_ID, &dvid);
- if (dvid != 0xffffffff) {
+ if (pci_bus_read_dev_vendor_id(slot->bus,
+ PCI_DEVFN(slot->device, func->function),
+ &dvid, 0)) {
sta = ACPI_STA_ALL;
break;
}
}
}
+ if (!sta) {
+ /*
+ * Check for the slot itself since it may be that the
+ * ACPI slot is a device below PCIe upstream port so in
+ * that case it may not even be reachable yet.
+ */
+ if (pci_bus_read_dev_vendor_id(slot->bus,
+ PCI_DEVFN(slot->device, 0), &dvid, 0)) {
+ sta = ACPI_STA_ALL;
+ }
+ }
+
return (unsigned int)sta;
}
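
Both branches above now go through pci_bus_read_dev_vendor_id(), which combines the vendor/device ID config read with the "nothing answered" checks (all-ones and the related invalid patterns) and returns true only when a device is present, replacing the open-coded 0xffffffff comparison. Usage shape:

        u32 id;

        /* true if a device responded at this devfn; the last argument is how
         * long to keep retrying a CRS-returning device (0 = don't wait). */
        if (pci_bus_read_dev_vendor_id(slot->bus,
                                       PCI_DEVFN(slot->device, 0), &id, 0))
                sta = ACPI_STA_ALL;
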
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
index 277a820ee4e1..e53ae2fc717b 100644
--- a/drivers/platform/goldfish/Makefile
+++ b/drivers/platform/goldfish/Makefile
@@ -2,5 +2,4 @@
# Makefile for Goldfish platform specific drivers
#
obj-$(CONFIG_GOLDFISH_BUS) += pdev_bus.o
-obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe_all.o
-goldfish_pipe_all-objs := goldfish_pipe.o goldfish_pipe_v2.o
+obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o goldfish_pipe_v2.o
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index dc376b0fd276..df3f5c301a61 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -506,7 +506,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
struct goldfish_pipe *pipe;
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
int32_t status;
/* Allocate new pipe kernel object */
@@ -558,7 +558,7 @@ static const struct file_operations goldfish_pipe_fops = {
.release = goldfish_pipe_release,
};
-static struct miscdevice goldfish_pipe_dev = {
+static struct miscdevice goldfish_pipe_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "goldfish_pipe",
.fops = &goldfish_pipe_fops,
@@ -566,15 +566,16 @@ static struct miscdevice goldfish_pipe_dev = {
int goldfish_pipe_device_init_v1(struct platform_device *pdev)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
int err = devm_request_irq(&pdev->dev, dev->irq,
goldfish_pipe_interrupt, IRQF_SHARED, "goldfish_pipe", dev);
+
if (err) {
dev_err(&pdev->dev, "unable to allocate IRQ for v1\n");
return err;
}
- err = misc_register(&goldfish_pipe_dev);
+ err = misc_register(&goldfish_pipe_miscdev);
if (err) {
dev_err(&pdev->dev, "unable to register v1 device\n");
return err;
@@ -586,5 +587,5 @@ int goldfish_pipe_device_init_v1(struct platform_device *pdev)
void goldfish_pipe_device_deinit_v1(struct platform_device *pdev)
{
- misc_deregister(&goldfish_pipe_dev);
+ misc_deregister(&goldfish_pipe_miscdev);
}
diff --git a/drivers/platform/goldfish/goldfish_pipe.h b/drivers/platform/goldfish/goldfish_pipe.h
index 9b75a51dba24..5de147432203 100644
--- a/drivers/platform/goldfish/goldfish_pipe.h
+++ b/drivers/platform/goldfish/goldfish_pipe.h
@@ -86,6 +86,6 @@ struct goldfish_pipe_dev {
struct access_params *aps;
};
-extern struct goldfish_pipe_dev pipe_dev[1];
+extern struct goldfish_pipe_dev goldfish_pipe_dev;
#endif /* GOLDFISH_PIPE_H */
diff --git a/drivers/platform/goldfish/goldfish_pipe_v2.c b/drivers/platform/goldfish/goldfish_pipe_v2.c
index 3119b3341a7b..590f6dea3c1b 100644
--- a/drivers/platform/goldfish/goldfish_pipe_v2.c
+++ b/drivers/platform/goldfish/goldfish_pipe_v2.c
@@ -49,7 +49,6 @@
#include <linux/printk.h>
#include "goldfish_pipe.h"
-
/*
* Update this when something changes in the driver's behavior so the host
* can benefit from knowing it
@@ -83,9 +82,9 @@ enum PipeErrors {
/* Bit-flags used to signal events from the emulator */
enum PipeWakeFlags {
- PIPE_WAKE_CLOSED = 1 << 0, /* emulator closed pipe */
- PIPE_WAKE_READ = 1 << 1, /* pipe can now be read from */
- PIPE_WAKE_WRITE = 1 << 2 /* pipe can now be written to */
+ PIPE_WAKE_CLOSED = BIT(0), /* emulator closed pipe */
+ PIPE_WAKE_READ = BIT(1), /* pipe can now be read from */
+ PIPE_WAKE_WRITE = BIT(2), /* pipe can now be written to */
};
/* Bit flags for the 'flags' field */
@@ -214,9 +213,10 @@ struct goldfish_pipe {
struct goldfish_pipe_dev *dev;
};
-struct goldfish_pipe_dev pipe_dev[1] = {};
+struct goldfish_pipe_dev goldfish_pipe_dev;
-static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+static int goldfish_pipe_cmd_locked(
+ struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
pipe->command_buffer->cmd = cmd;
/* failure by default */
@@ -225,12 +225,15 @@ static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
return pipe->command_buffer->status;
}
-static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
int status;
+
if (mutex_lock_interruptible(&pipe->lock))
return PIPE_ERROR_IO;
- status = goldfish_cmd_locked(pipe, cmd);
+
+ status = goldfish_pipe_cmd_locked(pipe, cmd);
+
mutex_unlock(&pipe->lock);
return status;
}
@@ -329,7 +332,7 @@ static void populate_rw_params(
command->rw_params.buffers_count = buffer_idx + 1;
}
-static int transfer_max_buffers(struct goldfish_pipe* pipe,
+static int transfer_max_buffers(struct goldfish_pipe *pipe,
unsigned long address, unsigned long address_end, int is_write,
unsigned long last_page, unsigned int last_page_size,
s32 *consumed_size, int *status)
@@ -352,7 +355,7 @@ static int transfer_max_buffers(struct goldfish_pipe* pipe,
pipe->command_buffer);
/* Transfer the data */
- *status = goldfish_cmd_locked(
+ *status = goldfish_pipe_cmd_locked(
pipe,
is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
@@ -361,7 +364,6 @@ static int transfer_max_buffers(struct goldfish_pipe* pipe,
mutex_unlock(&pipe->lock);
release_user_pages(pages, pages_count, is_write, *consumed_size);
-
return 0;
}
@@ -371,7 +373,7 @@ static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
set_bit(wakeBit, &pipe->flags);
/* Tell the emulator we're going to wait for a wake event */
- (void)goldfish_cmd(pipe,
+ goldfish_pipe_cmd(pipe,
is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
while (test_bit(wakeBit, &pipe->flags)) {
@@ -414,6 +416,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
while (address < address_end) {
s32 consumed_size;
int status;
+
ret = transfer_max_buffers(pipe, address, address_end, is_write,
last_page, last_page_size, &consumed_size, &status);
if (ret < 0)
@@ -491,7 +494,7 @@ static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &pipe->wake_queue, wait);
- status = goldfish_cmd(pipe, PIPE_CMD_POLL);
+ status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
if (status < 0)
return -ERESTARTSYS;
@@ -507,26 +510,30 @@ static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
return mask;
}
-static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
+static int signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
u32 id, u32 flags)
{
struct goldfish_pipe *pipe;
- BUG_ON(id >= dev->pipes_capacity);
+ if (id >= dev->pipes_capacity)
+ return -EINVAL;
pipe = dev->pipes[id];
if (!pipe)
- return;
+ return -ENXIO;
+
pipe->signalled_flags |= flags;
if (pipe->prev_signalled || pipe->next_signalled
|| dev->first_signalled_pipe == pipe)
- return; /* already in the list */
+ return 0; /* already in the list */
+
pipe->next_signalled = dev->first_signalled_pipe;
if (dev->first_signalled_pipe)
dev->first_signalled_pipe->prev_signalled = pipe;
-
dev->first_signalled_pipe = pipe;
+
+ return 0;
}
static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
@@ -570,11 +577,12 @@ static struct goldfish_pipe *signalled_pipes_pop_front(
static void goldfish_interrupt_task(unsigned long unused)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
/* Iterate over the signalled pipes and wake them one by one */
struct goldfish_pipe *pipe;
int wakes;
- while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
+
+ while ((pipe = signalled_pipes_pop_front(&goldfish_pipe_dev, &wakes)) !=
+ NULL) {
if (wakes & PIPE_WAKE_CLOSED) {
pipe->flags = 1 << BIT_CLOSED_ON_HOST;
} else {
@@ -611,7 +619,8 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
u32 i;
unsigned long flags;
struct goldfish_pipe_dev *dev = dev_id;
- if (dev != pipe_dev)
+
+ if (dev != &goldfish_pipe_dev)
return IRQ_NONE;
/* Request the signalled pipes from the device */
@@ -649,7 +658,7 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
u32 new_capacity = 2 * dev->pipes_capacity;
struct goldfish_pipe **pipes =
kcalloc(new_capacity, sizeof(*pipes),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!pipes)
return -ENOMEM;
memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
@@ -674,13 +683,14 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
*/
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
unsigned long flags;
int id;
int status;
/* Allocate new pipe kernel object */
struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+
if (pipe == NULL)
return -ENOMEM;
@@ -717,16 +727,16 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
dev->buffers->open_command_params.rw_params_max_count =
MAX_BUFFERS_PER_COMMAND;
dev->buffers->open_command_params.command_buffer_ptr =
- (u64)(unsigned long)__pa(pipe->command_buffer);
- status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
+ (u64)__pa(pipe->command_buffer);
+ status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
spin_unlock_irqrestore(&dev->lock, flags);
if (status < 0) {
- pr_err("Could not tell host of new pipe! status=%d", status);
+ pr_err("Could not tell host of new pipe! status=%d\n", status);
goto err_cmd;
}
+
/* All is done, save the pipe into the file's private data field */
file->private_data = pipe;
- pr_debug("%s on 0x%p\n", __func__, pipe);
return 0;
err_cmd:
@@ -746,10 +756,8 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp)
struct goldfish_pipe *pipe = filp->private_data;
struct goldfish_pipe_dev *dev = pipe->dev;
- pr_debug("%s on 0x%p\n", __func__, pipe);
-
/* The guest is closing the channel, so tell the emulator right now */
- (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);
+ goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
spin_lock_irqsave(&dev->lock, flags);
dev->pipes[pipe->id] = NULL;
@@ -757,8 +765,10 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp)
spin_unlock_irqrestore(&dev->lock, flags);
filp->private_data = NULL;
+
free_page((unsigned long)pipe->command_buffer);
kfree(pipe);
+
return 0;
}
@@ -771,7 +781,7 @@ static const struct file_operations goldfish_pipe_fops = {
.release = goldfish_pipe_release,
};
-static struct miscdevice goldfish_pipe_dev = {
+static struct miscdevice goldfish_pipe_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "goldfish_pipe",
.fops = &goldfish_pipe_fops,
@@ -780,7 +790,7 @@ static struct miscdevice goldfish_pipe_dev = {
static int goldfish_pipe_device_init_v2(struct platform_device *pdev)
{
char *page;
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
IRQF_SHARED, "goldfish_pipe", dev);
if (err) {
@@ -788,7 +798,7 @@ static int goldfish_pipe_device_init_v2(struct platform_device *pdev)
return err;
}
- err = misc_register(&goldfish_pipe_dev);
+ err = misc_register(&goldfish_pipe_miscdev);
if (err) {
dev_err(&pdev->dev, "unable to register v2 device\n");
return err;
@@ -807,13 +817,13 @@ static int goldfish_pipe_device_init_v2(struct platform_device *pdev)
* needs to be contained in a single physical page. The easiest choice
* is to just allocate a page and place the buffers in it.
*/
- BUG_ON(sizeof(*dev->buffers) > PAGE_SIZE);
- page = (char*)__get_free_page(GFP_KERNEL);
+ BUILD_BUG_ON(sizeof(*dev->buffers) > PAGE_SIZE);
+ page = (char *)__get_free_page(GFP_KERNEL);
if (!page) {
kfree(dev->pipes);
return -ENOMEM;
}
- dev->buffers = (struct goldfish_pipe_dev_buffers*)page;
+ dev->buffers = (struct goldfish_pipe_dev_buffers *)page;
/* Send the buffer addresses to the host */
{
@@ -832,23 +842,24 @@ static int goldfish_pipe_device_init_v2(struct platform_device *pdev)
writel((u32)(unsigned long)paddr,
dev->base + PIPE_REG_OPEN_BUFFER);
}
+
return 0;
}
-static void goldfish_pipe_device_deinit_v2(struct platform_device *pdev) {
- struct goldfish_pipe_dev *dev = pipe_dev;
- misc_deregister(&goldfish_pipe_dev);
- kfree(dev->pipes);
- free_page((unsigned long)dev->buffers);
+static void goldfish_pipe_device_deinit_v2(struct platform_device *pdev)
+{
+ misc_deregister(&goldfish_pipe_miscdev);
+ kfree(goldfish_pipe_dev.pipes);
+ free_page((unsigned long)goldfish_pipe_dev.buffers);
}
static int goldfish_pipe_probe(struct platform_device *pdev)
{
int err;
struct resource *r;
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
- BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
/* not thread safe, but this should not happen */
WARN_ON(dev->base != NULL);
@@ -899,7 +910,8 @@ error:
static int goldfish_pipe_remove(struct platform_device *pdev)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
+
if (dev->version < PIPE_CURRENT_DEVICE_VERSION)
goldfish_pipe_device_deinit_v1(pdev);
else
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index f135d3977509..1a704ffab07a 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -432,6 +432,8 @@ static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count,
list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
link) {
+ if (entry->cookie != IPA_HDR_COOKIE)
+ continue;
nbytes = scnprintf(
dbg_buff,
IPA_MAX_MSG_LEN,
@@ -606,6 +608,14 @@ static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
if (attrib->protocol_eq_present)
pr_err("protocol:%d ", attrib->protocol_eq);
+ if (attrib->num_ihl_offset_range_16 >
+ IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) {
+ IPAERR_RL("num_ihl_offset_range_16 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS,
+ attrib->num_ihl_offset_range_16);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_ihl_offset_range_16; i++) {
pr_err(
"(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
@@ -614,6 +624,12 @@ static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
attrib->ihl_offset_range_16[i].range_high);
}
+ if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) {
+ IPAERR_RL("num_offset_meq_32 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_offset_meq_32; i++) {
pr_err(
"(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
@@ -635,6 +651,12 @@ static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
attrib->ihl_offset_eq_16.value);
}
+ if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) {
+ IPAERR_RL("num_ihl_offset_meq_32 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) {
pr_err(
"(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
@@ -643,6 +665,12 @@ static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
attrib->ihl_offset_meq_32[i].value);
}
+ if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) {
+ IPAERR_RL("num_offset_meq_128 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_offset_meq_128; i++) {
for (j = 0; j < 16; j++) {
addr[j] = attrib->offset_meq_128[i].value[j];
@@ -812,11 +840,14 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
u32 rt_tbl_idx;
u32 bitmap;
bool eq;
+ int res = 0;
tbl = &ipa_ctx->glob_flt_tbl[ip];
mutex_lock(&ipa_ctx->lock);
i = 0;
list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (entry->cookie != IPA_FLT_COOKIE)
+ continue;
if (entry->rule.eq_attrib_type) {
rt_tbl_idx = entry->rule.rt_tbl_idx;
bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
@@ -835,10 +866,14 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
i, entry->rule.action, rt_tbl_idx);
pr_err("attrib_mask:%08x retain_hdr:%d eq:%d ",
bitmap, entry->rule.retain_hdr, eq);
- if (eq)
- ipa_attrib_dump_eq(
+ if (eq) {
+ res = ipa_attrib_dump_eq(
&entry->rule.eq_attrib);
- else
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
+ } else
ipa_attrib_dump(
&entry->rule.attrib, ip);
i++;
@@ -848,6 +883,8 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
tbl = &ipa_ctx->flt_tbl[j][ip];
i = 0;
list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (entry->cookie != IPA_FLT_COOKIE)
+ continue;
if (entry->rule.eq_attrib_type) {
rt_tbl_idx = entry->rule.rt_tbl_idx;
bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
@@ -867,18 +904,23 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
pr_err("attrib_mask:%08x retain_hdr:%d ",
bitmap, entry->rule.retain_hdr);
pr_err("eq:%d ", eq);
- if (eq)
- ipa_attrib_dump_eq(
- &entry->rule.eq_attrib);
- else
+ if (eq) {
+ res = ipa_attrib_dump_eq(
+ &entry->rule.eq_attrib);
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
+ } else
ipa_attrib_dump(
&entry->rule.attrib, ip);
i++;
}
}
+bail:
mutex_unlock(&ipa_ctx->lock);
- return 0;
+ return res;
}
static ssize_t ipa_read_stats(struct file *file, char __user *ubuf,
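
The pattern repeated through these debugfs hunks (and again in the ipa_v3 copy further down) is the same: each num_* counter is clamped against the fixed-size array it indexes before the dump loop runs, since the counters come from rule structures that user space can influence. Generic shape (field names are hypothetical):

        if (attrib->num_entries > ARRAY_SIZE(attrib->entries)) {
                IPAERR_RL("num_entries %u exceeds table size\n",
                          attrib->num_entries);
                return -EPERM;
        }

        for (i = 0; i < attrib->num_entries; i++)
                dump_entry(&attrib->entries[i]);
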
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 039a8b6a50b5..3defc03c2571 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1432,6 +1432,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Extended IOCTLs */
case RMNET_IOCTL_EXTENDED:
+ if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
if (copy_from_user(&extend_ioctl_data,
(u8 *)ifr->ifr_ifru.ifru_data,
@@ -2869,7 +2871,7 @@ int rmnet_ipa_query_tethering_stats_modem(
if (reset) {
req->reset_stats_valid = true;
req->reset_stats = true;
- IPAWANERR("reset the pipe stats\n");
+ IPAWANDBG("reset the pipe stats\n");
} else {
/* print tethered-client enum */
IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 6c8b3573465d..eb9a6877c39f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -357,6 +357,8 @@ static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count,
list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
link) {
+ if (entry->cookie != IPA_HDR_COOKIE)
+ continue;
nbytes = scnprintf(
dbg_buff,
IPA_MAX_MSG_LEN,
@@ -540,6 +542,12 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
if (attrib->tc_eq_present)
pr_err("tc:%d ", attrib->tc_eq);
+ if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) {
+ IPAERR_RL("num_offset_meq_128 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_offset_meq_128; i++) {
for (j = 0; j < 16; j++) {
addr[j] = attrib->offset_meq_128[i].value[j];
@@ -551,6 +559,12 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
mask, addr);
}
+ if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) {
+ IPAERR_RL("num_offset_meq_32 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_offset_meq_32; i++)
pr_err(
"(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
@@ -558,6 +572,12 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
attrib->offset_meq_32[i].mask,
attrib->offset_meq_32[i].value);
+ if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) {
+ IPAERR_RL("num_ihl_offset_meq_32 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_ihl_offset_meq_32; i++)
pr_err(
"(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
@@ -572,6 +592,14 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
attrib->metadata_meq32.mask,
attrib->metadata_meq32.value);
+ if (attrib->num_ihl_offset_range_16 >
+ IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) {
+ IPAERR_RL("num_ihl_offset_range_16 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS,
+ attrib->num_ihl_offset_range_16);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_ihl_offset_range_16; i++)
pr_err(
"(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
@@ -764,7 +792,11 @@ static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
pr_err("rule_id:%u prio:%u retain_hdr:%u ",
rules[rl].id, rules[rl].priority,
rules[rl].retain_hdr);
- ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+ res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
}
pr_err("=== Routing Table %d = Non-Hashable Rules ===\n", tbl);
@@ -795,7 +827,11 @@ static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
pr_err("rule_id:%u prio:%u retain_hdr:%u\n",
rules[rl].id, rules[rl].priority,
rules[rl].retain_hdr);
- ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+ res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
}
pr_err("\n");
}
@@ -869,6 +905,7 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
u32 rt_tbl_idx;
u32 bitmap;
bool eq;
+ int res = 0;
mutex_lock(&ipa3_ctx->lock);
@@ -878,6 +915,8 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
tbl = &ipa3_ctx->flt_tbl[j][ip];
i = 0;
list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (entry->cookie != IPA_FLT_COOKIE)
+ continue;
if (entry->rule.eq_attrib_type) {
rt_tbl_idx = entry->rule.rt_tbl_idx;
bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
@@ -899,18 +938,23 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
pr_err("hashable:%u rule_id:%u max_prio:%u prio:%u ",
entry->rule.hashable, entry->rule_id,
entry->rule.max_prio, entry->prio);
- if (eq)
- ipa3_attrib_dump_eq(
+ if (eq) {
+ res = ipa3_attrib_dump_eq(
&entry->rule.eq_attrib);
- else
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
+ } else
ipa3_attrib_dump(
&entry->rule.attrib, ip);
i++;
}
}
+bail:
mutex_unlock(&ipa3_ctx->lock);
- return 0;
+ return res;
}
static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
@@ -961,7 +1005,11 @@ static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
bitmap, rules[rl].rule.retain_hdr);
pr_err("rule_id:%u prio:%u ",
rules[rl].id, rules[rl].priority);
- ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+ res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
}
pr_err("=== Filtering Table ep:%d = Non-Hashable Rules ===\n",
@@ -985,7 +1033,11 @@ static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
bitmap, rules[rl].rule.retain_hdr);
pr_err("rule_id:%u prio:%u ",
rules[rl].id, rules[rl].priority);
- ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+ res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
}
pr_err("\n");
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index ced8c8b2d3ab..128b859ee152 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -61,8 +61,10 @@ static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;
res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
- if (res)
- IPAERR("failed to generate flt h/w rule\n");
+ if (res) {
+ IPAERR_RL("failed to generate flt h/w rule\n");
+ return res;
+ }
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index c9e5a46c08f0..900f5077e901 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1566,6 +1566,8 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Extended IOCTLs */
case RMNET_IOCTL_EXTENDED:
+ if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
if (copy_from_user(&extend_ioctl_data,
(u8 *)ifr->ifr_ifru.ifru_data,
@@ -3013,7 +3015,7 @@ static int rmnet_ipa3_query_tethering_stats_modem(
if (reset) {
req->reset_stats_valid = true;
req->reset_stats = true;
- IPAWANERR("reset the pipe stats\n");
+ IPAWANDBG("reset the pipe stats\n");
} else {
/* print tethered-client enum */
IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 68640e349765..e068bec8b85e 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -58,6 +58,7 @@ struct pl_data {
struct delayed_work status_change_work;
struct work_struct pl_disable_forever_work;
struct delayed_work pl_taper_work;
+ struct delayed_work pl_awake_work;
struct power_supply *main_psy;
struct power_supply *pl_psy;
struct power_supply *batt_psy;
@@ -640,6 +641,14 @@ static void pl_disable_forever_work(struct work_struct *work)
vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
}
+static void pl_awake_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work,
+ struct pl_data, pl_awake_work.work);
+
+ vote(chip->pl_awake_votable, PL_VOTER, false, 0);
+}
+
static int pl_disable_vote_callback(struct votable *votable,
void *data, int pl_disable, const char *client)
{
@@ -652,6 +661,11 @@ static int pl_disable_vote_callback(struct votable *votable,
chip->pl_settled_ua = 0;
if (!pl_disable) { /* enable */
+ /* keep system awake to talk to slave charger through i2c */
+ cancel_delayed_work_sync(&chip->pl_awake_work);
+ if (chip->pl_awake_votable)
+ vote(chip->pl_awake_votable, PL_VOTER, true, 0);
+
rc = power_supply_get_property(chip->pl_psy,
POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
if (rc == -ENODEV) {
@@ -712,6 +726,11 @@ static int pl_disable_vote_callback(struct votable *votable,
}
rerun_election(chip->fcc_votable);
rerun_election(chip->fv_votable);
+
+ cancel_delayed_work_sync(&chip->pl_awake_work);
+ if (chip->pl_awake_votable)
+ schedule_delayed_work(&chip->pl_awake_work,
+ msecs_to_jiffies(5000));
}
pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
@@ -1098,6 +1117,7 @@ int qcom_batt_init(void)
INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
+ INIT_DELAYED_WORK(&chip->pl_awake_work, pl_awake_work);
rc = pl_register_notifier(chip);
if (rc < 0) {
@@ -1151,6 +1171,7 @@ void qcom_batt_deinit(void)
cancel_delayed_work_sync(&chip->status_change_work);
cancel_delayed_work_sync(&chip->pl_taper_work);
cancel_work_sync(&chip->pl_disable_forever_work);
+ cancel_delayed_work_sync(&chip->pl_awake_work);
power_supply_unreg_notifier(&chip->nb);
destroy_votable(chip->pl_enable_votable_indirect);
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 4beaddff47b3..02b1204789bf 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -1829,7 +1829,8 @@ static int smb2_chg_config_init(struct smb2 *chip)
switch (pmic_rev_id->pmic_subtype) {
case PMI8998_SUBTYPE:
chip->chg.smb_version = PMI8998_SUBTYPE;
- chip->chg.wa_flags |= BOOST_BACK_WA | QC_AUTH_INTERRUPT_WA_BIT;
+ chip->chg.wa_flags |= BOOST_BACK_WA | QC_AUTH_INTERRUPT_WA_BIT
+ | TYPEC_PBS_WA_BIT;
if (pmic_rev_id->rev4 == PMI8998_V1P1_REV4) /* PMI rev 1.1 */
chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
if (pmic_rev_id->rev4 == PMI8998_V2P0_REV4) /* PMI rev 2.0 */
@@ -1844,7 +1845,8 @@ static int smb2_chg_config_init(struct smb2 *chip)
break;
case PM660_SUBTYPE:
chip->chg.smb_version = PM660_SUBTYPE;
- chip->chg.wa_flags |= BOOST_BACK_WA | OTG_WA;
+ chip->chg.wa_flags |= BOOST_BACK_WA | OTG_WA | OV_IRQ_WA_BIT
+ | TYPEC_PBS_WA_BIT;
chg->param.freq_buck = pm660_params.freq_buck;
chg->param.freq_boost = pm660_params.freq_boost;
chg->chg_freq.freq_5V = 650;
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 6d3316b934de..2cf8eb4e7ceb 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -670,6 +670,7 @@ static void smblib_uusb_removal(struct smb_charger *chg)
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
+ vote(chg->hvdcp_hw_inov_dis_votable, OV_VOTER, false, 0);
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
@@ -2012,6 +2013,18 @@ static int smblib_dm_pulse(struct smb_charger *chg)
return rc;
}
+static int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val)
+{
+ int rc;
+
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, val, val);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
int smblib_dp_dm(struct smb_charger *chg, int val)
{
int target_icl_ua, rc = 0;
@@ -2063,6 +2076,21 @@ int smblib_dp_dm(struct smb_charger *chg, int val)
smblib_dbg(chg, PR_PARALLEL, "ICL DOWN ICL=%d reduction=%d\n",
target_icl_ua, chg->usb_icl_delta_ua);
break;
+ case POWER_SUPPLY_DP_DM_FORCE_5V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_5V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 5V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_9V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_9V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 9V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_12V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_12V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 12V\n");
+ break;
case POWER_SUPPLY_DP_DM_ICL_UP:
default:
break;
@@ -2634,19 +2662,21 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
return -EINVAL;
}
- if (power_role == UFP_EN_CMD_BIT) {
- /* disable PBS workaround when forcing sink mode */
- rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0x0);
- if (rc < 0) {
- smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
- rc);
- }
- } else {
- /* restore it back to 0xA5 */
- rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
- if (rc < 0) {
- smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
- rc);
+ if (chg->wa_flags & TYPEC_PBS_WA_BIT) {
+ if (power_role == UFP_EN_CMD_BIT) {
+ /* disable PBS workaround when forcing sink mode */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0x0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+ rc);
+ }
+ } else {
+ /* restore it back to 0xA5 */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+ rc);
+ }
}
}
@@ -3426,6 +3456,33 @@ static void smblib_handle_sdp_enumeration_done(struct smb_charger *chg,
rising ? "rising" : "falling");
}
+#define MICRO_10P3V 10300000
+static void smblib_check_ov_condition(struct smb_charger *chg)
+{
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ if (chg->wa_flags & OV_IRQ_WA_BIT) {
+ rc = power_supply_get_property(chg->usb_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get current voltage, rc=%d\n",
+ rc);
+ return;
+ }
+
+ if (pval.intval > MICRO_10P3V) {
+ smblib_err(chg, "USBIN OV detected\n");
+ vote(chg->hvdcp_hw_inov_dis_votable, OV_VOTER, true,
+ 0);
+ pval.intval = POWER_SUPPLY_DP_DM_FORCE_5V;
+ rc = power_supply_set_property(chg->batt_psy,
+ POWER_SUPPLY_PROP_DP_DM, &pval);
+ return;
+ }
+ }
+}
+
#define QC3_PULSES_FOR_6V 5
#define QC3_PULSES_FOR_9V 20
#define QC3_PULSES_FOR_12V 35
@@ -3435,6 +3492,7 @@ static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
u8 stat;
int pulses;
+ smblib_check_ov_condition(chg);
power_supply_changed(chg->usb_main_psy);
if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP) {
rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
@@ -3557,7 +3615,8 @@ static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
* if pd is not allowed, then set pd_active = false right here,
* so that it starts the hvdcp engine
*/
- if (!get_effective_result(chg->pd_allowed_votable))
+ if (!get_effective_result(chg->pd_allowed_votable) &&
+ !chg->micro_usb_mode)
__smblib_set_prop_pd_active(chg, 0);
}
@@ -3967,6 +4026,7 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
/* reset hvdcp voters */
vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER, true, 0);
+ vote(chg->hvdcp_hw_inov_dis_votable, OV_VOTER, false, 0);
/* reset power delivery voters */
vote(chg->pd_allowed_votable, PD_VOTER, false, 0);
@@ -4041,10 +4101,13 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
if (rc < 0)
smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n", rc);
- /* restore crude sensor */
- rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
- if (rc < 0)
- smblib_err(chg, "Couldn't restore crude sensor rc=%d\n", rc);
+ /* restore crude sensor if PM660/PMI8998 */
+ if (chg->wa_flags & TYPEC_PBS_WA_BIT) {
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't restore crude sensor rc=%d\n",
+ rc);
+ }
mutex_lock(&chg->vconn_oc_lock);
if (!chg->vconn_en)
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index f292ca09f532..0de99b9da7bd 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -66,6 +66,7 @@ enum print_reason {
#define USBIN_I_VOTER "USBIN_I_VOTER"
#define WEAK_CHARGER_VOTER "WEAK_CHARGER_VOTER"
#define WBC_VOTER "WBC_VOTER"
+#define OV_VOTER "OV_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -84,6 +85,8 @@ enum {
TYPEC_CC2_REMOVAL_WA_BIT = BIT(2),
QC_AUTH_INTERRUPT_WA_BIT = BIT(3),
OTG_WA = BIT(4),
+ OV_IRQ_WA_BIT = BIT(5),
+ TYPEC_PBS_WA_BIT = BIT(6),
};
enum smb_irq_index {
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 84419af16f77..fd12ccc11e26 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone(
power_zone->id = result;
idr_init(&power_zone->idr);
+ result = -ENOMEM;
power_zone->name = kstrdup(name, GFP_KERNEL);
if (!power_zone->name)
goto err_name_alloc;
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 4d6d63e6d887..f7a18611b5d2 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -78,9 +78,10 @@ struct cprh_kbss_fuses {
* Fuse combos 8 - 15 map to CPR fusing revision 0 - 7 with speed bin fuse = 1.
* Fuse combos 16 - 23 map to CPR fusing revision 0 - 7 with speed bin fuse = 2.
* Fuse combos 24 - 31 map to CPR fusing revision 0 - 7 with speed bin fuse = 3.
+ * Fuse combos 32 - 39 map to CPR fusing revision 0 - 7 with speed bin fuse = 4.
*/
#define CPRH_MSM8998_KBSS_FUSE_COMBO_COUNT 32
-#define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT 32
+#define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT 40
#define CPRH_SDM630_KBSS_FUSE_COMBO_COUNT 24
/*
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 853976bd3d36..9473715725df 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -217,6 +217,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
missing = year;
}
+ /* Can't proceed if alarm is still invalid after replacing
+ * missing fields.
+ */
+ err = rtc_valid_tm(&alarm->time);
+ if (err)
+ goto done;
+
/* with luck, no rollover is needed */
t_now = rtc_tm_to_time64(&now);
t_alm = rtc_tm_to_time64(&alarm->time);
@@ -268,9 +275,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
dev_warn(&rtc->dev, "alarm rollover not handled\n");
}
-done:
err = rtc_valid_tm(&alarm->time);
+done:
if (err) {
dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n",
alarm->time.tm_year + 1900, alarm->time.tm_mon + 1,
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 229dd2fe8f45..c6b0c7ed7a30 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -150,6 +150,16 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
y_m_d = be32_to_cpu(__y_m_d);
h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
+
+ /* check if no alarm is set */
+ if (y_m_d == 0 && h_m_s_ms == 0) {
+ pr_debug("No alarm is set\n");
+ rc = -ENOENT;
+ goto exit;
+ } else {
+ pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms);
+ }
+
opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
exit:
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 950c5d0b6dca..afab89f5be48 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -257,7 +257,7 @@ static int snvs_rtc_probe(struct platform_device *pdev)
of_property_read_u32(pdev->dev.of_node, "offset", &data->offset);
}
- if (!data->regmap) {
+ if (IS_ERR(data->regmap)) {
dev_err(&pdev->dev, "Can't find snvs syscon\n");
return -ENODEV;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e7a6f1222642..b76a85d14ef0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1881,8 +1881,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device,
{
int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
- if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
- /* dasd is being set offline. */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+ !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+		 * dasd is being set offline,
+		 * but it is not a safe offline where we have to allow I/O
+ */
return 1;
}
if (device->stopped) {
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 6fa9364d1c07..835f1054976b 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,6 +2,8 @@
# S/390 character devices
#
+CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1e16331891a9..f9d6a9f00640 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
+ struct channel_path *chp;
struct chp_link link;
struct chp_id chpid;
int status;
@@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
status = chp_get_status(chpid);
- if (status < 0)
- chp_new(chpid);
- else if (!status)
+ if (!status)
return;
+
+ if (status < 0) {
+ chp_new(chpid);
+ } else {
+ chp = chpid_to_chp(chpid);
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4bb5262f7aee..742ca57ece8c 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -126,7 +126,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int start, int count, int auto_ack)
{
- int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
+ int rc, tmp_count = count, tmp_start = start, nr = q->nr;
unsigned int ccq = 0;
qperf_inc(q, eqbs);
@@ -149,14 +149,7 @@ again:
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
tmp_count);
- /*
- * Retry once, if that fails bail out and process the
- * extracted buffers before trying again.
- */
- if (!retried++)
- goto again;
- else
- return count - tmp_count;
+ return count - tmp_count;
}
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
@@ -212,7 +205,10 @@ again:
return 0;
}
-/* returns number of examined buffers and their common state in *state */
+/*
+ * Returns number of examined buffers and their common state in *state.
+ * Requested number of buffers-to-examine must be > 0.
+ */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
int auto_ack, int merge_pending)
@@ -223,17 +219,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
- for (i = 0; i < count; i++) {
- if (!__state) {
- __state = q->slsb.val[bufnr];
- if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
- __state = SLSB_P_OUTPUT_EMPTY;
- } else if (merge_pending) {
- if ((q->slsb.val[bufnr] & __state) != __state)
- break;
- } else if (q->slsb.val[bufnr] != __state)
- break;
+ /* get initial state: */
+ __state = q->slsb.val[bufnr];
+ if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+ __state = SLSB_P_OUTPUT_EMPTY;
+
+ for (i = 1; i < count; i++) {
bufnr = next_buf(bufnr);
+
+ /* merge PENDING into EMPTY: */
+ if (merge_pending &&
+ q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
+ __state == SLSB_P_OUTPUT_EMPTY)
+ continue;
+
+ /* stop if next state differs from initial state: */
+ if (q->slsb.val[bufnr] != __state)
+ break;
}
*state = __state;
return i;
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 499e369eabf0..8bc1625337f6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -191,6 +191,7 @@ struct bnx2fc_hba {
struct bnx2fc_cmd_mgr *cmd_mgr;
spinlock_t hba_lock;
struct mutex hba_mutex;
+ struct mutex hba_stats_mutex;
unsigned long adapter_state;
#define ADAPTER_STATE_UP 0
#define ADAPTER_STATE_GOING_DOWN 1
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 67405c628864..d0b227ffbd5f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -641,15 +641,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
if (!fw_stats)
return NULL;
+ mutex_lock(&hba->hba_stats_mutex);
+
bnx2fc_stats = fc_get_host_stats(shost);
init_completion(&hba->stat_req_done);
if (bnx2fc_send_stat_req(hba))
- return bnx2fc_stats;
+ goto unlock_stats_mutex;
rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
if (!rc) {
BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
- return bnx2fc_stats;
+ goto unlock_stats_mutex;
}
BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -671,6 +673,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
memcpy(&hba->prev_stats, hba->stats_buffer,
sizeof(struct fcoe_statistics_params));
+
+unlock_stats_mutex:
+ mutex_unlock(&hba->hba_stats_mutex);
return bnx2fc_stats;
}
@@ -1302,6 +1307,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
}
spin_lock_init(&hba->hba_lock);
mutex_init(&hba->hba_mutex);
+ mutex_init(&hba->hba_stats_mutex);
hba->cnic = cnic;
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 622bdabc8894..dab195f04da7 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
goto bye;
}
- mempool_free(mbp, hw->mb_mempool);
if (finicsum != cfcsum) {
csio_warn(hw,
"Config File checksum mismatch: csum=%#x, computed=%#x\n",
@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
rv = csio_hw_validate_caps(hw, mbp);
if (rv != 0)
goto bye;
+
+ mempool_free(mbp, hw->mb_mempool);
+ mbp = NULL;
+
/*
* Note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index efce04df2109..9f0b00c38658 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1695,6 +1695,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
+ /*
+			 * cmds should fail during shutdown if the session
+			 * state is bad, allowing completion to happen
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ reason = FAILURE_SESSION_FAILED;
+ sc->result = DID_NO_CONNECT << 16;
+ break;
+ }
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1980,6 +1989,19 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
+ * During shutdown, if session is prematurely disconnected,
+ * recovery won't happen and there will be hung cmds. Not
+ * handling cmds would trigger EH, also bad in this case.
+		 * Instead, handle the cmd, allow completion to happen and let
+		 * the upper layer deal with the result.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ sc->result = DID_NO_CONNECT << 16;
+ ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
+ /*
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
@@ -2083,7 +2105,7 @@ done:
task->last_timeout = jiffies;
spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ "timer reset" : "shutdown or nh");
return rc;
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 022bb6e10d98..12886f96b286 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -282,6 +282,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
+ phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
skip:
if (new_phy)
@@ -675,7 +676,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);
- if (!res)
+ if (res)
goto out;
phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -684,6 +685,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
out:
+ kfree(req);
kfree(resp);
return res;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index e111c3d8c5d6..b868ef3b2ca3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3886,19 +3886,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
- /*
- * Bug work around for firmware SATL handling. The loop
- * is based on atomic operations and ensures consistency
- * since we're lockless at this point
- */
- do {
- if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
- scmd->result = SAM_STAT_BUSY;
- scmd->scsi_done(scmd);
- return 0;
- }
- } while (_scsih_set_satl_pending(scmd, true));
-
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -3924,6 +3911,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
+ /*
+ * Bug work around for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency
+ * since we're lockless at this point
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
else if (scmd->sc_data_direction == DMA_TO_DEVICE)
@@ -3945,6 +3945,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (!smid) {
pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -3975,6 +3976,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (mpi_request->DataLength) {
if (ioc->build_sg_scmd(ioc, scmd, smid)) {
mpt3sas_base_free_smid(ioc, smid);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
} else
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ee65f3324d71..6e66e2ad9daf 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1873,6 +1873,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
break; /* standby */
if (sshdr.asc == 4 && sshdr.ascq == 0xc)
break; /* unavailable */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
+ break; /* sanitize in progress */
/*
* Issue command to spin up drive when not ready
*/
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 1e8f50c4ebad..b92c217dc1b5 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -967,4 +967,11 @@ config QCOM_QDSS_BRIDGE
sub-system to USB on APSS side. The driver acts as a bridge between the
MHI and USB interface. If unsure, say N.
+config EXT_ANC
+ bool "Enable External ANC"
+ depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3
+ help
+	  This option enables support for anti-noise cancellation
+	  on the Sensor DSP.
+
source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index df94bbc6b696..6cf4c7b6dd8a 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -171,8 +171,6 @@ struct mailbox_config_info {
* @kwork: Work to be executed when an irq is received.
* @kworker: Handle to the entity processing of
deferred commands.
- * @tasklet Handle to tasklet to process incoming data
- packets in atomic manner.
* @task: Handle to the task context used to run @kworker.
* @use_ref: Active uses of this transport use this to grab
* a reference. Used for ssr synchronization.
@@ -216,7 +214,6 @@ struct edge_info {
struct kthread_work kwork;
struct kthread_worker kworker;
struct task_struct *task;
- struct tasklet_struct tasklet;
struct srcu_struct use_ref;
bool in_ssr;
spinlock_t rx_lock;
@@ -1191,18 +1188,6 @@ static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
}
/**
- * rx_worker_atomic() - worker function to process received command in atomic
- * context.
- * @param: The param parameter passed during initialization of the tasklet.
- */
-static void rx_worker_atomic(unsigned long param)
-{
- struct edge_info *einfo = (struct edge_info *)param;
-
- __rx_worker(einfo, true);
-}
-
-/**
* rx_worker() - worker function to process received commands
* @work: kwork associated with the edge to process commands on.
*/
@@ -1221,7 +1206,7 @@ irqreturn_t irq_handler(int irq, void *priv)
if (einfo->rx_reset_reg)
writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg);
- tasklet_hi_schedule(&einfo->tasklet);
+ __rx_worker(einfo, true);
einfo->rx_irq_count++;
return IRQ_HANDLED;
@@ -2373,7 +2358,6 @@ static int glink_smem_native_probe(struct platform_device *pdev)
init_waitqueue_head(&einfo->tx_blocked_queue);
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
- tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
einfo->read_from_fifo = read_from_fifo;
einfo->write_to_fifo = write_to_fifo;
init_srcu_struct(&einfo->use_ref);
@@ -2477,7 +2461,6 @@ smem_alloc_fail:
flush_kthread_worker(&einfo->kworker);
kthread_stop(einfo->task);
einfo->task = NULL;
- tasklet_kill(&einfo->tasklet);
kthread_fail:
iounmap(einfo->out_irq_reg);
ioremap_fail:
@@ -2563,7 +2546,6 @@ static int glink_rpm_native_probe(struct platform_device *pdev)
init_waitqueue_head(&einfo->tx_blocked_queue);
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
- tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
einfo->intentless = true;
einfo->read_from_fifo = memcpy32_fromio;
einfo->write_to_fifo = memcpy32_toio;
@@ -2725,7 +2707,6 @@ toc_init_fail:
flush_kthread_worker(&einfo->kworker);
kthread_stop(einfo->task);
einfo->task = NULL;
- tasklet_kill(&einfo->tasklet);
kthread_fail:
iounmap(msgram);
msgram_ioremap_fail:
@@ -2854,7 +2835,6 @@ static int glink_mailbox_probe(struct platform_device *pdev)
init_waitqueue_head(&einfo->tx_blocked_queue);
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
- tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
einfo->read_from_fifo = read_from_fifo;
einfo->write_to_fifo = write_to_fifo;
init_srcu_struct(&einfo->use_ref);
@@ -2977,7 +2957,6 @@ smem_alloc_fail:
flush_kthread_worker(&einfo->kworker);
kthread_stop(einfo->task);
einfo->task = NULL;
- tasklet_kill(&einfo->tasklet);
kthread_fail:
iounmap(einfo->rx_reset_reg);
rx_reset_ioremap_fail:
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index fe7fb1e5b925..177737f3e314 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -538,7 +538,6 @@ int notify_for_subsystem(struct subsys_info *ss_info)
* only modified during setup.
*/
atomic_set(&responses_remaining, ss_info->notify_list_len);
- init_waitqueue_head(&waitqueue);
notifications_successful = true;
list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
@@ -945,7 +944,7 @@ static int glink_ssr_probe(struct platform_device *pdev)
ss_info->cb_data = NULL;
spin_lock_init(&ss_info->link_up_lock);
spin_lock_init(&ss_info->cb_lock);
-
+ init_waitqueue_head(&waitqueue);
nb = kmalloc(sizeof(struct restart_notifier_block), GFP_KERNEL);
if (!nb) {
GLINK_SSR_ERR("<SSR> %s: Could not allocate notifier block\n",
diff --git a/drivers/soc/qcom/hab/hab.c b/drivers/soc/qcom/hab/hab.c
index 3294fc34bdf8..37afe025a97a 100644
--- a/drivers/soc/qcom/hab/hab.c
+++ b/drivers/soc/qcom/hab/hab.c
@@ -356,18 +356,21 @@ err:
return ret;
}
-struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
+int hab_vchan_recv(struct uhab_context *ctx,
+ struct hab_message **message,
int vcid,
+ int *rsize,
unsigned int flags)
{
struct virtual_channel *vchan;
- struct hab_message *message;
int ret = 0;
int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING;
vchan = hab_get_vchan_fromvcid(vcid, ctx);
- if (!vchan)
- return ERR_PTR(-ENODEV);
+ if (!vchan) {
+ pr_err("vcid %X, vchan %p ctx %p\n", vcid, vchan, ctx);
+ return -ENODEV;
+ }
if (nonblocking_flag) {
/*
@@ -378,18 +381,18 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
physical_channel_rx_dispatch((unsigned long) vchan->pchan);
}
- message = hab_msg_dequeue(vchan, flags);
- if (!message) {
+ ret = hab_msg_dequeue(vchan, message, rsize, flags);
+ if (!(*message)) {
if (nonblocking_flag)
ret = -EAGAIN;
else if (vchan->otherend_closed)
ret = -ENODEV;
- else
- ret = -EPIPE;
+ else if (ret == -ERESTARTSYS)
+ ret = -EINTR;
}
hab_vchan_put(vchan);
- return ret ? ERR_PTR(ret) : message;
+ return ret;
}
bool hab_is_loopback(void)
@@ -843,29 +846,22 @@ static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
break;
}
- msg = hab_vchan_recv(ctx, recv_param->vcid, recv_param->flags);
-
- if (IS_ERR(msg)) {
- recv_param->sizebytes = 0;
- ret = PTR_ERR(msg);
- break;
- }
+ ret = hab_vchan_recv(ctx, &msg, recv_param->vcid,
+ &recv_param->sizebytes, recv_param->flags);
- if (recv_param->sizebytes < msg->sizebytes) {
- recv_param->sizebytes = 0;
- ret = -EINVAL;
- } else if (copy_to_user((void __user *)recv_param->data,
+ if (ret == 0 && msg) {
+ if (copy_to_user((void __user *)recv_param->data,
msg->data,
msg->sizebytes)) {
- pr_err("copy_to_user failed: vc=%x size=%d\n",
- recv_param->vcid, (int)msg->sizebytes);
- recv_param->sizebytes = 0;
- ret = -EFAULT;
- } else {
- recv_param->sizebytes = msg->sizebytes;
+ pr_err("copy_to_user failed: vc=%x size=%d\n",
+ recv_param->vcid, (int)msg->sizebytes);
+ recv_param->sizebytes = 0;
+ ret = -EFAULT;
+ }
}
- hab_msg_free(msg);
+ if (msg)
+ hab_msg_free(msg);
break;
case IOCTL_HAB_VC_EXPORT:
ret = hab_mem_export(ctx, (struct hab_export *)data, 0);
diff --git a/drivers/soc/qcom/hab/hab.h b/drivers/soc/qcom/hab/hab.h
index ce4c94fa75c9..2a07da728e00 100644
--- a/drivers/soc/qcom/hab/hab.h
+++ b/drivers/soc/qcom/hab/hab.h
@@ -147,7 +147,8 @@ struct hab_header {
(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)
-#define HAB_HEADER_SET_SESSION_ID(header, sid) ((header).session_id = (sid))
+#define HAB_HEADER_SET_SESSION_ID(header, sid) \
+ ((header).session_id = (sid))
#define HAB_HEADER_SET_SIZE(header, size) \
((header).id_type_size = ((header).id_type_size & \
@@ -281,8 +282,8 @@ struct uhab_context {
};
/*
- * array to describe the VM and its MMID configuration as what is connected to
- * so this is describing a pchan's remote side
+ * array to describe the VM and its MMID configuration as
+ * what is connected to so this is describing a pchan's remote side
*/
struct vmid_mmid_desc {
int vmid; /* remote vmid */
@@ -341,8 +342,9 @@ struct virtual_channel {
};
/*
- * Struct shared between local and remote, contents are composed by exporter,
- * the importer only writes to pdata and local (exporter) domID
+ * Struct shared between local and remote, contents
+ * are composed by exporter, the importer only writes
+ * to pdata and local (exporter) domID
*/
struct export_desc {
uint32_t export_id;
@@ -371,9 +373,11 @@ long hab_vchan_send(struct uhab_context *ctx,
size_t sizebytes,
void *data,
unsigned int flags);
-struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
- int vcid,
- unsigned int flags);
+int hab_vchan_recv(struct uhab_context *ctx,
+ struct hab_message **msg,
+ int vcid,
+ int *rsize,
+ unsigned int flags);
void hab_vchan_stop(struct virtual_channel *vchan);
void hab_vchans_stop(struct physical_channel *pchan);
void hab_vchan_stop_notify(struct virtual_channel *vchan);
@@ -410,24 +414,18 @@ int habmem_hyp_revoke(void *expdata, uint32_t count);
void *habmem_imp_hyp_open(void);
void habmem_imp_hyp_close(void *priv, int kernel);
-long habmem_imp_hyp_map(void *priv, void *impdata, uint32_t count,
- uint32_t remotedom,
- uint64_t *index,
- void **pkva,
- int kernel,
- uint32_t userflags);
+int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
+ struct export_desc *exp, int kernel);
-long habmm_imp_hyp_unmap(void *priv, uint64_t index,
- uint32_t count,
- int kernel);
+int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp);
int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma);
void hab_msg_free(struct hab_message *message);
-struct hab_message *hab_msg_dequeue(struct virtual_channel *vchan,
- unsigned int flags);
+int hab_msg_dequeue(struct virtual_channel *vchan,
+ struct hab_message **msg, int *rsize, unsigned int flags);
void hab_msg_recv(struct physical_channel *pchan,
struct hab_header *header);
diff --git a/drivers/soc/qcom/hab/hab_mem_linux.c b/drivers/soc/qcom/hab/hab_mem_linux.c
index ecc3f52a6662..a779067ee4c4 100644
--- a/drivers/soc/qcom/hab/hab_mem_linux.c
+++ b/drivers/soc/qcom/hab/hab_mem_linux.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,9 @@ struct pages_list {
uint32_t userflags;
struct file *filp_owner;
struct file *filp_mapper;
+ struct dma_buf *dmabuf;
+ int32_t export_id;
+ int32_t vcid;
};
struct importer_context {
@@ -58,7 +61,7 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
}
-static int habmem_get_dma_pages(unsigned long address,
+static int habmem_get_dma_pages_from_va(unsigned long address,
int page_count,
struct page **pages)
{
@@ -142,6 +145,56 @@ err:
return rc;
}
+static int habmem_get_dma_pages_from_fd(int32_t fd,
+ int page_count,
+ struct page **pages)
+{
+ struct dma_buf *dmabuf = NULL;
+ struct scatterlist *s;
+ struct sg_table *sg_table = NULL;
+ struct dma_buf_attachment *attach = NULL;
+ struct page *page;
+ int i, j, rc = 0;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ attach = dma_buf_attach(dmabuf, hab_driver.dev);
+ if (IS_ERR_OR_NULL(attach)) {
+ pr_err("dma_buf_attach failed\n");
+ goto err;
+ }
+
+ sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+
+ if (IS_ERR_OR_NULL(sg_table)) {
+ pr_err("dma_buf_map_attachment failed\n");
+ goto err;
+ }
+
+ for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
+ page = sg_page(s);
+ pr_debug("sgl length %d\n", s->length);
+
+ for (j = 0; j < (s->length >> PAGE_SHIFT); j++) {
+ pages[rc] = nth_page(page, j);
+ rc++;
+ if (WARN_ON(rc >= page_count))
+ break;
+ }
+ }
+
+err:
+ if (!IS_ERR_OR_NULL(sg_table))
+ dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE);
+ if (!IS_ERR_OR_NULL(attach))
+ dma_buf_detach(dmabuf, attach);
+ if (!IS_ERR_OR_NULL(dmabuf))
+ dma_buf_put(dmabuf);
+ return rc;
+}
+
/*
* exporter - grant & revoke
* degenerate sharabled page list based on CPU friendly virtual "address".
@@ -165,7 +218,11 @@ int habmem_hyp_grant_user(unsigned long address,
down_read(&current->mm->mmap_sem);
if (HABMM_EXP_MEM_TYPE_DMA & flags) {
- ret = habmem_get_dma_pages(address,
+ ret = habmem_get_dma_pages_from_va(address,
+ page_count,
+ pages);
+ } else if (HABMM_EXPIMP_FLAGS_FD & flags) {
+ ret = habmem_get_dma_pages_from_fd(address,
page_count,
pages);
} else {
@@ -260,30 +317,156 @@ void habmem_imp_hyp_close(void *imp_ctx, int kernel)
kfree(priv);
}
-/*
- * setup pages, be ready for the following mmap call
- * index is output to refer to this imported buffer described by the import data
- */
-long habmem_imp_hyp_map(void *imp_ctx,
- void *impdata,
- uint32_t count,
- uint32_t remotedom,
- uint64_t *index,
- void **pkva,
- int kernel,
- uint32_t userflags)
+static struct sg_table *hab_mem_map_dma_buf(
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct pages_list *pglist = dmabuf->priv;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ int i;
+ int ret = 0;
+ struct page **pages = pglist->pages;
+
+ sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(sgt, pglist->npages, GFP_KERNEL);
+ if (ret) {
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_sg(sgt->sgl, sg, pglist->npages, i) {
+ sg_set_page(sg, pages[i], PAGE_SIZE, 0);
+ }
+
+ return sgt;
+}
+
+
+static void hab_mem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction direction)
+{
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page;
+ struct pages_list *pglist;
+
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ /* PHY address */
+ unsigned long fault_offset =
+ (unsigned long)vmf->virtual_address - vma->vm_start + offset;
+ unsigned long fault_index = fault_offset>>PAGE_SHIFT;
+ int page_idx;
+
+ if (vma == NULL)
+ return VM_FAULT_SIGBUS;
+
+ pglist = vma->vm_private_data;
+
+ page_idx = fault_index - pglist->index;
+ if (page_idx < 0 || page_idx >= pglist->npages) {
+ pr_err("Out of page array! page_idx %d, pg cnt %ld",
+ page_idx, pglist->npages);
+ return VM_FAULT_SIGBUS;
+ }
+
+ page = pglist->pages[page_idx];
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static void hab_map_open(struct vm_area_struct *vma)
+{
+}
+
+static void hab_map_close(struct vm_area_struct *vma)
+{
+}
+
+static const struct vm_operations_struct habmem_vm_ops = {
+ .fault = hab_map_fault,
+ .open = hab_map_open,
+ .close = hab_map_close,
+};
+
+static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct pages_list *pglist = dmabuf->priv;
+ uint32_t obj_size = pglist->npages << PAGE_SHIFT;
+
+ if (vma == NULL)
+ return VM_FAULT_SIGBUS;
+
+ /* Check for valid size. */
+ if (obj_size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &habmem_vm_ops;
+ vma->vm_private_data = pglist;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ return 0;
+}
+
+static void hab_mem_dma_buf_release(struct dma_buf *dmabuf)
+{
+}
+
+static void *hab_mem_dma_buf_kmap(struct dma_buf *dmabuf,
+ unsigned long offset)
+{
+ return NULL;
+}
+
+static void hab_mem_dma_buf_kunmap(struct dma_buf *dmabuf,
+ unsigned long offset,
+ void *ptr)
+{
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = hab_mem_map_dma_buf,
+ .unmap_dma_buf = hab_mem_unmap_dma_buf,
+ .mmap = hab_mem_mmap,
+ .release = hab_mem_dma_buf_release,
+ .kmap_atomic = hab_mem_dma_buf_kmap,
+ .kunmap_atomic = hab_mem_dma_buf_kunmap,
+ .kmap = hab_mem_dma_buf_kmap,
+ .kunmap = hab_mem_dma_buf_kunmap,
+};
+
+static int habmem_imp_hyp_map_fd(void *imp_ctx,
+ struct export_desc *exp,
+ uint32_t userflags,
+ int32_t *pfd)
{
struct page **pages;
- struct compressed_pfns *pfn_table = (struct compressed_pfns *)impdata;
+ struct compressed_pfns *pfn_table =
+ (struct compressed_pfns *)exp->payload;
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
unsigned long pfn;
int i, j, k = 0;
+ pgprot_t prot = PAGE_KERNEL;
+ int32_t fd;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
if (!pfn_table || !priv)
return -EINVAL;
- pages = vmalloc(count * sizeof(struct page *));
+ pages = vmalloc(exp->payload_count * sizeof(struct page *));
if (!pages)
return -ENOMEM;
@@ -303,145 +486,230 @@ long habmem_imp_hyp_map(void *imp_ctx,
}
pglist->pages = pages;
- pglist->npages = count;
- pglist->kernel = kernel;
- pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
+ pglist->npages = exp->payload_count;
+ pglist->kernel = 0;
+ pglist->index = 0;
pglist->refcntk = pglist->refcntu = 0;
pglist->userflags = userflags;
+ pglist->export_id = exp->export_id;
+ pglist->vcid = exp->vcid_remote;
+
+ if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
+ prot = pgprot_writecombine(prot);
+
+ exp_info.ops = &dma_buf_ops;
+ exp_info.size = exp->payload_count << PAGE_SHIFT;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = pglist;
+ pglist->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(pglist->dmabuf)) {
+ vfree(pages);
+ kfree(pglist);
+ return PTR_ERR(pglist->dmabuf);
+ }
- *index = pglist->index << PAGE_SHIFT;
-
- if (kernel) {
- pgprot_t prot = PAGE_KERNEL;
-
- if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
- prot = pgprot_writecombine(prot);
-
- pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
- if (pglist->kva == NULL) {
- vfree(pages);
- kfree(pglist);
- pr_err("%ld pages vmap failed\n", pglist->npages);
- return -ENOMEM;
- } else {
- pr_debug("%ld pages vmap pass, return %pK\n",
- pglist->npages, pglist->kva);
- }
-
- pglist->uva = NULL;
- pglist->refcntk++;
- *pkva = pglist->kva;
- *index = (uint64_t)((uintptr_t)pglist->kva);
- } else {
- pglist->kva = NULL;
+ fd = dma_buf_fd(pglist->dmabuf, O_CLOEXEC);
+ if (fd < 0) {
+ dma_buf_put(pglist->dmabuf);
+ vfree(pages);
+ kfree(pglist);
+ return -EINVAL;
}
+ pglist->refcntk++;
+
write_lock(&priv->implist_lock);
list_add_tail(&pglist->list, &priv->imp_list);
priv->cnt++;
write_unlock(&priv->implist_lock);
- pr_debug("index returned %llx\n", *index);
+
+ *pfd = fd;
return 0;
}
-/* the input index is PHY address shifted for uhab, and kva for khab */
-long habmm_imp_hyp_unmap(void *imp_ctx,
- uint64_t index,
- uint32_t count,
- int kernel)
+static int habmem_imp_hyp_map_kva(void *imp_ctx,
+ struct export_desc *exp,
+ uint32_t userflags,
+ void **pkva)
{
+ struct page **pages;
+ struct compressed_pfns *pfn_table =
+ (struct compressed_pfns *)exp->payload;
+ struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
- struct pages_list *pglist, *tmp;
- int found = 0;
- uint64_t pg_index = index >> PAGE_SHIFT;
-
- write_lock(&priv->implist_lock);
- list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
- pr_debug("node pglist %pK, kernel %d, pg_index %llx\n",
- pglist, pglist->kernel, pg_index);
+ unsigned long pfn;
+ int i, j, k = 0;
+ pgprot_t prot = PAGE_KERNEL;
- if (kernel) {
- if (pglist->kva == (void *)((uintptr_t)index))
- found = 1;
- } else {
- if (pglist->index == pg_index)
- found = 1;
- }
+ if (!pfn_table || !priv)
+ return -EINVAL;
+ pages = vmalloc(exp->payload_count * sizeof(struct page *));
+ if (!pages)
+ return -ENOMEM;
+ pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
+ if (!pglist) {
+ vfree(pages);
+ return -ENOMEM;
+ }
- if (found) {
- list_del(&pglist->list);
- priv->cnt--;
- break;
+ pfn = pfn_table->first_pfn;
+ for (i = 0; i < pfn_table->nregions; i++) {
+ for (j = 0; j < pfn_table->region[i].size; j++) {
+ pages[k] = pfn_to_page(pfn+j);
+ k++;
}
+ pfn += pfn_table->region[i].size + pfn_table->region[i].space;
}
- write_unlock(&priv->implist_lock);
- if (!found) {
- pr_err("failed to find export id on index %llx\n", index);
- return -EINVAL;
+ pglist->pages = pages;
+ pglist->npages = exp->payload_count;
+ pglist->kernel = 1;
+ pglist->refcntk = pglist->refcntu = 0;
+ pglist->userflags = userflags;
+ pglist->export_id = exp->export_id;
+ pglist->vcid = exp->vcid_remote;
+
+ if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
+ prot = pgprot_writecombine(prot);
+
+ pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
+ if (pglist->kva == NULL) {
+ vfree(pages);
+ kfree(pglist);
+ pr_err("%ld pages vmap failed\n", pglist->npages);
+ return -ENOMEM;
}
- pr_debug("detach pglist %pK, index %llx, kernel %d, list cnt %d\n",
- pglist, pglist->index, pglist->kernel, priv->cnt);
+ pr_debug("%ld pages vmap pass, return %p\n",
+ pglist->npages, pglist->kva);
- if (kernel)
- if (pglist->kva)
- vunmap(pglist->kva);
+ pglist->refcntk++;
- vfree(pglist->pages);
- kfree(pglist);
+ write_lock(&priv->implist_lock);
+ list_add_tail(&pglist->list, &priv->imp_list);
+ priv->cnt++;
+ write_unlock(&priv->implist_lock);
+
+ *pkva = pglist->kva;
return 0;
}
-static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int habmem_imp_hyp_map_uva(void *imp_ctx,
+ struct export_desc *exp,
+ uint32_t userflags,
+ uint64_t *index)
{
- struct page *page;
+ struct page **pages;
+ struct compressed_pfns *pfn_table =
+ (struct compressed_pfns *)exp->payload;
struct pages_list *pglist;
+ struct importer_context *priv = imp_ctx;
+ unsigned long pfn;
+ int i, j, k = 0;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-
- /* PHY address */
- unsigned long fault_offset =
- (unsigned long)vmf->virtual_address - vma->vm_start + offset;
- unsigned long fault_index = fault_offset>>PAGE_SHIFT;
- int page_idx;
+ if (!pfn_table || !priv)
+ return -EINVAL;
- if (vma == NULL)
- return VM_FAULT_SIGBUS;
+ pages = vmalloc(exp->payload_count * sizeof(struct page *));
+ if (!pages)
+ return -ENOMEM;
- pglist = vma->vm_private_data;
+ pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
+ if (!pglist) {
+ vfree(pages);
+ return -ENOMEM;
+ }
- page_idx = fault_index - pglist->index;
- if (page_idx < 0 || page_idx >= pglist->npages) {
- pr_err("Out of page array. page_idx %d, pg cnt %ld",
- page_idx, pglist->npages);
- return VM_FAULT_SIGBUS;
+ pfn = pfn_table->first_pfn;
+ for (i = 0; i < pfn_table->nregions; i++) {
+ for (j = 0; j < pfn_table->region[i].size; j++) {
+ pages[k] = pfn_to_page(pfn+j);
+ k++;
+ }
+ pfn += pfn_table->region[i].size + pfn_table->region[i].space;
}
- pr_debug("Fault page index %d\n", page_idx);
+ pglist->pages = pages;
+ pglist->npages = exp->payload_count;
+ pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
+ pglist->refcntk = pglist->refcntu = 0;
+ pglist->userflags = userflags;
+ pglist->export_id = exp->export_id;
+ pglist->vcid = exp->vcid_remote;
+
+ write_lock(&priv->implist_lock);
+ list_add_tail(&pglist->list, &priv->imp_list);
+ priv->cnt++;
+ write_unlock(&priv->implist_lock);
+
+ *index = pglist->index << PAGE_SHIFT;
- page = pglist->pages[page_idx];
- get_page(page);
- vmf->page = page;
return 0;
}
-static void hab_map_open(struct vm_area_struct *vma)
+int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
+ struct export_desc *exp, int kernel)
{
+ int ret = 0;
+
+ if (kernel)
+ ret = habmem_imp_hyp_map_kva(imp_ctx, exp,
+ param->flags,
+ (void **)&param->kva);
+ else if (param->flags & HABMM_EXPIMP_FLAGS_FD)
+ ret = habmem_imp_hyp_map_fd(imp_ctx, exp,
+ param->flags,
+ (int32_t *)&param->kva);
+ else
+ ret = habmem_imp_hyp_map_uva(imp_ctx, exp,
+ param->flags,
+ &param->index);
+
+ return ret;
}
-static void hab_map_close(struct vm_area_struct *vma)
+int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp)
{
-}
+ struct importer_context *priv = imp_ctx;
+ struct pages_list *pglist, *tmp;
+ int found = 0;
-static const struct vm_operations_struct habmem_vm_ops = {
+ write_lock(&priv->implist_lock);
+ list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
+ if (pglist->export_id == exp->export_id &&
+ pglist->vcid == exp->vcid_remote) {
+ found = 1;
+ }
- .fault = hab_map_fault,
- .open = hab_map_open,
- .close = hab_map_close,
-};
+ if (found) {
+ list_del(&pglist->list);
+ priv->cnt--;
+ break;
+ }
+ }
+ write_unlock(&priv->implist_lock);
+
+ if (!found) {
+ pr_err("failed to find export id %u\n", exp->export_id);
+ return -EINVAL;
+ }
+
+ pr_debug("detach pglist %p, kernel %d, list cnt %d\n",
+ pglist, pglist->kernel, priv->cnt);
+
+ if (pglist->kva)
+ vunmap(pglist->kva);
+
+ if (pglist->dmabuf)
+ dma_buf_put(pglist->dmabuf);
+
+ vfree(pglist->pages);
+ kfree(pglist);
+
+ return 0;
+}
int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
{
diff --git a/drivers/soc/qcom/hab/hab_mimex.c b/drivers/soc/qcom/hab/hab_mimex.c
index 67601590908e..00fbeabed4bb 100644
--- a/drivers/soc/qcom/hab/hab_mimex.c
+++ b/drivers/soc/qcom/hab/hab_mimex.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -345,25 +345,20 @@ int hab_mem_import(struct uhab_context *ctx,
exp->export_id, exp->payload_count, exp->domid_local,
*((uint32_t *)exp->payload));
- ret = habmem_imp_hyp_map(ctx->import_ctx,
- exp->payload,
- exp->payload_count,
- exp->domid_local,
- &exp->import_index,
- &exp->kva,
- kernel,
- param->flags);
+ ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel);
+
if (ret) {
pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n",
ret, exp->payload_count,
exp->domid_local, *((uint32_t *)exp->payload));
return ret;
}
- pr_debug("import index %llx, kva %llx, kernel %d\n",
- exp->import_index, param->kva, kernel);
- param->index = exp->import_index;
- param->kva = (uint64_t)exp->kva;
+ exp->import_index = param->index;
+ exp->kva = kernel ? (void *)param->kva : NULL;
+
+ pr_debug("import index %llx, kva or fd %llx, kernel %d\n",
+ exp->import_index, param->kva, kernel);
return ret;
}
@@ -396,13 +391,10 @@ int hab_mem_unimport(struct uhab_context *ctx,
if (!found)
ret = -EINVAL;
else {
- ret = habmm_imp_hyp_unmap(ctx->import_ctx,
- exp->import_index,
- exp->payload_count,
- kernel);
+ ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp);
if (ret) {
- pr_err("unmap fail id:%d pcnt:%d kernel:%d\n",
- exp->export_id, exp->payload_count, kernel);
+ pr_err("unmap fail id:%d pcnt:%d vcid:%d\n",
+ exp->export_id, exp->payload_count, exp->vcid_remote);
}
param->kva = (uint64_t)exp->kva;
kfree(exp);
diff --git a/drivers/soc/qcom/hab/hab_msg.c b/drivers/soc/qcom/hab/hab_msg.c
index d5c625e8c1c9..d904cdee838c 100644
--- a/drivers/soc/qcom/hab/hab_msg.c
+++ b/drivers/soc/qcom/hab/hab_msg.c
@@ -42,8 +42,9 @@ void hab_msg_free(struct hab_message *message)
kfree(message);
}
-struct hab_message *
-hab_msg_dequeue(struct virtual_channel *vchan, unsigned int flags)
+int
+hab_msg_dequeue(struct virtual_channel *vchan, struct hab_message **msg,
+ int *rsize, unsigned int flags)
{
struct hab_message *message = NULL;
int ret = 0;
@@ -64,15 +65,30 @@ hab_msg_dequeue(struct virtual_channel *vchan, unsigned int flags)
}
/* return all the received messages before the remote close */
- if (!ret && !hab_rx_queue_empty(vchan)) {
+ if ((!ret || (ret == -ERESTARTSYS)) && !hab_rx_queue_empty(vchan)) {
spin_lock_bh(&vchan->rx_lock);
message = list_first_entry(&vchan->rx_list,
struct hab_message, node);
- list_del(&message->node);
+ if (message) {
+ if (*rsize >= message->sizebytes) {
+ /* msg can be safely retrieved in full */
+ list_del(&message->node);
+ ret = 0;
+ *rsize = message->sizebytes;
+ } else {
+ pr_err("rcv buffer too small %d < %zd\n",
+ *rsize, message->sizebytes);
+ *rsize = 0;
+ message = NULL;
+ ret = -EINVAL;
+ }
+ }
spin_unlock_bh(&vchan->rx_lock);
- }
+ } else
+ *rsize = 0;
- return message;
+ *msg = message;
+ return ret;
}
static void hab_msg_queue(struct virtual_channel *vchan,
diff --git a/drivers/soc/qcom/hab/hab_vchan.c b/drivers/soc/qcom/hab/hab_vchan.c
index 140d75656353..2db4db8f321b 100644
--- a/drivers/soc/qcom/hab/hab_vchan.c
+++ b/drivers/soc/qcom/hab/hab_vchan.c
@@ -110,10 +110,7 @@ hab_vchan_free(struct kref *ref)
}
spin_unlock_bh(&ctx->imp_lock);
if (found) {
- habmm_imp_hyp_unmap(ctx->import_ctx,
- exp->import_index,
- exp->payload_count,
- ctx->kernel);
+ habmm_imp_hyp_unmap(ctx->import_ctx, exp);
ctx->import_total--;
kfree(exp);
}
diff --git a/drivers/soc/qcom/hab/khab.c b/drivers/soc/qcom/hab/khab.c
index 3fdd11f7daf7..ba77e5e9cca2 100644
--- a/drivers/soc/qcom/hab/khab.c
+++ b/drivers/soc/qcom/hab/khab.c
@@ -51,22 +51,14 @@ int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes,
if (!size_bytes || !dst_buff)
return -EINVAL;
- msg = hab_vchan_recv(hab_driver.kctx, handle, flags);
+ ret = hab_vchan_recv(hab_driver.kctx, &msg, handle, size_bytes, flags);
- if (IS_ERR(msg)) {
- *size_bytes = 0;
- return PTR_ERR(msg);
- }
-
- if (*size_bytes < msg->sizebytes) {
- *size_bytes = 0;
- ret = -EINVAL;
- } else {
+ if (ret == 0 && msg)
memcpy(dst_buff, msg->data, msg->sizebytes);
- *size_bytes = msg->sizebytes;
- }
- hab_msg_free(msg);
+ if (msg)
+ hab_msg_free(msg);
+
return ret;
}
EXPORT_SYMBOL(habmm_socket_recv);
diff --git a/drivers/soc/qcom/qdsp6v2/Makefile b/drivers/soc/qcom/qdsp6v2/Makefile
index 90feb8b659d1..0a0e258e6ec1 100644
--- a/drivers/soc/qcom/qdsp6v2/Makefile
+++ b/drivers/soc/qcom/qdsp6v2/Makefile
@@ -10,3 +10,5 @@ obj-$(CONFIG_MSM_QDSP6_SSR) += audio_ssr.o
obj-$(CONFIG_MSM_QDSP6_PDR) += audio_pdr.o
obj-$(CONFIG_MSM_QDSP6_NOTIFIER) += audio_notifier.o
obj-$(CONFIG_MSM_CDSP_LOADER) += cdsp-loader.o
+obj-$(CONFIG_EXT_ANC) += sdsp-anc.o audio_anc.o audio-anc-dev-mgr.o
+
diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c
index b1afd02b49bf..fefc348c0027 100644
--- a/drivers/soc/qcom/qdsp6v2/apr.c
+++ b/drivers/soc/qcom/qdsp6v2/apr.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2010-2014, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2014, 2016, 2018 The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -209,6 +210,16 @@ static struct apr_svc_table svc_tbl_voice[] = {
},
};
+static const struct apr_svc_table svc_tbl_sdsp[] = {
+ {
+ /* Micro Audio Service */
+ .name = "MAS",
+ .idx = 0,
+ .id = APR_SVC_MAS,
+ .client_id = APR_CLIENT_AUDIO,
+ },
+};
+
enum apr_subsys_state apr_get_modem_state(void)
{
return atomic_read(&q6.modem_state);
@@ -444,6 +455,9 @@ struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
*/
can_open_channel = false;
domain_id = APR_DOMAIN_MODEM;
+ } else if (!strcmp(dest, "SDSP")) {
+ domain_id = APR_DOMAIN_SDSP;
+ pr_debug("APR: SDSP DOMAIN_ID %d\n", domain_id);
} else {
pr_err("APR: wrong destination\n");
goto done;
@@ -472,6 +486,8 @@ struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
}
}
pr_debug("%s: modem Up\n", __func__);
+ } else if (dest_id == APR_DEST_DSPS) {
+ pr_debug("%s: Sensor DSP Up\n", __func__);
}
if (apr_get_svc(svc_name, domain_id, &client_id, &svc_idx, &svc_id)) {
@@ -624,6 +640,8 @@ void apr_cb_func(void *buf, int len, void *priv)
pr_err("APR: Wrong svc :%d\n", svc);
return;
}
+ } else if (hdr->src_domain == APR_DOMAIN_SDSP) {
+ clnt = APR_CLIENT_AUDIO;
} else {
pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain);
return;
@@ -700,6 +718,9 @@ int apr_get_svc(const char *svc_name, int domain_id, int *client_id,
if ((domain_id == APR_DOMAIN_ADSP)) {
tbl = (struct apr_svc_table *)&svc_tbl_qdsp6;
size = ARRAY_SIZE(svc_tbl_qdsp6);
+ } else if (domain_id == APR_DOMAIN_SDSP) {
+ tbl = (struct apr_svc_table *)&svc_tbl_sdsp;
+ size = ARRAY_SIZE(svc_tbl_sdsp);
} else {
tbl = (struct apr_svc_table *)&svc_tbl_voice;
size = ARRAY_SIZE(svc_tbl_voice);
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal.c b/drivers/soc/qcom/qdsp6v2/apr_tal.c
index 6cffe7be655a..3884667cc12c 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, 2013-2014, 2016 The Linux Foundation.
+/* Copyright (c) 2010-2011, 2013-2014, 2016, 2018 The Linux Foundation.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -38,6 +38,14 @@ static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
"apr_audio_svc",
"apr_voice_svc",
},
+ {
+ "",
+ "",
+ },
+ {
+ "apr_apps_sdsp",
+ "apr_apps_sdsp",
+ },
};
struct apr_svc_ch_dev apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
@@ -162,7 +170,8 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest,
if ((clnt >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) ||
(dl >= APR_DL_MAX)) {
- pr_err("apr_tal: Invalid params\n");
+ pr_err("apr_tal: Invalid params clnt %d dest %d dl %d\n",
+ clnt, dest, dl);
return NULL;
}
@@ -184,10 +193,12 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest,
pr_debug("apr_tal:Wakeup done\n");
apr_svc_ch[dl][dest][clnt].dest_state = 0;
}
+
rc = smd_named_open_on_edge(svc_names[dest][clnt], dest,
- &apr_svc_ch[dl][dest][clnt].ch,
- &apr_svc_ch[dl][dest][clnt],
- apr_tal_notify);
+ &apr_svc_ch[dl][dest][clnt].ch,
+ &apr_svc_ch[dl][dest][clnt],
+ apr_tal_notify);
+
if (rc < 0) {
pr_err("apr_tal: smd_open failed %s\n",
svc_names[dest][clnt]);
@@ -256,6 +267,12 @@ static int apr_smd_probe(struct platform_device *pdev)
clnt = APR_CLIENT_AUDIO;
apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
+ } else if (pdev->id == APR_DEST_DSPS) {
+ pr_info("apr_tal:Sensor DSP Is Up\n");
+ dest = APR_DEST_DSPS;
+ clnt = APR_CLIENT_AUDIO;
+ apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
+ wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
} else
pr_err("apr_tal:Invalid Dest Id: %d\n", pdev->id);
@@ -278,6 +295,14 @@ static struct platform_driver apr_modem_driver = {
},
};
+static struct platform_driver apr_sdsp_driver = {
+ .probe = apr_smd_probe,
+ .driver = {
+ .name = "apr_apps_sdsp",
+ .owner = THIS_MODULE,
+ },
+};
+
static int __init apr_tal_init(void)
{
int i, j, k;
@@ -293,6 +318,7 @@ static int __init apr_tal_init(void)
}
platform_driver_register(&apr_q6_driver);
platform_driver_register(&apr_modem_driver);
+ platform_driver_register(&apr_sdsp_driver);
return 0;
}
device_initcall(apr_tal_init);
diff --git a/drivers/soc/qcom/qdsp6v2/apr_v2.c b/drivers/soc/qcom/qdsp6v2/apr_v2.c
index 037fb3327ef0..d42f2ff5912e 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_v2.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_v2.c
@@ -37,6 +37,8 @@ uint16_t apr_get_data_src(struct apr_hdr *hdr)
return APR_DEST_MODEM;
else if (hdr->src_domain == APR_DOMAIN_ADSP)
return APR_DEST_QDSP6;
+ else if (hdr->src_domain == APR_DOMAIN_SDSP)
+ return APR_DEST_DSPS;
else {
pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain);
return APR_DEST_MAX; /*RETURN INVALID VALUE*/
@@ -47,6 +49,8 @@ int apr_get_dest_id(char *dest)
{
if (!strcmp(dest, "ADSP"))
return APR_DEST_QDSP6;
+ else if (!strcmp(dest, "SDSP"))
+ return APR_DEST_DSPS;
else
return APR_DEST_MODEM;
}
diff --git a/drivers/soc/qcom/qdsp6v2/apr_vm.c b/drivers/soc/qcom/qdsp6v2/apr_vm.c
index 56592ac91e1b..bd555b6e6f3b 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_vm.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_vm.c
@@ -529,25 +529,23 @@ static int apr_vm_cb_thread(void *data)
{
uint32_t apr_rx_buf_len;
struct aprv2_vm_ack_rx_pkt_available_t apr_ack;
+ unsigned long delay = jiffies + (HZ / 2);
int status = 0;
int ret = 0;
while (1) {
- apr_rx_buf_len = sizeof(apr_rx_buf);
- ret = habmm_socket_recv(hab_handle_rx,
- (void *)&apr_rx_buf,
- &apr_rx_buf_len,
- 0xFFFFFFFF,
- 0);
+ do {
+ apr_rx_buf_len = sizeof(apr_rx_buf);
+ ret = habmm_socket_recv(hab_handle_rx,
+ (void *)&apr_rx_buf,
+ &apr_rx_buf_len,
+ 0xFFFFFFFF,
+ 0);
+ } while (time_before(jiffies, delay) && (ret == -EAGAIN) &&
+ (apr_rx_buf_len == 0));
if (ret) {
pr_err("%s: habmm_socket_recv failed %d\n",
__func__, ret);
- /*
- * TODO: depends on the HAB error code,
- * may need to implement
- * a retry mechanism.
- * break if recv failed ?
- */
break;
}
diff --git a/drivers/soc/qcom/qdsp6v2/audio-anc-dev-mgr.c b/drivers/soc/qcom/qdsp6v2/audio-anc-dev-mgr.c
new file mode 100644
index 000000000000..75b114e6905c
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/audio-anc-dev-mgr.c
@@ -0,0 +1,1170 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/clk/msm-clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/msm-dai-q6-v2.h>
+#include <linux/qdsp6v2/audio-anc-dev-mgr.h>
+#include <linux/qdsp6v2/sdsp_anc.h>
+
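+/* Low-power memory (LPM) window handed to the SDSP via anc_if_share_resource() */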
+#define LPM_START_ADDR (0x9120000 + 60*1024)
+#define LPM_LENGTH (4*1024)
+
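+/* Roles of the TDM ports managed by the ANC device: reference, ANC speaker, ANC mic */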
+enum {
+ ANC_DEV_PORT_REFS = 0,
+ ANC_DEV_PORT_ANC_SPKR,
+ ANC_DEV_PORT_ANC_MIC,
+ ANC_DEV_PORT_MAX,
+};
+
+struct anc_tdm_port_cfg_info {
+ u16 port_id;
+ struct afe_param_id_tdm_cfg port_cfg;
+};
+
+struct anc_tdm_group_set_info {
+ struct afe_param_id_group_device_tdm_cfg gp_cfg;
+ uint32_t num_tdm_group_ports;
+ struct afe_clk_set tdm_clk_set;
+ uint32_t clk_mode;
+};
+
+struct anc_dev_drv_info {
+ uint32_t state;
+ uint32_t rpm;
+ uint32_t bypass_mode;
+ uint32_t algo_module_id;
+};
+
+struct anc_dev_port_cfg_info {
+ uint32_t port_id;
+ uint32_t sample_rate;
+ uint32_t num_channels;
+ uint32_t bit_width;
+};
+
+static struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info
+ anc_mic_spkr_layout;
+
+static struct anc_dev_port_cfg_info anc_port_cfg[ANC_DEV_PORT_MAX];
+
+static struct anc_tdm_group_set_info anc_dev_tdm_gp_set[IDX_GROUP_TDM_MAX];
+
+static struct anc_tdm_port_cfg_info anc_dev_tdm_port_cfg[IDX_TDM_MAX];
+
+static struct anc_dev_drv_info this_anc_dev_info;
+
+static int anc_dev_get_free_tdm_gp_cfg_idx(void)
+{
+ int idx = -1;
+ int i;
+
+ for (i = 0; i < IDX_GROUP_TDM_MAX; i++) {
+ if (anc_dev_tdm_gp_set[i].gp_cfg.group_id == 0) {
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+static int anc_dev_get_free_tdm_port_cfg_idx(void)
+{
+ int idx = -1;
+ int i;
+
+ for (i = 0; i < IDX_TDM_MAX; i++) {
+ if (anc_dev_tdm_port_cfg[i].port_id == 0) {
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+static u16 get_group_id_from_port_id(int32_t port_id)
+{
+ u16 gp_id = AFE_PORT_INVALID;
+
+ switch (port_id) {
+ case AFE_PORT_ID_PRIMARY_TDM_RX:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+ gp_id = AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+ gp_id = AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX;
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+ gp_id = AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX;
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+ gp_id = AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX;
+ break;
+ default:
+ break;
+ }
+
+ return gp_id;
+}
+
+static int anc_dev_get_matched_tdm_gp_cfg_idx(u16 gp_id)
+{
+ int idx = -1;
+ int i;
+
+ for (i = 0; i < IDX_GROUP_TDM_MAX; i++) {
+ if (anc_dev_tdm_gp_set[i].gp_cfg.group_id == gp_id) {
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+static int anc_dev_get_matched_tdm_port_cfg_idx(u16 port_id)
+{
+ int idx = -1;
+ int i;
+
+ for (i = 0; i < IDX_TDM_MAX; i++) {
+ if (anc_dev_tdm_port_cfg[i].port_id == port_id) {
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
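+/*
+ * Select the IBIT (internal clock source) or EBIT (external clock source) bit
+ * clock id for the port's TDM group based on clk_mode, then apply it through
+ * afe_set_lpass_clock_v2().
+ */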
+static int anc_dev_tdm_set_clk(
+ struct anc_tdm_group_set_info *gp_set_data,
+ u16 port_id, bool enable)
+{
+ int rc = 0;
+
+ switch (gp_set_data->gp_cfg.group_id) {
+ case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX:
+ case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX:
+ if (gp_set_data->clk_mode) {
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT;
+ } else
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_PRI_TDM_EBIT;
+ break;
+ case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX:
+ case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX:
+ if (gp_set_data->clk_mode) {
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_SEC_TDM_IBIT;
+ } else
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_SEC_TDM_EBIT;
+ break;
+ case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX:
+ case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX:
+ if (gp_set_data->clk_mode) {
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_TER_TDM_IBIT;
+ } else
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_TER_TDM_EBIT;
+ break;
+ case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX:
+ case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX:
+ if (gp_set_data->clk_mode) {
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT;
+ } else
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT;
+ break;
+ default:
+ pr_err("%s: port id 0x%x not supported\n",
+ __func__, port_id);
+ return -EINVAL;
+ }
+ gp_set_data->tdm_clk_set.enable = enable;
+
+ rc = afe_set_lpass_clock_v2(port_id,
+ &gp_set_data->tdm_clk_set);
+
+ if (rc < 0)
+ pr_err("%s: afe lpass clock failed, err:%d\n",
+ __func__, rc);
+
+ return rc;
+}
+
+static int anc_dev_port_start(int32_t which_port)
+{
+ int rc = 0;
+ int pt_idx;
+
+ struct afe_tdm_port_config tdm_cfg;
+
+ pt_idx =
+ anc_dev_get_matched_tdm_port_cfg_idx(anc_port_cfg[which_port].port_id);
+
+ if (pt_idx == -1) {
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ tdm_cfg.tdm = anc_dev_tdm_port_cfg[pt_idx].port_cfg;
+
+ tdm_cfg.tdm.num_channels = anc_port_cfg[which_port].num_channels;
+ tdm_cfg.tdm.sample_rate = anc_port_cfg[which_port].sample_rate;
+ tdm_cfg.tdm.bit_width = anc_port_cfg[which_port].bit_width;
+
+ tdm_cfg.tdm.nslots_per_frame = anc_port_cfg[which_port].num_channels;
+ tdm_cfg.tdm.slot_width = anc_port_cfg[which_port].bit_width;
+ tdm_cfg.tdm.slot_mask =
+ ((1 << anc_port_cfg[which_port].num_channels) - 1);
+
+ pr_debug("%s: port_id %x num_channels %x bit_width %x sample_rate %x nslots_per_frame %x slot_width %x slot_mask %x!\n",
+ __func__,
+ anc_port_cfg[which_port].port_id,
+ tdm_cfg.tdm.num_channels,
+ tdm_cfg.tdm.bit_width,
+ tdm_cfg.tdm.sample_rate,
+ tdm_cfg.tdm.nslots_per_frame,
+ tdm_cfg.tdm.slot_width,
+ tdm_cfg.tdm.slot_mask);
+
+ rc = anc_if_tdm_port_start(anc_port_cfg[which_port].port_id,
+ &tdm_cfg);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to open ANC port from SDSP 0x%x\n",
+ __func__, anc_port_cfg[which_port].port_id);
+ goto rtn;
+ }
+
+rtn:
+ return rc;
+}
+
+static int anc_dev_port_stop(int32_t which_port)
+{
+ int rc = 0;
+
+ rc = anc_if_tdm_port_stop(anc_port_cfg[which_port].port_id);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to stop ANC port from SDSP 0x%x\n",
+ __func__, anc_port_cfg[which_port].port_id);
+ }
+
+ return rc;
+}
+
+int msm_anc_dev_set_info(void *info_p, int32_t anc_cmd)
+{
+ int rc = 0;
+
+ switch (anc_cmd) {
+ case ANC_CMD_RPM: {
+ struct audio_anc_rpm_info *rpm_info_p =
+ (struct audio_anc_rpm_info *)info_p;
+
+ if (this_anc_dev_info.state)
+ rc = anc_if_set_rpm(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
+ rpm_info_p->rpm);
+ else
+ this_anc_dev_info.rpm = 0;
+ break;
+ }
+ case ANC_CMD_BYPASS_MODE: {
+ struct audio_anc_bypass_mode *bypass_mode_p =
+ (struct audio_anc_bypass_mode *)info_p;
+
+ if (this_anc_dev_info.state)
+ rc = anc_if_set_bypass_mode(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
+ bypass_mode_p->mode);
+ else
+ this_anc_dev_info.bypass_mode = bypass_mode_p->mode;
+ break;
+ }
+ case ANC_CMD_ALGO_MODULE: {
+ struct audio_anc_algo_module_info *module_info_p =
+ (struct audio_anc_algo_module_info *)info_p;
+
+ if (this_anc_dev_info.state)
+ rc = anc_if_set_algo_module_id(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
+ module_info_p->module_id);
+ else
+ this_anc_dev_info.algo_module_id =
+ module_info_p->module_id;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+
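+/*
+ * Bring-up sequence: enable the clock and group for the reference TDM port and
+ * start it on the ADSP, share the mic/spkr layout and LPM resource with the
+ * SDSP, then start the ANC mic and ANC speaker ports.
+ */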
+int msm_anc_dev_start(void)
+{
+ int rc = 0;
+ u16 group_id;
+ int gp_idx, pt_idx;
+ union afe_port_group_config anc_dev_gp_cfg;
+ struct afe_tdm_port_config tdm_cfg;
+
+ pr_debug("%s: ANC devices start in!\n", __func__);
+
+ memset(&tdm_cfg, 0, sizeof(tdm_cfg));
+
+ /*
+ * Refs port for ADSP
+ * 1. enable clk
+ * 2. group cfg and enable
+ * 3. Refs port cfg and start
+ */
+
+ group_id =
+ get_group_id_from_port_id(anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+
+ gp_idx = anc_dev_get_matched_tdm_gp_cfg_idx(group_id);
+
+ if (gp_idx == -1) {
+ rc = -EINVAL;
+ pr_err("%s: anc_dev_get_matched_tdm_gp_cfg_idx() failed with group_id 0x%x\n",
+ __func__, group_id);
+ goto rtn;
+ } else {
+ rc = anc_dev_tdm_set_clk(&anc_dev_tdm_gp_set[gp_idx],
+ (u16)anc_port_cfg[ANC_DEV_PORT_REFS].port_id, true);
+
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to enable AFE clk 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ anc_dev_gp_cfg.tdm_cfg = anc_dev_tdm_gp_set[gp_idx].gp_cfg;
+
+ anc_dev_gp_cfg.tdm_cfg.group_device_cfg_minor_version =
+ AFE_API_VERSION_GROUP_DEVICE_TDM_CONFIG;
+ anc_dev_gp_cfg.tdm_cfg.num_channels =
+ anc_port_cfg[ANC_DEV_PORT_REFS].num_channels;
+ anc_dev_gp_cfg.tdm_cfg.bit_width =
+ anc_port_cfg[ANC_DEV_PORT_REFS].bit_width;
+ anc_dev_gp_cfg.tdm_cfg.sample_rate =
+ anc_port_cfg[ANC_DEV_PORT_REFS].sample_rate;
+ anc_dev_gp_cfg.tdm_cfg.nslots_per_frame =
+ anc_port_cfg[ANC_DEV_PORT_REFS].num_channels;
+ anc_dev_gp_cfg.tdm_cfg.slot_width =
+ anc_port_cfg[ANC_DEV_PORT_REFS].bit_width;
+ anc_dev_gp_cfg.tdm_cfg.slot_mask =
+ ((1 << anc_port_cfg[ANC_DEV_PORT_REFS].num_channels) - 1);
+
+ pr_debug("%s: refs_port_id %x\n", __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+
+ pr_debug("%s: anc_dev_gp_cfg num_channels %x bit_width %x sample_rate %x nslots_per_frame %x slot_width %x slot_mask %x!\n",
+ __func__,
+ anc_dev_gp_cfg.tdm_cfg.num_channels,
+ anc_dev_gp_cfg.tdm_cfg.bit_width,
+ anc_dev_gp_cfg.tdm_cfg.sample_rate,
+ anc_dev_gp_cfg.tdm_cfg.nslots_per_frame,
+ anc_dev_gp_cfg.tdm_cfg.slot_width,
+ anc_dev_gp_cfg.tdm_cfg.slot_mask);
+
+ rc = afe_port_group_enable(group_id,
+ &anc_dev_gp_cfg, true);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to enable AFE group 0x%x\n",
+ __func__, group_id);
+ goto rtn;
+ }
+
+ pt_idx =
+ anc_dev_get_matched_tdm_port_cfg_idx(
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+
+ if (pt_idx == -1) {
+ rc = -EINVAL;
+ pr_err("%s: anc_dev_get_matched_tdm_port_cfg_idx() failed with port_id 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ tdm_cfg.tdm = anc_dev_tdm_port_cfg[pt_idx].port_cfg;
+
+ tdm_cfg.tdm.num_channels =
+ anc_port_cfg[ANC_DEV_PORT_REFS].num_channels;
+ tdm_cfg.tdm.sample_rate =
+ anc_port_cfg[ANC_DEV_PORT_REFS].sample_rate;
+ tdm_cfg.tdm.bit_width =
+ anc_port_cfg[ANC_DEV_PORT_REFS].bit_width;
+
+ tdm_cfg.tdm.nslots_per_frame =
+ anc_dev_gp_cfg.tdm_cfg.nslots_per_frame;
+ tdm_cfg.tdm.slot_width = anc_dev_gp_cfg.tdm_cfg.slot_width;
+ tdm_cfg.tdm.slot_mask = anc_dev_gp_cfg.tdm_cfg.slot_mask;
+
+ rc = afe_tdm_port_start(anc_port_cfg[ANC_DEV_PORT_REFS].port_id,
+ &tdm_cfg,
+ anc_port_cfg[ANC_DEV_PORT_REFS].sample_rate, 0);
+ if (IS_ERR_VALUE(rc)) {
+ afe_port_group_enable(group_id,
+ &anc_dev_gp_cfg, false);
+
+ anc_dev_tdm_set_clk(&anc_dev_tdm_gp_set[gp_idx],
+ (u16)anc_port_cfg[ANC_DEV_PORT_REFS].port_id, false);
+
+ pr_err("%s: fail to open AFE port 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ }
+
+ rc = anc_if_set_anc_mic_spkr_layout(
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id,
+ &anc_mic_spkr_layout);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to pass ANC MIC and SPKR layout info to SDSP 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ rc = anc_if_share_resource(
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id, 4, 3,
+ LPM_START_ADDR, LPM_LENGTH);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to assign lpass resource to SDSP 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ rc = anc_if_config_ref(anc_port_cfg[ANC_DEV_PORT_REFS].port_id,
+ anc_port_cfg[ANC_DEV_PORT_REFS].sample_rate,
+ anc_port_cfg[ANC_DEV_PORT_REFS].bit_width,
+ anc_port_cfg[ANC_DEV_PORT_REFS].num_channels);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to refs port cfg in SDSP 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ if (this_anc_dev_info.algo_module_id != 0)
+ rc = anc_if_set_algo_module_id(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
+ this_anc_dev_info.algo_module_id);
+
+ if (this_anc_dev_info.bypass_mode != 0)
+ rc = anc_if_set_bypass_mode(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
+ this_anc_dev_info.bypass_mode);
+
+ group_id = get_group_id_from_port_id(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
+
+ gp_idx = anc_dev_get_matched_tdm_gp_cfg_idx(group_id);
+
+ if (gp_idx == -1) {
+ rc = -EINVAL;
+ pr_err("%s: anc_dev_get_matched_tdm_gp_cfg_idx() failed with group_id 0x%x\n",
+ __func__, group_id);
+ goto rtn;
+ } else {
+ rc = anc_dev_tdm_set_clk(&anc_dev_tdm_gp_set[gp_idx],
+ (u16)anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id, true);
+
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to enable AFE clk 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
+ goto rtn;
+ }
+ }
+
+ rc = anc_dev_port_start(ANC_DEV_PORT_ANC_MIC);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to enable ANC MIC Port 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_ANC_MIC].port_id);
+ goto rtn;
+ }
+
+ rc = anc_dev_port_start(ANC_DEV_PORT_ANC_SPKR);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to enable ANC SPKR Port 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
+ goto rtn;
+ }
+
+ this_anc_dev_info.state = 1;
+
+ pr_debug("%s: ANC devices start successfully!\n", __func__);
+
+rtn:
+ return rc;
+}
+
+int msm_anc_dev_stop(void)
+{
+ int rc = 0;
+ u16 group_id;
+ int gp_idx;
+
+ anc_dev_port_stop(ANC_DEV_PORT_ANC_SPKR);
+ anc_dev_port_stop(ANC_DEV_PORT_ANC_MIC);
+
+ group_id = get_group_id_from_port_id(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
+
+ gp_idx = anc_dev_get_matched_tdm_gp_cfg_idx(group_id);
+
+ if (gp_idx == -1) {
+ rc = -EINVAL;
+ goto rtn;
+ } else {
+ rc = anc_dev_tdm_set_clk(&anc_dev_tdm_gp_set[gp_idx],
+ (u16)anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id, false);
+
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to disable AFE clk 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
+ }
+ }
+
+ group_id =
+ get_group_id_from_port_id(anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+
+ gp_idx = anc_dev_get_matched_tdm_gp_cfg_idx(group_id);
+
+ if (gp_idx == -1) {
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ afe_close(anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+
+ afe_port_group_enable(group_id, NULL, false);
+
+ anc_dev_tdm_set_clk(&anc_dev_tdm_gp_set[gp_idx],
+ (u16)anc_port_cfg[ANC_DEV_PORT_REFS].port_id, false);
+
+ this_anc_dev_info.state = 0;
+ this_anc_dev_info.algo_module_id = 0;
+ this_anc_dev_info.rpm = 0;
+ this_anc_dev_info.bypass_mode = 0;
+
+ pr_debug("%s: ANC devices stop successfully!\n", __func__);
+
+rtn:
+ return rc;
+}
+
+
+static int msm_anc_tdm_dev_group_cfg_info(
+ struct platform_device *pdev,
+ struct device_node *ctx_node)
+{
+ int rc = 0;
+ const uint32_t *port_id_array = NULL;
+ uint32_t num_tdm_group_ports = 0;
+ uint32_t array_length = 0;
+ int i = 0;
+ int gp_idx = anc_dev_get_free_tdm_gp_cfg_idx();
+
+ if ((gp_idx < 0) || (gp_idx >= IDX_GROUP_TDM_MAX)) {
+ dev_err(&pdev->dev, "%s: could not get available tdm group cfg slot\n",
+ __func__);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ /* extract tdm group info into static */
+ rc = of_property_read_u32(ctx_node,
+ "qcom,msm-cpudai-tdm-group-id",
+ (u32 *)&anc_dev_tdm_gp_set[gp_idx].gp_cfg.group_id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Group ID from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-group-id");
+ goto rtn;
+ }
+
+ dev_dbg(&pdev->dev, "%s: dev_name: %s group_id: 0x%x\n",
+ __func__, dev_name(&pdev->dev),
+ anc_dev_tdm_gp_set[gp_idx].gp_cfg.group_id);
+
+ rc = of_property_read_u32(ctx_node,
+ "qcom,msm-cpudai-tdm-group-num-ports",
+ &num_tdm_group_ports);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Group Num Ports from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-group-num-ports");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Group Num Ports from DT file 0x%x\n",
+ __func__, num_tdm_group_ports);
+
+ if (num_tdm_group_ports > AFE_GROUP_DEVICE_NUM_PORTS) {
+ dev_err(&pdev->dev, "%s Group Num Ports %d greater than Max %d\n",
+ __func__, num_tdm_group_ports,
+ AFE_GROUP_DEVICE_NUM_PORTS);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ port_id_array = of_get_property(ctx_node,
+ "qcom,msm-cpudai-tdm-group-port-id",
+ &array_length);
+ if (port_id_array == NULL) {
+ dev_err(&pdev->dev, "%s port_id_array is not valid\n",
+ __func__);
+ rc = -EINVAL;
+ goto rtn;
+ }
+ if (array_length != sizeof(uint32_t) * num_tdm_group_ports) {
+ dev_err(&pdev->dev, "%s array_length is %d, expected is %zd\n",
+ __func__, array_length,
+ sizeof(uint32_t) * num_tdm_group_ports);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ for (i = 0; i < num_tdm_group_ports; i++)
+ anc_dev_tdm_gp_set[gp_idx].gp_cfg.port_id[i] =
+ (u16)be32_to_cpu(port_id_array[i]);
+ /* Unused index should be filled with 0 or AFE_PORT_INVALID */
+ for (i = num_tdm_group_ports;
+ i < AFE_GROUP_DEVICE_NUM_PORTS; i++)
+ anc_dev_tdm_gp_set[gp_idx].gp_cfg.port_id[i] = AFE_PORT_INVALID;
+
+ anc_dev_tdm_gp_set[gp_idx].num_tdm_group_ports = num_tdm_group_ports;
+
+ /* extract tdm clk info into static */
+ rc = of_property_read_u32(ctx_node,
+ "qcom,msm-cpudai-tdm-clk-rate",
+ &anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_freq_in_hz);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Clk Rate from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-clk-rate");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Clk Rate from DT file %d\n",
+ __func__,
+ anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_freq_in_hz);
+
+ anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_set_minor_version =
+ Q6AFE_LPASS_CLK_CONFIG_API_VERSION;
+ anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_attri =
+ Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO;
+ anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_root =
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT;
+
+
+ /* extract tdm clk attribute into static */
+ if (of_find_property(ctx_node,
+ "qcom,msm-cpudai-tdm-clk-attribute", NULL)) {
+ rc = of_property_read_u16(ctx_node,
+ "qcom,msm-cpudai-tdm-clk-attribute",
+ &anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_attri);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: No Clk attribute in DT file %s\n",
+ __func__,
+ "qcom,msm-cpudai-tdm-clk-attribute");
+ goto rtn;
+ }
+ } else {
+ dev_dbg(&pdev->dev, "%s: Clk Attribute from DT file %d\n",
+ __func__,
+ anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_attri);
+ }
+
+ /* extract tdm clk src master/slave info into static */
+ rc = of_property_read_u32(ctx_node,
+ "qcom,msm-cpudai-tdm-clk-internal",
+ &anc_dev_tdm_gp_set[gp_idx].clk_mode);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Clk id from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-clk-internal");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Clk id from DT file %d\n",
+ __func__, anc_dev_tdm_gp_set[gp_idx].clk_mode);
+
+rtn:
+ return rc;
+}
+
+
+static int msm_anc_tdm_dev_port_cfg_info(
+ struct platform_device *pdev,
+ struct device_node *ctx_node)
+{
+ int rc = 0;
+ u32 tdm_dev_id = 0;
+ int pt_idx = anc_dev_get_free_tdm_port_cfg_idx();
+ struct device_node *tdm_parent_node = NULL;
+
+ if ((pt_idx < 0) || (pt_idx >= IDX_TDM_MAX)) {
+ dev_err(&pdev->dev, "%s: could not get available tdm port cfg slot\n",
+ __func__);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ /* retrieve device/afe id */
+ rc = of_property_read_u32(ctx_node,
+ "qcom,msm-cpudai-tdm-dev-id",
+ &tdm_dev_id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Device ID missing in DT file\n",
+ __func__);
+ goto rtn;
+ }
+ if ((tdm_dev_id < AFE_PORT_ID_TDM_PORT_RANGE_START) ||
+ (tdm_dev_id > AFE_PORT_ID_TDM_PORT_RANGE_END)) {
+ dev_err(&pdev->dev, "%s: Invalid TDM Device ID 0x%x in DT file\n",
+ __func__, tdm_dev_id);
+ rc = -ENXIO;
+ goto rtn;
+ }
+ anc_dev_tdm_port_cfg[pt_idx].port_id = tdm_dev_id;
+
+ dev_dbg(&pdev->dev, "%s: dev_name: %s dev_id: 0x%x\n",
+ __func__, dev_name(&pdev->dev), tdm_dev_id);
+
+ /* TDM CFG */
+ tdm_parent_node = of_get_parent(ctx_node);
+ rc = of_property_read_u32(tdm_parent_node,
+ "qcom,msm-cpudai-tdm-sync-mode",
+ (u32 *)&anc_dev_tdm_port_cfg[pt_idx].port_cfg.sync_mode);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Sync Mode from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-sync-mode");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Sync Mode from DT file 0x%x\n",
+ __func__, anc_dev_tdm_port_cfg[pt_idx].port_cfg.sync_mode);
+
+ rc = of_property_read_u32(tdm_parent_node,
+ "qcom,msm-cpudai-tdm-sync-src",
+ (u32 *)&anc_dev_tdm_port_cfg[pt_idx].port_cfg.sync_src);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Sync Src from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-sync-src");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Sync Src from DT file 0x%x\n",
+ __func__, anc_dev_tdm_port_cfg[pt_idx].port_cfg.sync_src);
+
+ rc = of_property_read_u32(tdm_parent_node,
+ "qcom,msm-cpudai-tdm-data-out",
+ (u32 *)&anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_data_out_enable);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Data Out from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-data-out");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Data Out from DT file 0x%x\n",
+ __func__,
+ anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_data_out_enable);
+
+ rc = of_property_read_u32(tdm_parent_node,
+ "qcom,msm-cpudai-tdm-invert-sync",
+ (u32 *)&anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_invert_sync_pulse);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Invert Sync from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-invert-sync");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Invert Sync from DT file 0x%x\n",
+ __func__,
+ anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_invert_sync_pulse);
+
+ rc = of_property_read_u32(tdm_parent_node,
+ "qcom,msm-cpudai-tdm-data-delay",
+ (u32 *)&anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_sync_data_delay);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Data Delay from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-data-delay");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Data Delay from DT file 0x%x\n",
+ __func__,
+ anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_sync_data_delay);
+
+ /* TDM CFG -- set default */
+ anc_dev_tdm_port_cfg[pt_idx].port_cfg.data_format = AFE_LINEAR_PCM_DATA;
+ anc_dev_tdm_port_cfg[pt_idx].port_cfg.tdm_cfg_minor_version =
+ AFE_API_VERSION_TDM_CONFIG;
+
+ msm_anc_tdm_dev_group_cfg_info(pdev, tdm_parent_node);
+
+ return 0;
+
+rtn:
+ return rc;
+}
+
+static int msm_anc_dev_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+
+ u32 port_id = 0;
+ const uint32_t *layout_array = NULL;
+ uint32_t num_anc_io = 0;
+ uint32_t array_length = 0;
+ int i = 0;
+ uint32_t sample_rate = 0;
+ uint32_t num_channels = 0;
+ uint32_t bit_width = 0;
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,refs-port-id",
+ (u32 *)&port_id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC refs-port-id DT file %s\n",
+ __func__, "qcom,refs-port-id");
+ goto rtn;
+ }
+
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id = port_id;
+
+ dev_dbg(&pdev->dev, "%s: refs-port-id 0x%x\n",
+ __func__, port_id);
+
+ port_id = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,spkr-port-id",
+ (u32 *)&port_id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC spkr-port-id DT file %s\n",
+ __func__, "qcom,spkr-port-id");
+ goto rtn;
+ }
+
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id = port_id;
+
+ dev_dbg(&pdev->dev, "%s: spkr-port-id 0x%x\n",
+ __func__, port_id);
+
+ port_id = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mic-port-id",
+ (u32 *)&port_id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC mic-port-id DT file %s\n",
+ __func__, "qcom,mic-port-id");
+ goto rtn;
+ }
+
+ anc_port_cfg[ANC_DEV_PORT_ANC_MIC].port_id = port_id;
+
+ dev_dbg(&pdev->dev, "%s: mic-port-id 0x%x\n",
+ __func__, port_id);
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,sample-rate",
+ (u32 *)&sample_rate);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC sample rate DT file %s\n",
+ __func__, "qcom,sample-rate");
+ goto rtn;
+ }
+
+ dev_dbg(&pdev->dev, "%s: ANC sample rate 0x%x\n",
+ __func__, sample_rate);
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-channels",
+ (u32 *)&num_channels);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC num channels DT file %s\n",
+ __func__, "qcom,num-channels");
+ goto rtn;
+ }
+
+ dev_dbg(&pdev->dev, "%s: ANC num channel 0x%x\n",
+ __func__, num_channels);
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,bit-width",
+ (u32 *)&bit_width);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC bit width DT file %s\n",
+ __func__, "qcom,bit-width");
+ goto rtn;
+ }
+
+ dev_dbg(&pdev->dev, "%s: ANC bit width 0x%x\n",
+ __func__, bit_width);
+
+ for (i = 0; i < ANC_DEV_PORT_MAX; i++) {
+ anc_port_cfg[i].sample_rate = sample_rate;
+ anc_port_cfg[i].num_channels = num_channels;
+ anc_port_cfg[i].bit_width = bit_width;
+ }
+
+ memset(&anc_mic_spkr_layout, 0, sizeof(anc_mic_spkr_layout));
+
+ anc_mic_spkr_layout.minor_version = 1;
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-anc-mic",
+ (u32 *)&num_anc_io);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC num_anc_mic DT file %s\n",
+ __func__, "qcom,num-anc-mic");
+ goto rtn;
+ }
+
+ layout_array = of_get_property(pdev->dev.of_node,
+ "qcom,anc-mic-array",
+ &array_length);
+ if (layout_array == NULL) {
+ dev_err(&pdev->dev, "%s layout_array is not valid\n",
+ __func__);
+ rc = -EINVAL;
+ goto rtn;
+ }
+ if (array_length != sizeof(uint32_t) * num_anc_io) {
+ dev_err(&pdev->dev, "%s array_length is %d, expected is %zd\n",
+ __func__, array_length,
+ sizeof(uint32_t) * num_anc_io);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ anc_mic_spkr_layout.num_anc_mic = num_anc_io;
+
+ for (i = 0; i < num_anc_io; i++)
+ anc_mic_spkr_layout.mic_layout_array[i] =
+ (u16)be32_to_cpu(layout_array[i]);
+
+ num_anc_io = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-anc-spkr",
+ (u32 *)&num_anc_io);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC num_anc_spkr DT file %s\n",
+ __func__, "qcom,num-anc-spkr");
+ goto rtn;
+ }
+
+ layout_array = of_get_property(pdev->dev.of_node,
+ "qcom,anc-spkr-array",
+ &array_length);
+ if (layout_array == NULL) {
+ dev_err(&pdev->dev, "%s layout_array is not valid\n",
+ __func__);
+ rc = -EINVAL;
+ goto rtn;
+ }
+ if (array_length != sizeof(uint32_t) * num_anc_io) {
+ dev_err(&pdev->dev, "%s array_length is %d, expected is %zd\n",
+ __func__, array_length,
+ sizeof(uint32_t) * num_anc_io);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ anc_mic_spkr_layout.num_anc_spkr = num_anc_io;
+
+ for (i = 0; i < num_anc_io; i++)
+ anc_mic_spkr_layout.spkr_layout_array[i] =
+ (u16)be32_to_cpu(layout_array[i]);
+
+ dev_dbg(&pdev->dev, "%s: num_anc_mic 0x%x\n",
+ __func__, anc_mic_spkr_layout.num_anc_mic);
+
+ dev_dbg(&pdev->dev, "%s: num_anc_spkr 0x%x\n",
+ __func__, anc_mic_spkr_layout.num_anc_spkr);
+
+ num_anc_io = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-add-mic-signal",
+ (u32 *)&num_anc_io);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC num_add_mic_signal DT file %s\n",
+ __func__, "qcom,num-add-mic-signal");
+ goto rtn;
+ }
+
+ anc_mic_spkr_layout.num_add_mic_signal = num_anc_io;
+
+ num_anc_io = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-add-spkr-signal",
+ (u32 *)&num_anc_io);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC num_add_spkr_signal DT file %s\n",
+ __func__, "qcom,num-add-spkr-signal");
+ goto rtn;
+ }
+
+ anc_mic_spkr_layout.num_add_spkr_signal = num_anc_io;
+
+ dev_dbg(&pdev->dev, "%s: num_add_mic_signal 0x%x\n",
+ __func__, anc_mic_spkr_layout.num_add_mic_signal);
+
+ dev_dbg(&pdev->dev, "%s: num_add_spkr_signal 0x%x\n",
+ __func__, anc_mic_spkr_layout.num_add_spkr_signal);
+
+ /* TDM group CFG and TDM port CFG */
+ {
+ struct device_node *ctx_node = NULL;
+
+ ctx_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,refs-tdm-rx", 0);
+ if (!ctx_node) {
+ pr_err("%s Could not find refs-tdm-rx info\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_anc_tdm_dev_port_cfg_info(pdev, ctx_node);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to probe TDM group info\n",
+ __func__);
+ }
+
+ ctx_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,spkr-tdm-rx", 0);
+ if (!ctx_node) {
+ pr_err("%s Could not find spkr-tdm-rx info\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_anc_tdm_dev_port_cfg_info(pdev, ctx_node);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to probe TDM group info\n",
+ __func__);
+ }
+
+ ctx_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,mic-tdm-tx", 0);
+ if (!ctx_node) {
+ pr_err("%s Could not find mic-tdm-tx info\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_anc_tdm_dev_port_cfg_info(pdev, ctx_node);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to probe TDM group info\n",
+ __func__);
+ }
+ }
+
+ rc = msm_anc_dev_create(pdev);
+
+rtn:
+ return rc;
+}
+
+static int msm_anc_dev_remove(struct platform_device *pdev)
+{
+ return msm_anc_dev_destroy(pdev);
+}
+
+static const struct of_device_id msm_anc_dev_dt_match[] = {
+ { .compatible = "qcom,msm-ext-anc", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_anc_dev_dt_match);
+
+static struct platform_driver msm_anc_dev = {
+ .probe = msm_anc_dev_probe,
+ .remove = msm_anc_dev_remove,
+ .driver = {
+ .name = "msm-ext-anc",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_anc_dev_dt_match,
+ },
+};
+
+int msm_anc_dev_init(void)
+{
+ int rc = 0;
+
+ memset(&anc_dev_tdm_gp_set, 0, sizeof(anc_dev_tdm_gp_set));
+ memset(&anc_dev_tdm_port_cfg, 0, sizeof(anc_dev_tdm_port_cfg));
+ memset(&anc_port_cfg, 0, sizeof(anc_port_cfg));
+ memset(&this_anc_dev_info, 0, sizeof(this_anc_dev_info));
+
+ rc = platform_driver_register(&msm_anc_dev);
+ if (rc)
+ pr_err("%s: fail to register msm ANC device driver\n",
+ __func__);
+
+ return rc;
+}
+
+int msm_anc_dev_deinit(void)
+{
+ platform_driver_unregister(&msm_anc_dev);
+ return 0;
+}
+
diff --git a/drivers/soc/qcom/qdsp6v2/audio_anc.c b/drivers/soc/qcom/qdsp6v2/audio_anc.c
new file mode 100644
index 000000000000..e0abd2b58027
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/audio_anc.c
@@ -0,0 +1,350 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+
+#include <linux/qdsp6v2/audio-anc-dev-mgr.h>
+
+#define DEVICE_NAME "msm_audio_anc"
+
+struct audio_anc_info {
+ struct cdev myc;
+ struct class *anc_class;
+};
+
+static int major;
+
+static struct audio_anc_info audio_anc;
+
+static size_t get_user_anc_cmd_size(int32_t anc_cmd)
+{
+ size_t size = 0;
+
+ switch (anc_cmd) {
+ case ANC_CMD_START:
+ case ANC_CMD_STOP:
+ size = 0;
+ break;
+ case ANC_CMD_RPM:
+ size = sizeof(struct audio_anc_rpm_info);
+ break;
+ case ANC_CMD_BYPASS_MODE:
+ size = sizeof(struct audio_anc_bypass_mode);
+ break;
+ case ANC_CMD_ALGO_MODULE:
+ size = sizeof(struct audio_anc_algo_module_info);
+ break;
+ default:
+ pr_err("%s:Invalid anc cmd %d!",
+ __func__, anc_cmd);
+ }
+ return size;
+}
+
+static int call_set_anc(int32_t anc_cmd,
+ size_t anc_cmd_size, void *data)
+{
+ int ret = 0;
+
+ pr_err("%s EXT_ANC anc_cmd %x\n", __func__, anc_cmd);
+
+ switch (anc_cmd) {
+ case ANC_CMD_START:
+ ret = msm_anc_dev_start();
+ break;
+ case ANC_CMD_STOP:
+ ret = msm_anc_dev_stop();
+ break;
+ case ANC_CMD_RPM:
+ case ANC_CMD_BYPASS_MODE:
+ case ANC_CMD_ALGO_MODULE:
+ ret = msm_anc_dev_set_info(data, anc_cmd);
+ break;
+ default:
+ break;
+ }
+
+ pr_err("%s EXT_ANC ret %x\n", __func__, ret);
+
+ return ret;
+}
+
+static int call_get_anc(int32_t anc_cmd,
+ size_t anc_cmd_size, void *data)
+{
+ int ret = 0;
+
+ switch (anc_cmd) {
+ case ANC_CMD_RPM:
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int audio_anc_open(struct inode *inode, struct file *f)
+{
+ int ret = 0;
+
+ pr_debug("%s\n", __func__);
+ return ret;
+}
+
+static int audio_anc_close(struct inode *inode, struct file *f)
+{
+ int ret = 0;
+
+ pr_debug("%s\n", __func__);
+ return ret;
+}
+
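+/*
+ * Common ioctl path: validate the user-supplied size, anc_cmd id and
+ * per-command payload size before dispatching to the set/get handlers, and
+ * copy the result back to user space for AUDIO_ANC_GET_PARAM.
+ */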
+static long audio_anc_shared_ioctl(struct file *file, unsigned int cmd,
+ void __user *arg)
+{
+ int ret = 0;
+ int32_t size;
+ struct audio_anc_packet *data = NULL;
+
+ pr_err("%s EXT_ANC cmd %x\n", __func__, cmd);
+
+ switch (cmd) {
+ case AUDIO_ANC_SET_PARAM:
+ case AUDIO_ANC_GET_PARAM:
+ break;
+ default:
+ pr_err("%s: ioctl not found!\n", __func__);
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (copy_from_user(&size, (void *)arg, sizeof(size))) {
+ pr_err("%s: Could not copy size value from user\n", __func__);
+ ret = -EFAULT;
+ goto done;
+ } else if (size < sizeof(struct audio_anc_packet)) {
+ pr_err("%s: Invalid size sent to driver: %d, min size is %zd\n",
+ __func__, size, sizeof(struct audio_anc_packet));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ data = kmalloc(size, GFP_KERNEL);
+ if (data == NULL) {
+ ret = -ENOMEM;
+ pr_err("%s: Could not allocate memory of size %d for ioctl\n",
+ __func__, size);
+ goto done;
+ } else if (copy_from_user(data, (void *)arg, size)) {
+ pr_err("%s: Could not copy data from user\n",
+ __func__);
+ ret = -EFAULT;
+ goto done;
+ } else if ((data->hdr.anc_cmd < 0) ||
+ (data->hdr.anc_cmd >= ANC_CMD_MAX)) {
+ pr_err("%s: anc_cmd %d is Invalid!\n",
+ __func__, data->hdr.anc_cmd);
+ ret = -EINVAL;
+ goto done;
+ } else if ((data->hdr.anc_cmd_size <
+ get_user_anc_cmd_size(data->hdr.anc_cmd)) ||
+ (data->hdr.anc_cmd_size >
+ sizeof(union audio_anc_data))) {
+ pr_err("%s: anc_cmd size %d is Invalid! Min is %zd Max is %zd!\n",
+ __func__, data->hdr.anc_cmd_size,
+ get_user_anc_cmd_size(data->hdr.anc_cmd),
+ sizeof(union audio_anc_data));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (cmd) {
+ case AUDIO_ANC_SET_PARAM:
+ ret = call_set_anc(data->hdr.anc_cmd,
+ data->hdr.anc_cmd_size, &data->anc_data);
+ break;
+ case AUDIO_ANC_GET_PARAM:
+ ret = call_get_anc(data->hdr.anc_cmd,
+ data->hdr.anc_cmd_size, &data->anc_data);
+ break;
+ }
+
+ if (cmd == AUDIO_ANC_GET_PARAM) {
+ if (data->hdr.anc_cmd_size == 0)
+ goto done;
+ if (data == NULL)
+ goto done;
+ if ((sizeof(data->hdr) + data->hdr.anc_cmd_size) > size) {
+ pr_err("%s: header size %zd plus ype size %d larger than data buffer size %d\n",
+ __func__, sizeof(data->hdr),
+ data->hdr.anc_cmd_size, size);
+ ret = -EFAULT;
+ goto done;
+ } else if (copy_to_user((void *)arg, data,
+ sizeof(data->hdr) + data->hdr.anc_cmd_size)) {
+ pr_err("%s: Could not copy cal type to user\n",
+ __func__);
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+
+done:
+ kfree(data);
+
+ pr_err("%s EXT_ANC ret %x\n", __func__, ret);
+
+ return ret;
+}
+
+static long audio_anc_ioctl(struct file *f,
+ unsigned int cmd, unsigned long arg)
+{
+ return audio_anc_shared_ioctl(f, cmd, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+
+#define AUDIO_ANC_SET_PARAM32 _IOWR(ANC_IOCTL_MAGIC, \
+ 300, compat_uptr_t)
+#define AUDIO_ANC_GET_PARAM32 _IOWR(ANC_IOCTL_MAGIC, \
+ 301, compat_uptr_t)
+
+static long audio_anc_compat_ioctl(struct file *f,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned int cmd64;
+ int ret = 0;
+
+ switch (cmd) {
+ case AUDIO_ANC_SET_PARAM32:
+ cmd64 = AUDIO_ANC_SET_PARAM;
+ break;
+ case AUDIO_ANC_GET_PARAM32:
+ cmd64 = AUDIO_ANC_GET_PARAM;
+ break;
+ default:
+ pr_err("%s: ioctl not found!\n", __func__);
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ret = audio_anc_shared_ioctl(f, cmd64, compat_ptr(arg));
+done:
+ return ret;
+}
+#endif
+
+static const struct file_operations audio_anc_fops = {
+ .owner = THIS_MODULE,
+ .open = audio_anc_open,
+ .release = audio_anc_close,
+ .unlocked_ioctl = audio_anc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = audio_anc_compat_ioctl,
+#endif
+};
+
+int msm_anc_dev_create(struct platform_device *pdev)
+{
+ int result = 0;
+ dev_t dev = MKDEV(major, 0);
+ struct device *device_handle;
+
+ pr_debug("%s\n", __func__);
+
+ if (major) {
+ result = register_chrdev_region(dev, 1, DEVICE_NAME);
+ } else {
+ result = alloc_chrdev_region(&dev, 0, 1, DEVICE_NAME);
+ major = MAJOR(dev);
+ }
+
+ if (result < 0) {
+ pr_err("%s: Registering msm_audio_anc device failed\n",
+ __func__);
+ goto done;
+ }
+
+ audio_anc.anc_class = class_create(THIS_MODULE, "msm_audio_anc");
+ if (IS_ERR(audio_anc.anc_class)) {
+ result = PTR_ERR(audio_anc.anc_class);
+ pr_err("%s: Error creating anc class: %d\n",
+ __func__, result);
+ goto unregister_chrdev_region;
+ }
+
+ cdev_init(&audio_anc.myc, &audio_anc_fops);
+ result = cdev_add(&audio_anc.myc, dev, 1);
+
+ if (result < 0) {
+ pr_err("%s: Registering file operations failed\n",
+ __func__);
+ goto class_destroy;
+ }
+
+ device_handle = device_create(audio_anc.anc_class,
+ NULL, audio_anc.myc.dev, NULL, "msm_audio_anc");
+ if (IS_ERR(device_handle)) {
+ result = PTR_ERR(device_handle);
+ pr_err("%s: device_create failed: %d\n", __func__, result);
+ goto class_destroy;
+ }
+
+ pr_debug("exit %s\n", __func__);
+ return 0;
+
+class_destroy:
+ class_destroy(audio_anc.anc_class);
+unregister_chrdev_region:
+ unregister_chrdev_region(MKDEV(major, 0), 1);
+done:
+ pr_err("exit %s\n", __func__);
+ return result;
+}
+
+int msm_anc_dev_destroy(struct platform_device *pdev)
+{
+ device_destroy(audio_anc.anc_class, audio_anc.myc.dev);
+ cdev_del(&audio_anc.myc);
+ class_destroy(audio_anc.anc_class);
+ unregister_chrdev_region(MKDEV(major, 0), 1);
+
+ return 0;
+}
+
+static int __init audio_anc_init(void)
+{
+ return msm_anc_dev_init();
+}
+
+static void __exit audio_anc_exit(void)
+{
+ msm_anc_dev_deinit();
+}
+
+module_init(audio_anc_init);
+module_exit(audio_anc_exit);
+
+MODULE_DESCRIPTION("SoC QDSP6v2 Audio ANC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
index 7ef16ad5575b..15c3e7e42c6d 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
@@ -83,6 +83,7 @@ static int msm_audio_ion_smmu_map(struct ion_client *client,
struct msm_audio_smmu_vm_map_cmd_rsp cmd_rsp;
struct msm_audio_smmu_map_data *map_data = NULL;
struct msm_audio_smmu_vm_map_cmd smmu_map_cmd;
+ unsigned long delay = jiffies + (HZ / 2);
rc = ion_handle_get_size(client, handle, len);
if (rc) {
@@ -122,12 +123,15 @@ static int msm_audio_ion_smmu_map(struct ion_client *client,
goto err;
}
- cmd_rsp_size = sizeof(cmd_rsp);
- rc = habmm_socket_recv(msm_audio_ion_hab_handle,
- (void *)&cmd_rsp,
- &cmd_rsp_size,
- 0xFFFFFFFF,
- 0);
+ do {
+ cmd_rsp_size = sizeof(cmd_rsp);
+ rc = habmm_socket_recv(msm_audio_ion_hab_handle,
+ (void *)&cmd_rsp,
+ &cmd_rsp_size,
+ 0xFFFFFFFF,
+ 0);
+ } while (time_before(jiffies, delay) && (rc == -EAGAIN) &&
+ (cmd_rsp_size == 0));
if (rc) {
pr_err("%s: habmm_socket_recv failed %d\n",
__func__, rc);
@@ -181,6 +185,7 @@ static int msm_audio_ion_smmu_unmap(struct ion_client *client,
struct msm_audio_smmu_vm_unmap_cmd_rsp cmd_rsp;
struct msm_audio_smmu_map_data *map_data, *next;
struct msm_audio_smmu_vm_unmap_cmd smmu_unmap_cmd;
+ unsigned long delay = jiffies + (HZ / 2);
/*
* Though list_for_each_entry_safe is delete safe, lock
@@ -205,12 +210,15 @@ static int msm_audio_ion_smmu_unmap(struct ion_client *client,
goto err;
}
- cmd_rsp_size = sizeof(cmd_rsp);
- rc = habmm_socket_recv(msm_audio_ion_hab_handle,
- (void *)&cmd_rsp,
- &cmd_rsp_size,
- 0xFFFFFFFF,
- 0);
+ do {
+ cmd_rsp_size = sizeof(cmd_rsp);
+ rc = habmm_socket_recv(msm_audio_ion_hab_handle,
+ (void *)&cmd_rsp,
+ &cmd_rsp_size,
+ 0xFFFFFFFF,
+ 0);
+ } while (time_before(jiffies, delay) &&
+ (rc == -EAGAIN) && (cmd_rsp_size == 0));
if (rc) {
pr_err("%s: habmm_socket_recv failed %d\n",
__func__, rc);
diff --git a/drivers/soc/qcom/qdsp6v2/sdsp-anc.c b/drivers/soc/qcom/qdsp6v2/sdsp-anc.c
new file mode 100644
index 000000000000..9294485f7ff2
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/sdsp-anc.c
@@ -0,0 +1,801 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/wakelock.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/delay.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/q6audio-v2.h>
+#include <sound/audio_cal_utils.h>
+#include <sound/adsp_err.h>
+#include <linux/qdsp6v2/apr_tal.h>
+
+#include <linux/qdsp6v2/sdsp_anc.h>
+
+#define TIMEOUT_MS 1000
+
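+/* Driver-wide state for the SDSP ANC APR session: one wait queue per AFE port */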
+struct anc_if_ctl {
+ void *apr;
+ atomic_t state;
+ atomic_t status;
+ wait_queue_head_t wait[AFE_MAX_PORTS];
+ struct task_struct *task;
+ struct anc_get_rpm_resp rpm_calib_data;
+ uint32_t mmap_handle;
+ struct mutex afe_cmd_lock;
+};
+
+static struct anc_if_ctl this_anc_if;
+
+static int32_t anc_get_param_callback(uint32_t *payload,
+ uint32_t payload_size)
+{
+ u32 param_id;
+ struct anc_get_rpm_resp *resp =
+ (struct anc_get_rpm_resp *) payload;
+
+ if (!(&(resp->pdata))) {
+ pr_err("%s: Error: resp pdata is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ param_id = resp->pdata.param_id;
+ if (param_id == AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM) {
+ if (payload_size < sizeof(this_anc_if.rpm_calib_data)) {
+ pr_err("%s: Error: received size %d, calib_data size %zu\n",
+ __func__, payload_size,
+ sizeof(this_anc_if.rpm_calib_data));
+ return -EINVAL;
+ }
+
+ memcpy(&this_anc_if.rpm_calib_data, payload,
+ sizeof(this_anc_if.rpm_calib_data));
+ if (!this_anc_if.rpm_calib_data.status) {
+ atomic_set(&this_anc_if.state, 0);
+ } else {
+ pr_debug("%s: calib resp status: %d", __func__,
+ this_anc_if.rpm_calib_data.status);
+ atomic_set(&this_anc_if.state, -1);
+ }
+ }
+
+ return 0;
+}
+
+static void anc_if_callback_debug_print(struct apr_client_data *data)
+{
+ uint32_t *payload;
+
+ payload = data->payload;
+
+ if (data->payload_size >= 8)
+ pr_debug("%s: code = 0x%x PL#0[0x%x], PL#1[0x%x], size = %d\n",
+ __func__, data->opcode, payload[0], payload[1],
+ data->payload_size);
+ else if (data->payload_size >= 4)
+ pr_debug("%s: code = 0x%x PL#0[0x%x], size = %d\n",
+ __func__, data->opcode, payload[0],
+ data->payload_size);
+ else
+ pr_debug("%s: code = 0x%x, size = %d\n",
+ __func__, data->opcode, data->payload_size);
+}
+
+static int32_t anc_if_callback(struct apr_client_data *data, void *priv)
+{
+ if (!data) {
+ pr_err("%s: Invalid param data\n", __func__);
+ return -EINVAL;
+ }
+ if (data->opcode == RESET_EVENTS) {
+ pr_debug("%s: reset event = %d %d apr[%pK]\n",
+ __func__,
+ data->reset_event, data->reset_proc, this_anc_if.apr);
+
+ if (this_anc_if.apr) {
+ apr_reset(this_anc_if.apr);
+ atomic_set(&this_anc_if.state, 0);
+ this_anc_if.apr = NULL;
+ }
+
+ return 0;
+ }
+ anc_if_callback_debug_print(data);
+ if (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V2) {
+ u8 *payload = data->payload;
+
+ if (!payload || (data->token >= AFE_MAX_PORTS)) {
+ pr_err("%s: Error: size %d payload %pK token %d\n",
+ __func__, data->payload_size,
+ payload, data->token);
+ return -EINVAL;
+ }
+
+ if (anc_get_param_callback(data->payload, data->payload_size))
+ return -EINVAL;
+
+ wake_up(&this_anc_if.wait[data->token]);
+
+ } else if (data->payload_size) {
+ uint32_t *payload;
+
+ payload = data->payload;
+ if (data->opcode == APR_BASIC_RSP_RESULT) {
+ pr_debug("%s:opcode = 0x%x cmd = 0x%x status = 0x%x token=%d\n",
+ __func__, data->opcode,
+ payload[0], payload[1], data->token);
+ /* payload[1] contains the error status for response */
+ if (payload[1] != 0) {
+ atomic_set(&this_anc_if.status, payload[1]);
+ pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+ __func__, payload[0], payload[1]);
+ }
+ switch (payload[0]) {
+ case AFE_PORT_CMD_SET_PARAM_V2:
+ case AFE_PORT_CMD_DEVICE_STOP:
+ case AFE_PORT_CMD_DEVICE_START:
+ case AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS:
+ case AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS:
+ case AFE_SVC_CMD_SET_PARAM:
+ atomic_set(&this_anc_if.state, 0);
+ wake_up(&this_anc_if.wait[data->token]);
+ break;
+ default:
+ pr_err("%s: Unknown cmd 0x%x\n", __func__,
+ payload[0]);
+ break;
+ }
+ } else if (data->opcode ==
+ AFE_SERVICE_CMDRSP_SHARED_MEM_MAP_REGIONS) {
+ pr_err("%s: ANC mmap_handle: 0x%x\n",
+ __func__, payload[0]);
+ this_anc_if.mmap_handle = payload[0];
+ atomic_set(&this_anc_if.state, 0);
+ wake_up(&this_anc_if.wait[data->token]);
+ }
+ }
+ return 0;
+}
+
+int anc_sdsp_interface_prepare(void)
+{
+ int ret = 0;
+
+ pr_debug("%s:\n", __func__);
+
+ if (this_anc_if.apr == NULL) {
+ this_anc_if.apr = apr_register("SDSP", "MAS", anc_if_callback,
+ 0xFFFFFFFF, &this_anc_if);
+ if (this_anc_if.apr == NULL) {
+ pr_err("%s: Unable to register AFE\n", __func__);
+ ret = -ENODEV;
+ }
+ }
+ return ret;
+}
+
+/*
+ * anc_if_apr_send_pkt : returns 0 on success, negative otherwise.
+ */
+static int anc_if_apr_send_pkt(void *data, wait_queue_head_t *wait)
+{
+ int ret;
+
+ if (wait)
+ atomic_set(&this_anc_if.state, 1);
+ atomic_set(&this_anc_if.status, 0);
+ ret = apr_send_pkt(this_anc_if.apr, data);
+ if (ret > 0) {
+ if (wait) {
+ ret = wait_event_timeout(*wait,
+ (atomic_read(&this_anc_if.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ } else if (atomic_read(&this_anc_if.status) > 0) {
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(atomic_read(
+ &this_anc_if.status)));
+ ret = adsp_err_get_lnx_err_code(
+ atomic_read(&this_anc_if.status));
+ } else {
+ ret = 0;
+ }
+ } else {
+ ret = 0;
+ }
+ } else if (ret == 0) {
+ pr_err("%s: packet not transmitted\n", __func__);
+ /* apr_send_pkt can return 0 when nothing is transmitted */
+ ret = -EINVAL;
+ }
+
+ pr_debug("%s: leave %d\n", __func__, ret);
+ return ret;
+}
+
+static int anc_if_send_cmd_port_start(u16 port_id)
+{
+ struct afe_port_cmd_device_start start;
+ int ret, index;
+
+ pr_debug("%s: enter\n", __func__);
+ index = q6audio_get_port_index(port_id);
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: AFE port index[%d] invalid!\n",
+ __func__, index);
+ return -EINVAL;
+ }
+ ret = q6audio_validate_port(port_id);
+ if (ret < 0) {
+ pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
+ return -EINVAL;
+ }
+
+ start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ start.hdr.pkt_size = sizeof(start);
+ start.hdr.src_port = 0;
+ start.hdr.dest_port = 0;
+ start.hdr.token = index;
+ start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+ start.port_id = q6audio_get_port_id(port_id);
+ pr_debug("%s: cmd device start opcode[0x%x] port id[0x%x]\n",
+ __func__, start.hdr.opcode, start.port_id);
+
+ ret = anc_if_apr_send_pkt(&start, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: AFE enable for port 0x%x failed %d\n", __func__,
+ port_id, ret);
+ } else if (this_anc_if.task != current) {
+ this_anc_if.task = current;
+ pr_debug("task_name = %s pid = %d\n",
+ this_anc_if.task->comm, this_anc_if.task->pid);
+ }
+
+ return ret;
+}
+
+int anc_if_send_cmd_port_stop(int port_id)
+{
+ struct afe_port_cmd_device_stop stop;
+ int ret = 0;
+
+ if (this_anc_if.apr == NULL) {
+ pr_err("%s: AFE is already closed\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ pr_debug("%s: port_id = 0x%x\n", __func__, port_id);
+ port_id = q6audio_convert_virtual_to_portid(port_id);
+
+ stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ stop.hdr.pkt_size = sizeof(stop);
+ stop.hdr.src_port = 0;
+ stop.hdr.dest_port = 0;
+ stop.hdr.token = 0;
+ stop.hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+ stop.port_id = port_id;
+ stop.reserved = 0;
+
+ ret = anc_if_apr_send_pkt(&stop, NULL);
+ if (ret)
+ pr_err("%s: AFE close failed %d\n", __func__, ret);
+
+fail_cmd:
+ return ret;
+
+}
+
+int anc_if_config_ref(u16 port_id, u32 sample_rate,
+ u32 bit_width, u16 num_channel)
+{
+ struct anc_config_ref_command config;
+ int ret = 0;
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_REFS;
+ config.pdata.param_id = AUD_MSVC_PARAM_ID_DEV_ANC_REFS_CONFIG;
+ config.pdata.param_size = sizeof(config.refs);
+ config.refs.minor_version = AUD_MSVC_API_VERSION_DEV_ANC_REFS_CONFIG;
+ config.refs.port_id = q6audio_get_port_id(port_id);
+ config.refs.sample_rate = sample_rate;
+ config.refs.bit_width = bit_width;
+ config.refs.num_channel = num_channel;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: anc_if_config_ref for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ pr_err("%s: anc_if_config_ref size of param is %lu\n",
+ __func__, sizeof(config.refs));
+ }
+
+ return ret;
+}
+
+int anc_if_share_resource(u16 port_id, u16 rddma_idx, u16 wrdma_idx,
+ u32 lpm_start_addr, u32 lpm_length)
+{
+ struct anc_share_resource_command config;
+ int ret = 0;
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_RESOURCE_SHARE;
+ config.pdata.param_id = AUD_MSVC_PARAM_ID_PORT_SHARE_RESOURCE_CONFIG;
+ config.pdata.param_size = sizeof(config.resource);
+ config.resource.minor_version =
+ AUD_MSVC_API_VERSION_SHARE_RESOURCE_CONFIG;
+ config.resource.rddma_idx = rddma_idx;
+ config.resource.wrdma_idx = wrdma_idx;
+ config.resource.lpm_start_addr = lpm_start_addr;
+ config.resource.lpm_length = lpm_length;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: share resource for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ }
+
+ return ret;
+}
+
+int anc_if_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port)
+{
+ struct aud_audioif_config_command config;
+ int ret = 0;
+ int index = 0;
+
+ if (!tdm_port) {
+ pr_err("%s: Error, no configuration data\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: port id: 0x%x\n", __func__, port_id);
+
+ index = q6audio_get_port_index(port_id);
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: AFE port index[%d] invalid!\n",
+ __func__, index);
+ return -EINVAL;
+ }
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ config.pdata.param_id = AFE_PARAM_ID_TDM_CONFIG;
+ config.pdata.param_size = sizeof(config.port);
+ config.port.tdm = tdm_port->tdm;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: AFE enable for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ goto fail_cmd;
+ }
+
+ ret = anc_if_send_cmd_port_start(port_id);
+
+fail_cmd:
+ return ret;
+}
+
+int anc_if_tdm_port_stop(u16 port_id)
+{
+ return anc_if_send_cmd_port_stop(port_id);
+}
+
+int anc_if_set_rpm(u16 port_id, u32 rpm)
+{
+ int ret = 0;
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+
+ {
+ struct anc_set_rpm_command config;
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) -
+ sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
+ config.pdata.param_id = AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM;
+ config.pdata.param_size = sizeof(config.set_rpm);
+ config.set_rpm.minor_version =
+ AUD_MSVC_API_VERSION_DEV_ANC_ALGO_RPM;
+ config.set_rpm.rpm = rpm;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: share resource for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ }
+ }
+
+ return ret;
+}
+
+int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode)
+{
+ int ret = 0;
+
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+
+ {
+ struct anc_set_bypass_mode_command config;
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) -
+ sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
+ config.pdata.param_id =
+ AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_BYPASS_MODE;
+ config.pdata.param_size = sizeof(config.set_bypass_mode);
+ config.set_bypass_mode.minor_version =
+ AUD_MSVC_API_VERSION_DEV_ANC_ALGO_BYPASS_MODE;
+ config.set_bypass_mode.bypass_mode = bypass_mode;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: share resource for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ }
+ }
+
+ return ret;
+}
+
+int anc_if_set_algo_module_id(u16 port_id, u32 module_id)
+{
+ int ret = 0;
+
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+
+ {
+ struct anc_set_algo_module_id_command config;
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) -
+ sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
+ config.pdata.param_id =
+ AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_MODULE_ID;
+ config.pdata.param_size = sizeof(config.set_algo_module_id);
+ config.set_algo_module_id.minor_version = 1;
+ config.set_algo_module_id.module_id = module_id;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: anc algo module ID for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ }
+ }
+
+ return ret;
+}
+
+int anc_if_set_anc_mic_spkr_layout(u16 port_id,
+struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p)
+{
+ int ret = 0;
+
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+
+ {
+ struct anc_set_mic_spkr_layout_info_command config;
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) -
+ sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
+ config.pdata.param_id =
+ AUD_MSVC_PARAM_ID_PORT_ANC_MIC_SPKR_LAYOUT_INFO;
+ config.pdata.param_size = sizeof(config.set_mic_spkr_layout);
+
+ memcpy(&config.set_mic_spkr_layout, set_mic_spkr_layout_p,
+ sizeof(config.set_mic_spkr_layout));
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: anc algo module ID for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ }
+ }
+
+ return ret;
+}
+
+int anc_if_cmd_memory_map(int port_id, phys_addr_t dma_addr_p,
+ u32 dma_buf_sz)
+{
+ int ret = 0;
+ int cmd_size = 0;
+ void *payload = NULL;
+ void *mmap_region_cmd = NULL;
+ struct afe_service_cmd_shared_mem_map_regions *mregion = NULL;
+ struct afe_service_shared_map_region_payload *mregion_pl = NULL;
+ int index = 0;
+
+ pr_debug("%s:\n", __func__);
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: AFE port index[%d] invalid!\n",
+ __func__, index);
+ return -EINVAL;
+ }
+ ret = q6audio_validate_port(port_id);
+ if (ret < 0) {
+ pr_err("%s: Invalid port 0x%x ret %d",
+ __func__, port_id, ret);
+ return -EINVAL;
+ }
+
+ cmd_size = sizeof(struct afe_service_cmd_shared_mem_map_regions)
+ + sizeof(struct afe_service_shared_map_region_payload);
+
+ mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!mmap_region_cmd) {
+ ret = -ENOMEM;
+ pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
+ return ret;
+ }
+
+ mregion = (struct afe_service_cmd_shared_mem_map_regions *)
+ mmap_region_cmd;
+ mregion->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mregion->hdr.pkt_size = cmd_size;
+ mregion->hdr.src_port = 0;
+ mregion->hdr.dest_port = 0;
+ mregion->hdr.token = index;
+ mregion->hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS;
+ mregion->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+ mregion->num_regions = 1;
+ mregion->property_flag = 0x00;
+
+ payload = ((u8 *) mmap_region_cmd +
+ sizeof(struct afe_service_cmd_shared_mem_map_regions));
+ mregion_pl = (struct afe_service_shared_map_region_payload *)payload;
+
+ mregion_pl->shm_addr_lsw = lower_32_bits(dma_addr_p);
+ mregion_pl->shm_addr_msw = msm_audio_populate_upper_32_bits(dma_addr_p);
+ mregion_pl->mem_size_bytes = dma_buf_sz;
+
+ ret = anc_if_apr_send_pkt(mmap_region_cmd, &this_anc_if.wait[index]);
+ if (ret)
+ pr_err("%s: AFE memory map cmd failed %d\n",
+ __func__, ret);
+ kfree(mmap_region_cmd);
+ return ret;
+}
+
+int anc_if_cmd_memory_unmap(int port_id, u32 mem_map_handle)
+{
+ int ret = 0;
+ struct afe_service_cmd_shared_mem_unmap_regions mregion;
+ int index = 0;
+
+ pr_debug("%s: handle 0x%x\n", __func__, mem_map_handle);
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: AFE port index[%d] invalid!\n",
+ __func__, index);
+ return -EINVAL;
+ }
+ ret = q6audio_validate_port(port_id);
+ if (ret < 0) {
+ pr_err("%s: Invalid port 0x%x ret %d",
+ __func__, port_id, ret);
+ return -EINVAL;
+ }
+
+ mregion.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mregion.hdr.pkt_size = sizeof(mregion);
+ mregion.hdr.src_port = 0;
+ mregion.hdr.dest_port = 0;
+ mregion.hdr.token = index;
+ mregion.hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS;
+ mregion.mem_map_handle = mem_map_handle;
+
+ ret = anc_if_apr_send_pkt(&mregion, &this_anc_if.wait[index]);
+ if (ret)
+ pr_err("%s: msvc memory unmap cmd failed %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int __init sdsp_anc_init(void)
+{
+ int i = 0, ret = 0;
+
+ atomic_set(&this_anc_if.state, 0);
+ atomic_set(&this_anc_if.status, 0);
+ this_anc_if.apr = NULL;
+ this_anc_if.mmap_handle = 0;
+ mutex_init(&this_anc_if.afe_cmd_lock);
+ for (i = 0; i < AFE_MAX_PORTS; i++)
+ init_waitqueue_head(&this_anc_if.wait[i]);
+
+ return ret;
+}
+
+static void __exit sdsp_anc_exit(void)
+{
+ mutex_destroy(&this_anc_if.afe_cmd_lock);
+}
+
+device_initcall(sdsp_anc_init);
+__exitcall(sdsp_anc_exit);
diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c
index b54af9eae8ec..ed7493d063ae 100644
--- a/drivers/soc/qcom/rpm_stats.c
+++ b/drivers/soc/qcom/rpm_stats.c
@@ -430,7 +430,7 @@ static ssize_t rpmstats_show(struct kobject *kobj,
prvdata);
}
- ret = snprintf(buf, prvdata->len, prvdata->buf);
+ ret = snprintf(buf, prvdata->len, "%s", prvdata->buf);
iounmap(prvdata->reg_base);
ioremap_fail:
kfree(prvdata);
diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c
index a788c8c3673e..4b44d9694092 100644
--- a/drivers/soc/qcom/scm_qcpe.c
+++ b/drivers/soc/qcom/scm_qcpe.c
@@ -10,6 +10,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) "QSEECOM: %s:%d : " fmt, __func__, __LINE__
+
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -18,7 +20,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/of.h>
#include <asm/cacheflush.h>
#include <asm/compiler.h>
@@ -30,6 +31,12 @@
#include <linux/habmm.h>
+#ifdef CONFIG_GHS_VMM
+#include <../../staging/android/ion/ion_hvenv_driver.h>
+#include <linux/msm_ion.h>
+#include <soc/qcom/qseecomi.h>
+#endif
+
#define SCM_ENOMEM (-5)
#define SCM_EOPNOTSUPP (-4)
#define SCM_EINVAL_ADDR (-3)
@@ -68,7 +75,8 @@ DEFINE_MUTEX(scm_lmh_lock);
else \
result = x + y; \
result; \
- })
+})
+
/**
* struct scm_command - one SCM command buffer
* @len: total available memory for command and response
@@ -113,6 +121,19 @@ struct scm_response {
u32 is_complete;
};
+struct scm_extra_arg {
+ union {
+ u32 args32[N_EXT_SCM_ARGS];
+ u64 args64[N_EXT_SCM_ARGS];
+ };
+};
+
+struct smc_params_s {
+ uint64_t fn_id;
+ uint64_t arginfo;
+ uint64_t args[MAX_SCM_ARGS];
+} __packed;
+
#ifdef CONFIG_ARM64
#define R0_STR "x0"
@@ -141,6 +162,16 @@ struct scm_response {
#endif
+static enum scm_interface_version {
+ SCM_UNKNOWN,
+ SCM_LEGACY,
+ SCM_ARMV8_32,
+ SCM_ARMV8_64,
+} scm_version = SCM_UNKNOWN;
+
+/* This will be set to specify SMC32 or SMC64 */
+static u32 scm_version_mask;
+
/**
* scm_command_to_response() - Get a pointer to a scm_response
* @cmd: command
@@ -195,76 +226,280 @@ static int scm_remap_error(int err)
return -EINVAL;
}
-#define MAX_SCM_ARGS 10
+#ifdef CONFIG_GHS_VMM
+enum SCM_QCPE_IONIZE {
+ /* args[0] - physical addr, args[1] - length */
+ IONIZE_IDX_0,
-struct qcpe_msg_s {
- uint64_t fn_id;
- uint64_t arginfo;
- uint64_t args[MAX_SCM_ARGS];
+ /* args[1] - physical addr, args[2] - length */
+ IONIZE_IDX_1,
+
+ /* args[0] - physical addr, args[1] - length */
+ /* args[2] - physical addr, args[3] - length */
+ IONIZE_IDX_0_2,
+
+ /* args[2] - physical addr, args[3] - length */
+ IONIZE_IDX_2,
+
+ /* args[5] - physical addr, args[6] - length */
+ IONIZE_IDX_5
};
+static struct ion_client *ion_clnt;
+
+static int scm_ion_alloc(size_t len, void **vaddr,
+ ion_phys_addr_t *paddr, struct ion_handle **ihandle)
+{
+ struct ion_handle *ihndl = NULL;
+ void *mvaddr;
+ ion_phys_addr_t mpaddr;
+ int ret = 0;
+
+ if (!ion_clnt) {
+ ion_clnt = hvenv_ion_client_create("qseecom-kernel");
+ if (IS_ERR_OR_NULL(ion_clnt)) {
+ pr_err("Ion client cannot be created\n");
+ return SCM_ENOMEM;
+ }
+ }
+
+ ihndl = ion_alloc(ion_clnt, len,
+ SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL(ihndl)) {
+ pr_err("ION alloc failed\n");
+ return SCM_ENOMEM;
+ }
+
+ mvaddr = ion_map_kernel(ion_clnt, ihndl);
+ if (IS_ERR_OR_NULL(mvaddr)) {
+ pr_err("ION memory mapping for image loading failed\n");
+ ret = SCM_ENOMEM;
+ goto free_ion;
+ }
+
+ ret = ion_phys(ion_clnt, ihndl, &mpaddr, &len);
+ if (ret) {
+ pr_err("physical memory retrieval failure\n");
+ ret = SCM_ENOMEM;
+ goto unmap_ion;
+
+ }
+
+ *vaddr = mvaddr;
+ *paddr = mpaddr;
+ *ihandle = ihndl;
+ return ret;
+
+unmap_ion:
+ ion_unmap_kernel(ion_clnt, ihndl);
+free_ion:
+ ion_free(ion_clnt, ihndl);
+ return ret;
+}
+
+static int scm_ionize(enum SCM_QCPE_IONIZE idx,
+ u64 *args, struct ion_handle **ihandle)
+{
+ ion_phys_addr_t ion_paddr;
+ void *krn_vaddr;
+ void *ion_vaddr;
+ size_t len, len1;
+ struct ion_handle *ihndl = NULL;
+ int ret = 0;
+
+ switch (idx) {
+ case IONIZE_IDX_0:
+ len = (size_t)args[1];
+ ret = scm_ion_alloc(len, &ion_vaddr, &ion_paddr, &ihndl);
+ if (ret)
+ break;
+ krn_vaddr = phys_to_virt(args[0]);
+ memcpy(ion_vaddr, krn_vaddr, len);
+ args[0] = ion_paddr;
+ break;
+
+ case IONIZE_IDX_1:
+ len = (size_t)args[2];
+ ret = scm_ion_alloc(len, &ion_vaddr, &ion_paddr, &ihndl);
+ if (ret)
+ break;
+ krn_vaddr = phys_to_virt(args[1]);
+ memcpy(ion_vaddr, krn_vaddr, len);
+ args[1] = ion_paddr;
+ break;
+
+ case IONIZE_IDX_0_2:
+ len = (size_t)args[1] + (size_t)args[3];
+ ret = scm_ion_alloc(len, &ion_vaddr, &ion_paddr, &ihndl);
+ if (ret)
+ break;
+ krn_vaddr = phys_to_virt(args[0]);
+ len = (size_t)args[1];
+ memcpy(ion_vaddr, krn_vaddr, len);
+ args[0] = ion_paddr;
+
+ krn_vaddr = phys_to_virt(args[2]);
+ len1 = (size_t)args[3];
+ memcpy((uint8_t *)ion_vaddr + len, krn_vaddr, len1);
+ args[2] = ion_paddr;
+ break;
+
+ case IONIZE_IDX_2:
+ len = (size_t)args[3];
+ ret = scm_ion_alloc(len, &ion_vaddr, &ion_paddr, &ihndl);
+ if (ret)
+ break;
+ krn_vaddr = phys_to_virt(args[2]);
+ memcpy(ion_vaddr, krn_vaddr, len);
+ args[2] = ion_paddr;
+ break;
+
+ case IONIZE_IDX_5:
+ len = (size_t)args[6];
+ ret = scm_ion_alloc(len, &ion_vaddr, &ion_paddr, &ihndl);
+ if (ret)
+ break;
+ krn_vaddr = phys_to_virt(args[5]);
+ memcpy(ion_vaddr, krn_vaddr, len);
+ args[5] = ion_paddr;
+ break;
+ default:
+ break;
+ }
+ *ihandle = ihndl;
+ return ret;
+}
+
+static int ionize_buffers(u32 fn_id,
+ struct smc_params_s *desc, struct ion_handle **ihandle)
+{
+ struct ion_handle *ihndl = NULL;
+ int ret = 0;
+
+ switch (fn_id) {
+ case TZ_OS_APP_LOOKUP_ID:
+ case TZ_OS_KS_GEN_KEY_ID:
+ case TZ_OS_KS_DEL_KEY_ID:
+ case TZ_OS_KS_SET_PIPE_KEY_ID:
+ case TZ_OS_KS_UPDATE_KEY_ID:
+ ret = scm_ionize(IONIZE_IDX_0, desc->args, &ihndl);
+ break;
+
+ case TZ_ES_SAVE_PARTITION_HASH_ID:
+ ret = scm_ionize(IONIZE_IDX_1, desc->args, &ihndl);
+ break;
+
+ case TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID:
+ ret = scm_ionize(IONIZE_IDX_2, desc->args, &ihndl);
+ break;
+
+ case TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID:
+ case TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID:
+ case TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID:
+ ret = scm_ionize(IONIZE_IDX_5, desc->args, &ihndl);
+ break;
+ default:
+ break;
+ }
+ *ihandle = ihndl;
+ return ret;
+}
+
+static void free_ion_buffers(struct ion_handle *ihandle)
+{
+ ion_free(ion_clnt, ihandle);
+}
+#endif
+
static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc)
{
static bool opened;
static u32 handle;
- u32 ret;
u32 size_bytes;
- struct qcpe_msg_s msg;
+ struct smc_params_s smc_params = {0,};
+ int ret;
+#ifdef CONFIG_GHS_VMM
+ int i;
+ uint64_t arglen = desc->arginfo & 0xf;
+ struct ion_handle *ihandle = NULL;
+#endif
- pr_info("scm_call_qcpe: IN: 0x%x, 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx",
- fn_id, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->args[3], desc->args[4],
- desc->args[5], desc->args[6]);
+ pr_info("\nscm_call_qcpe: IN: 0x%x, 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
+ fn_id, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->args[3], desc->x5);
if (!opened) {
ret = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0);
if (ret) {
- pr_err("scm_call_qcpe: habmm_socket_open failed with ret = %d",
- ret);
+ pr_err(
+ "scm_call_qcpe: habmm_socket_open failed with ret = %d",
+ ret);
return ret;
}
opened = true;
}
- msg.fn_id = fn_id | 0x40000000; /* SMC64_MASK */
- msg.arginfo = desc->arginfo;
- msg.args[0] = desc->args[0];
- msg.args[1] = desc->args[1];
- msg.args[2] = desc->args[2];
- msg.args[3] = desc->x5;
- msg.args[4] = 0;
+ smc_params.fn_id = fn_id | scm_version_mask;
+ smc_params.arginfo = desc->arginfo;
+ smc_params.args[0] = desc->args[0];
+ smc_params.args[1] = desc->args[1];
+ smc_params.args[2] = desc->args[2];
- ret = habmm_socket_send(handle, &msg, sizeof(msg), 0);
- if (ret) {
- pr_err("scm_call_qcpe: habmm_socket_send failed with ret = %d",
- ret);
- return ret;
- }
+#ifdef CONFIG_GHS_VMM
+ if (arglen <= N_REGISTER_ARGS) {
+ smc_params.args[FIRST_EXT_ARG_IDX] = desc->x5;
+ } else {
+ struct scm_extra_arg *argbuf =
+ (struct scm_extra_arg *)desc->extra_arg_buf;
+ int j = 0;
- size_bytes = sizeof(msg);
- memset(&msg, 0x0, sizeof(msg));
+ if (scm_version == SCM_ARMV8_64)
+ for (i = FIRST_EXT_ARG_IDX; i < MAX_SCM_ARGS; i++)
+ smc_params.args[i] = argbuf->args64[j++];
+ else
+ for (i = FIRST_EXT_ARG_IDX; i < MAX_SCM_ARGS; i++)
+ smc_params.args[i] = argbuf->args32[j++];
+ }
- ret = habmm_socket_recv(handle, &msg, &size_bytes, 0, 0);
- if (ret) {
- pr_err("scm_call_qcpe: habmm_socket_recv failed with ret = %d",
- ret);
+ ret = ionize_buffers(fn_id & (~SMC64_MASK), &smc_params, &ihandle);
+ if (ret)
return ret;
- }
+#else
+ smc_params.args[3] = desc->x5;
+ smc_params.args[4] = 0;
+#endif
- if (size_bytes != sizeof(msg)) {
- pr_err("scm_call_qcpe: expected size: %lu, actual=%u\n",
- sizeof(msg), size_bytes);
- return SCM_ERROR;
- }
+ ret = habmm_socket_send(handle, &smc_params, sizeof(smc_params), 0);
+ if (ret)
+ goto err_ret;
- desc->ret[0] = msg.args[1];
- desc->ret[1] = msg.args[2];
- desc->ret[2] = msg.args[3];
+ size_bytes = sizeof(smc_params);
+ memset(&smc_params, 0x0, sizeof(smc_params));
- pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx, 0x%llx",
- msg.args[0], msg.args[1], msg.args[2], msg.args[3]);
+ ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0, 0);
+ if (ret)
+ goto err_ret;
+
+ if (size_bytes != sizeof(smc_params)) {
+ pr_err("scm_call_qcpe: expected size: %lu, actual=%u\n",
+ sizeof(smc_params), size_bytes);
+ ret = SCM_ERROR;
+ goto err_ret;
+ }
- return msg.args[0];
+ desc->ret[0] = smc_params.args[1];
+ desc->ret[1] = smc_params.args[2];
+ desc->ret[2] = smc_params.args[3];
+ ret = smc_params.args[0];
+ pr_info("\nscm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx, 0x%llx",
+ smc_params.args[0], desc->ret[0], desc->ret[1], desc->ret[2]);
+
+err_ret:
+#ifdef CONFIG_GHS_VMM
+ if (ihandle)
+ free_ion_buffers(ihandle);
+#endif
+ return ret;
}
static u32 smc(u32 cmd_addr)
@@ -324,7 +559,7 @@ static void scm_inv_range(unsigned long start, unsigned long end)
outer_inv_range(start, end);
while (start < end) {
asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
- : "memory");
+ : "memory");
start += cacheline_size;
}
mb(); /* Make sure memory is visible to TZ */
@@ -357,9 +592,9 @@ static void scm_inv_range(unsigned long start, unsigned long end)
*/
static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
- size_t cmd_len, void *resp_buf, size_t resp_len,
- struct scm_command *scm_buf,
- size_t scm_buf_length)
+ size_t cmd_len, void *resp_buf, size_t resp_len,
+ struct scm_command *scm_buf,
+ size_t scm_buf_length)
{
int ret;
struct scm_response *rsp;
@@ -403,15 +638,15 @@ static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
* since we want the first attempt to be the "fastpath".
*/
static int _scm_call_retry(u32 svc_id, u32 cmd_id, const void *cmd_buf,
- size_t cmd_len, void *resp_buf, size_t resp_len,
- struct scm_command *cmd,
- size_t len)
+ size_t cmd_len, void *resp_buf, size_t resp_len,
+ struct scm_command *cmd,
+ size_t len)
{
int ret, retry_count = 0;
do {
ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
- resp_buf, resp_len, cmd, len);
+ resp_buf, resp_len, cmd, len);
if (ret == SCM_EBUSY)
msleep(SCM_EBUSY_WAIT_MS);
if (retry_count == 33)
@@ -447,28 +682,11 @@ int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
memset(scm_buf, 0, scm_buf_len);
ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
- resp_len, scm_buf, len);
+ resp_len, scm_buf, len);
return ret;
}
-struct scm_extra_arg {
- union {
- u32 args32[N_EXT_SCM_ARGS];
- u64 args64[N_EXT_SCM_ARGS];
- };
-};
-
-static enum scm_interface_version {
- SCM_UNKNOWN,
- SCM_LEGACY,
- SCM_ARMV8_32,
- SCM_ARMV8_64,
-} scm_version = SCM_UNKNOWN;
-
-/* This will be set to specify SMC32 or SMC64 */
-static u32 scm_version_mask;
-
bool is_scm_armv8(void)
{
int ret;
@@ -478,7 +696,7 @@ bool is_scm_armv8(void)
if (likely(scm_version != SCM_UNKNOWN))
return (scm_version == SCM_ARMV8_32) ||
- (scm_version == SCM_ARMV8_64);
+ (scm_version == SCM_ARMV8_64);
/*
* This is a one time check that runs on the first ever
* invocation of is_scm_armv8. We might be called in atomic
@@ -515,7 +733,7 @@ bool is_scm_armv8(void)
scm_version_mask = SMC64_MASK;
pr_debug("scm_call: scm version is %x, mask is %x\n", scm_version,
- scm_version_mask);
+ scm_version_mask);
return (scm_version == SCM_ARMV8_32) ||
(scm_version == SCM_ARMV8_64);
@@ -557,7 +775,7 @@ static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
desc->x5 = virt_to_phys(argbuf);
__cpuc_flush_dcache_area(argbuf, argbuflen);
outer_flush_range(virt_to_phys(argbuf),
- virt_to_phys(argbuf) + argbuflen);
+ virt_to_phys(argbuf) + argbuflen);
return 0;
}
@@ -582,7 +800,7 @@ static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
* Note that cache maintenance on the argument buffer (desc->args) is taken care
* of by scm_call2; however, callers are responsible for any other cached
* buffers passed over to the secure world.
-*/
+ */
int scm_call2(u32 fn_id, struct scm_desc *desc)
{
int arglen = desc->arginfo & 0xf;
@@ -652,16 +870,16 @@ int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | scm_version_mask;
pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
- x0, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->x5);
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5);
ret = scm_call_qcpe(x0, desc);
if (ret < 0)
pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
- x0, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->x5, ret, desc->ret[0],
- desc->ret[1], desc->ret[2]);
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5, ret, desc->ret[0],
+ desc->ret[1], desc->ret[2]);
if (arglen > N_REGISTER_ARGS)
kfree(desc->extra_arg_buf);
@@ -703,10 +921,10 @@ int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
return -ENOMEM;
ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
- resp_len, cmd, len);
+ resp_len, cmd, len);
if (unlikely(ret == SCM_EBUSY))
ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len,
- resp_buf, resp_len, cmd, PAGE_ALIGN(len));
+ resp_buf, resp_len, cmd, PAGE_ALIGN(len));
kfree(cmd);
return ret;
}
@@ -715,9 +933,9 @@ EXPORT_SYMBOL(scm_call);
#define SCM_CLASS_REGISTER (0x2 << 8)
#define SCM_MASK_IRQS BIT(5)
#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
- SCM_CLASS_REGISTER | \
- SCM_MASK_IRQS | \
- (n & 0xf))
+ SCM_CLASS_REGISTER | \
+ SCM_MASK_IRQS | \
+ (n & 0xf))
/**
* scm_call_atomic1() - Send an atomic SCM command with one argument
@@ -914,7 +1132,7 @@ EXPORT_SYMBOL(scm_call_atomic4_3);
* uninterruptable, atomic and SMP safe.
*/
s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
- u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
+ u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
{
int ret;
int context_id;
@@ -991,8 +1209,8 @@ EXPORT_SYMBOL(scm_get_version);
u32 scm_io_read(phys_addr_t address)
{
struct scm_desc desc = {
- .args[0] = address,
- .arginfo = SCM_ARGS(1),
+ .args[0] = address,
+ .arginfo = SCM_ARGS(1),
};
if (!is_scm_armv8())
@@ -1011,12 +1229,12 @@ int scm_io_write(phys_addr_t address, u32 val)
ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
else {
struct scm_desc desc = {
- .args[0] = address,
- .args[1] = val,
- .arginfo = SCM_ARGS(2),
+ .args[0] = address,
+ .args[1] = val,
+ .arginfo = SCM_ARGS(2),
};
ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_WRITE),
- &desc);
+ &desc);
}
return ret;
}
@@ -1032,7 +1250,7 @@ int scm_is_call_available(u32 svc_id, u32 cmd_id)
u32 svc_cmd = (svc_id << 10) | cmd_id;
ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
- sizeof(svc_cmd), &ret_val, sizeof(ret_val));
+ sizeof(svc_cmd), &ret_val, sizeof(ret_val));
if (!ret && ret_val)
return 1;
else
@@ -1130,7 +1348,7 @@ bool scm_is_secure_device(void)
desc.arginfo = 0;
if (!is_scm_armv8()) {
ret = scm_call(SCM_SVC_INFO, TZ_INFO_GET_SECURE_STATE, NULL,
- 0, &resp, sizeof(resp));
+ 0, &resp, sizeof(resp));
} else {
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
TZ_INFO_GET_SECURE_STATE),
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index d3130cfd6433..4e4b39c26e89 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -691,19 +691,26 @@ static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
type.type |= bit_mask_irq;
if (flow_type & IRQF_TRIGGER_RISING)
- type.polarity_high |= bit_mask_irq;
+ type.polarity_high |= bit_mask_irq;
+ else
+ type.polarity_high &= ~bit_mask_irq;
if (flow_type & IRQF_TRIGGER_FALLING)
- type.polarity_low |= bit_mask_irq;
+ type.polarity_low |= bit_mask_irq;
+ else
+ type.polarity_low &= ~bit_mask_irq;
} else {
if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
(flow_type & (IRQF_TRIGGER_LOW)))
return -EINVAL;
type.type &= ~bit_mask_irq; /* level trig */
- if (flow_type & IRQF_TRIGGER_HIGH)
- type.polarity_high |= bit_mask_irq;
- else
- type.polarity_low |= bit_mask_irq;
+ if (flow_type & IRQF_TRIGGER_HIGH) {
+ type.polarity_high |= bit_mask_irq;
+ type.polarity_low &= ~bit_mask_irq;
+ } else {
+ type.polarity_low |= bit_mask_irq;
+ type.polarity_high &= ~bit_mask_irq;
+ }
}
qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 26629b856f91..6c4445863705 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -90,6 +90,15 @@ config ONESHOT_SYNC_USER
help
Provide a userspace API for creating oneshot sync objects.
+config ANDROID_VSOC
+ tristate "Android Virtual SoC support"
+ default n
+ depends on PCI_MSI
+ ---help---
+ This option adds support for the Virtual SoC driver needed to boot
+ a 'cuttlefish' Android image inside QEmu. The driver interacts with
+ a QEmu ivshmem device. If built as a module, it will be called vsoc.
+
source "drivers/staging/android/ion/Kconfig"
source "drivers/staging/android/fiq_debugger/Kconfig"
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index b0b47ae4c0ea..8ef816152020 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
obj-$(CONFIG_SYNC) += sync.o sync_debug.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o
obj-$(CONFIG_ONESHOT_SYNC) += oneshot_sync.o
+obj-$(CONFIG_ANDROID_VSOC) += vsoc.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 8f3ac37bfe12..0c32c00fa700 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -25,5 +25,15 @@ ion/
exposes existing cma regions and doesn't reserve unecessarily memory when
booting a system which doesn't use ion.
+vsoc.c, uapi/vsoc_shm.h
+ - The current driver uses the same wait queue for all of the futexes in a
+ region. This will cause false wakeups in regions with a large number of
+ waiting threads. We should eventually use multiple queues and select the
+ queue based on the region.
+ - Add debugfs support for examining the permissions of regions.
+ - Use ioremap_wc instead of ioremap_nocache.
+ - Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been
+ superseded by the futex and is there for legacy reasons.
+
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index f5a81fc48ffb..5ed1ed37fad8 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1031,7 +1031,6 @@ void ion_client_destroy(struct ion_client *client)
struct ion_device *dev = client->dev;
struct rb_node *n;
- pr_debug("%s: %d\n", __func__, __LINE__);
mutex_lock(&debugfs_mutex);
while ((n = rb_first(&client->handles))) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
@@ -1239,9 +1238,6 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
int i;
- pr_debug("%s: syncing for device %s\n", __func__,
- dev ? dev_name(dev) : "null");
-
if (!ion_buffer_fault_user_mappings(buffer))
return;
@@ -1295,7 +1291,6 @@ static void ion_vm_open(struct vm_area_struct *vma)
mutex_lock(&buffer->lock);
list_add(&vma_list->list, &buffer->vmas);
mutex_unlock(&buffer->lock);
- pr_debug("%s: adding %pK\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
@@ -1303,14 +1298,12 @@ static void ion_vm_close(struct vm_area_struct *vma)
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list, *tmp;
- pr_debug("%s\n", __func__);
mutex_lock(&buffer->lock);
list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
if (vma_list->vma != vma)
continue;
list_del(&vma_list->list);
kfree(vma_list);
- pr_debug("%s: deleting %pK\n", __func__, vma);
break;
}
mutex_unlock(&buffer->lock);
@@ -1717,7 +1710,6 @@ static int ion_release(struct inode *inode, struct file *file)
{
struct ion_client *client = file->private_data;
- pr_debug("%s: %d\n", __func__, __LINE__);
ion_client_destroy(client);
return 0;
}
@@ -1729,7 +1721,6 @@ static int ion_open(struct inode *inode, struct file *file)
struct ion_client *client;
char debug_name[64];
- pr_debug("%s: %d\n", __func__, __LINE__);
snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
client = ion_client_create(dev, debug_name);
if (IS_ERR(client))
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index d932db4f9810..77bc25dfd562 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -65,8 +65,6 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
struct device *dev = heap->priv;
struct ion_cma_buffer_info *info;
- dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
if (!info)
return ION_CMA_ALLOCATE_FAILED;
@@ -94,7 +92,6 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
/* keep this for memory release */
buffer->priv_virt = info;
- dev_dbg(dev, "Allocate buffer %pK\n", buffer);
return 0;
err:
@@ -107,7 +104,6 @@ static void ion_cma_free(struct ion_buffer *buffer)
struct device *dev = buffer->heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(dev, "Release buffer %pK\n", buffer);
/* release memory */
dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
sg_free_table(info->table);
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index c19b87d10df0..0034dfe17ac8 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -64,6 +64,9 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
list_add_tail(&page->lru, &pool->low_items);
pool->low_count++;
}
+
+ mod_zone_page_state(page_zone(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+ (1 << (PAGE_SHIFT + pool->order)));
mutex_unlock(&pool->mutex);
return 0;
}
@@ -83,6 +86,8 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
}
list_del(&page->lru);
+ mod_zone_page_state(page_zone(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+ -(1 << (PAGE_SHIFT + pool->order)));
return page;
}
diff --git a/drivers/staging/android/uapi/vsoc_shm.h b/drivers/staging/android/uapi/vsoc_shm.h
new file mode 100644
index 000000000000..741b1387c25b
--- /dev/null
+++ b/drivers/staging/android/uapi/vsoc_shm.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_VSOC_SHM_H
+#define _UAPI_LINUX_VSOC_SHM_H
+
+#include <linux/types.h>
+
+/**
+ * A permission is a token that permits a receiver to read and/or write an area
+ * of memory within a Vsoc region.
+ *
+ * An fd_scoped permission grants both read and write access, and can be
+ * attached to a file description (see open(2)).
+ * Ownership of the area can then be shared by passing a file descriptor
+ * among processes.
+ *
+ * begin_offset and end_offset define the area of memory that is controlled by
+ * the permission. owner_offset points to a word, also in shared memory, that
+ * controls ownership of the area.
+ *
+ * Ownership of the area expires when the associated file description is
+ * released.
+ *
+ * At most one permission can be attached to each file description.
+ *
+ * This is useful when implementing HALs like gralloc that scope and pass
+ * ownership of shared resources via file descriptors.
+ *
+ * The caller is responsible for doing any fencing.
+ *
+ * The calling process will normally identify a currently free area of
+ * memory. It will construct a proposed fd_scoped_permission_arg structure:
+ *
+ * begin_offset and end_offset describe the area being claimed
+ *
+ * owner_offset points to the location in shared memory that indicates the
+ * owner of the area.
+ *
+ * owned_value is the value that will be stored in owner_offset iff the
+ * permission can be granted. It must be different than VSOC_REGION_FREE.
+ *
+ * Two fd_scoped_permission structures are compatible if they vary only by
+ * their owned_value fields.
+ *
+ * The driver ensures that, for any group of simultaneous callers proposing
+ * compatible fd_scoped_permissions, it will accept exactly one of the
+ * proposals. The other callers will get a failure with errno of EAGAIN.
+ *
+ * A process receiving a file descriptor can identify the region being
+ * granted using the VSOC_GET_FD_SCOPED_PERMISSION ioctl.
+ */
+struct fd_scoped_permission {
+ __u32 begin_offset;
+ __u32 end_offset;
+ __u32 owner_offset;
+ __u32 owned_value;
+};
+
+/*
+ * This value represents a free area of memory. The driver expects to see this
+ * value at owner_offset when creating a permission; otherwise the permission
+ * is not granted. The driver writes this value back once the permission is
+ * no longer needed.
+ */
+#define VSOC_REGION_FREE ((__u32)0)
+
+/**
+ * ioctl argument for VSOC_CREATE_FD_SCOPED_PERMISSION
+ */
+struct fd_scoped_permission_arg {
+ struct fd_scoped_permission perm;
+ __s32 managed_region_fd;
+};
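+
+/*
+ * Illustrative usage sketch (not part of the ABI; added for clarity).
+ * A process holding an fd on the managing region (manager_fd) and an fd
+ * on the managed region (managed_fd) could claim a page-aligned area as
+ * below. The fds, offsets and owned_value are hypothetical; owned_value
+ * may be anything other than VSOC_REGION_FREE. The ioctl itself,
+ * VSOC_CREATE_FD_SCOPED_PERMISSION, is defined further down in this
+ * header.
+ *
+ *   struct fd_scoped_permission_arg arg = {
+ *           .perm = {
+ *                   .begin_offset = 0x1000,
+ *                   .end_offset   = 0x3000,
+ *                   .owner_offset = 0x10,
+ *                   .owned_value  = 42,
+ *           },
+ *           .managed_region_fd = managed_fd,
+ *   };
+ *   ret = ioctl(manager_fd, VSOC_CREATE_FD_SCOPED_PERMISSION, &arg);
+ *
+ * A return of -1 with errno set to EAGAIN means a compatible proposal from
+ * another caller won the race; the caller may retry with a different area.
+ * On success the permission is attached to the file description behind
+ * managed_fd.
+ */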
+
+#define VSOC_NODE_FREE ((__u32)0)
+
+/*
+ * Describes a signal table in shared memory. Each non-zero entry in the
+ * table indicates that the receiver should signal the futex at the given
+ * offset. Offsets are relative to the region, not the shared memory window.
+ *
+ * interrupt_signalled_offset is used to reliably signal interrupts across the
+ * vmm boundary. There are two roles: transmitter and receiver. For example,
+ * in the host_to_guest_signal_table the host is the transmitter and the
+ * guest is the receiver. The protocol is as follows:
+ *
+ * 1. The transmitter should convert the offset of the futex to an offset
+ * in the signal table [0, (1 << num_nodes_lg2))
+ * The transmitter can choose any appropriate hashing algorithm, including
+ * hash = futex_offset & ((1 << num_nodes_lg2) - 1)
+ *
+ * 2. The transmitter should atomically compare and swap futex_offset with 0
+ * at hash. There are 3 possible outcomes:
+ * a. The swap fails because the futex_offset is already in the table.
+ * The transmitter should stop.
+ * b. Some other offset is in the table. This is a hash collision. The
+ * transmitter should move to another table slot and try again. One
+ * possible algorithm:
+ * hash = (hash + 1) & ((1 << num_nodes_lg2) - 1)
+ * c. The swap worked. Continue below.
+ *
+ * 3. The transmitter atomically swaps 1 with the value at the
+ * interrupt_signalled_offset. There are two outcomes:
+ * a. The prior value was 1. In this case an interrupt has already been
+ * posted. The transmitter is done.
+ * b. The prior value was 0, indicating that the receiver may be sleeping.
+ * The transmitter will issue an interrupt.
+ *
+ * 4. On waking, the receiver immediately exchanges a 0 with the
+ * interrupt_signalled_offset. If it receives a 0 then this is a spurious
+ * interrupt. That may occasionally happen in the current protocol, but
+ * should be rare.
+ *
+ * 5. The receiver scans the signal table by atomically exchanging 0 at each
+ * location. If a non-zero offset is returned from the exchange the
+ * receiver wakes all sleepers at the given offset:
+ * futex((int*)(region_base + old_value), FUTEX_WAKE, MAX_INT);
+ *
+ * 6. The receiver thread then does a conditional wait, waking immediately
+ * if the value at interrupt_signalled_offset is non-zero. This catches cases
+ * where additional signals were posted while the table was being scanned.
+ * On the guest the wait is handled via the VSOC_WAIT_FOR_INCOMING_INTERRUPT
+ * ioctl.
+ */
+struct vsoc_signal_table_layout {
+ /* log_2(Number of signal table entries) */
+ __u32 num_nodes_lg2;
+ /*
+ * Offset to the first signal table entry relative to the start of the
+ * region
+ */
+ __u32 futex_uaddr_table_offset;
+ /*
+ * Offset to an atomic_t / atomic uint32_t. A non-zero value indicates
+ * that one or more offsets are currently posted in the table.
+ * semi-unique access to an entry in the table
+ */
+ __u32 interrupt_signalled_offset;
+};
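+
+/*
+ * Illustrative transmitter-side sketch of the signalling protocol described
+ * above (not part of the ABI). region_base, futex_offset and ring_doorbell()
+ * are placeholders for whatever the transmitter actually uses; the atomic
+ * helpers are the usual kernel ones.
+ *
+ *   u32 mask = (1 << tbl->num_nodes_lg2) - 1;
+ *   u32 hash = futex_offset & mask;
+ *   atomic_t *slots = region_base + tbl->futex_uaddr_table_offset;
+ *   atomic_t *signalled = region_base + tbl->interrupt_signalled_offset;
+ *   u32 old;
+ *
+ *   for (;;) {
+ *           old = atomic_cmpxchg(&slots[hash], 0, futex_offset);
+ *           if (old == 0)
+ *                   break;
+ *           if (old == futex_offset)
+ *                   return;
+ *           hash = (hash + 1) & mask;
+ *   }
+ *   if (atomic_xchg(signalled, 1) == 0)
+ *           ring_doorbell();
+ *
+ * The early return covers case 2a above (the offset is already posted), the
+ * re-hash covers case 2b (a collision), and the final exchange implements
+ * step 3, raising an interrupt only when the receiver may be sleeping.
+ */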
+
+#define VSOC_REGION_WHOLE ((__s32)0)
+#define VSOC_DEVICE_NAME_SZ 16
+
+/**
+ * Each HAL would (usually) talk to a single device region
+ * Multiple entities care about these regions:
+ * - The ivshmem_server will populate the regions in shared memory
+ * - The guest kernel will read the region, create minor device nodes, and
+ * allow interested parties to register for FUTEX_WAKE events in the region
+ * - HALs will access via the minor device nodes published by the guest kernel
+ * - Host side processes will access the region via the ivshmem_server:
+ * 1. Pass name to ivshmem_server at a UNIX socket
+ * 2. ivshmem_server will reply with:
+ * - host->guest doorbell fd
+ * - guest->host doorbell fd
+ * - fd for the shared memory region
+ * - region offset
+ * 3. Start a futex receiver thread on the doorbell fd pointed at the
+ * signal_nodes
+ */
+struct vsoc_device_region {
+ __u16 current_version;
+ __u16 min_compatible_version;
+ __u32 region_begin_offset;
+ __u32 region_end_offset;
+ __u32 offset_of_region_data;
+ struct vsoc_signal_table_layout guest_to_host_signal_table;
+ struct vsoc_signal_table_layout host_to_guest_signal_table;
+ /* Name of the device. Must always be terminated with a '\0', so
+ * the longest supported device name is 15 characters.
+ */
+ char device_name[VSOC_DEVICE_NAME_SZ];
+ /* There are two ways that permissions to access regions are handled:
+ * - When managed_by is VSOC_REGION_WHOLE, any process that can
+ * open the device node for the region gains complete access to it.
+ * - When managed_by names another region, processes that open this
+ * region cannot access it directly. Access to a sub-region must be
+ * established by invoking the VSOC_CREATE_FD_SCOPED_PERMISSION ioctl
+ * on the region referenced in managed_by, providing a file instance
+ * (represented by an fd) opened on this region.
+ */
+ __u32 managed_by;
+};
+
+/*
+ * The vsoc layout descriptor.
+ * The first 4K should be reserved for the shm header and region descriptors.
+ * The regions should be page aligned.
+ */
+
+struct vsoc_shm_layout_descriptor {
+ __u16 major_version;
+ __u16 minor_version;
+
+ /* size of the shm. This may be redundant but nice to have */
+ __u32 size;
+
+ /* number of shared memory regions */
+ __u32 region_count;
+
+ /* The offset to the start of region descriptors */
+ __u32 vsoc_region_desc_offset;
+};
+
+/*
+ * This specifies the current version that should be stored in
+ * vsoc_shm_layout_descriptor.major_version and
+ * vsoc_shm_layout_descriptor.minor_version.
+ * It should be updated only if the vsoc_device_region and
+ * vsoc_shm_layout_descriptor structures have changed.
+ * Versioning within each region is transferred
+ * via the min_compatible_version and current_version fields in
+ * vsoc_device_region. The driver does not consult these fields: they are left
+ * for the HALs and host processes and will change independently of the layout
+ * version.
+ */
+#define CURRENT_VSOC_LAYOUT_MAJOR_VERSION 2
+#define CURRENT_VSOC_LAYOUT_MINOR_VERSION 0
+
+#define VSOC_CREATE_FD_SCOPED_PERMISSION \
+ _IOW(0xF5, 0, struct fd_scoped_permission)
+#define VSOC_GET_FD_SCOPED_PERMISSION _IOR(0xF5, 1, struct fd_scoped_permission)
+
+/*
+ * This is used to signal the host to scan the guest_to_host_signal_table
+ * for new futexes to wake. This sends an interrupt if one is not already
+ * in flight.
+ */
+#define VSOC_MAYBE_SEND_INTERRUPT_TO_HOST _IO(0xF5, 2)
+
+/*
+ * When this returns the guest will scan host_to_guest_signal_table to
+ * check for new futexes to wake.
+ */
+/* TODO(ghartman): Consider moving this to the bottom half */
+#define VSOC_WAIT_FOR_INCOMING_INTERRUPT _IO(0xF5, 3)
+
+/*
+ * Guest HALs will use this to retrieve the region description after
+ * opening their device node.
+ */
+#define VSOC_DESCRIBE_REGION _IOR(0xF5, 4, struct vsoc_device_region)
+
+/*
+ * Wake any threads that may be waiting for a host interrupt on this region.
+ * This is mostly used during shutdown.
+ */
+#define VSOC_SELF_INTERRUPT _IO(0xF5, 5)
+
+/*
+ * This is used to signal the host to scan the guest_to_host_signal_table
+ * for new futexes to wake. This sends an interrupt unconditionally.
+ */
+#define VSOC_SEND_INTERRUPT_TO_HOST _IO(0xF5, 6)
+
+enum wait_types {
+ VSOC_WAIT_UNDEFINED = 0,
+ VSOC_WAIT_IF_EQUAL = 1,
+ VSOC_WAIT_IF_EQUAL_TIMEOUT = 2
+};
+
+/*
+ * Wait for a condition to be true
+ *
+ * Note, this is sized and aligned so the 32 bit and 64 bit layouts are
+ * identical.
+ */
+struct vsoc_cond_wait {
+ /* Input: Offset of the 32 bit word to check */
+ __u32 offset;
+ /* Input: Value that will be compared with the word at offset */
+ __u32 value;
+ /* Input: Monotonic time to wake at in seconds */
+ __u64 wake_time_sec;
+ /* Input: Monotonic time to wake at in nanoseconds */
+ __u32 wake_time_nsec;
+ /* Input: Type of wait */
+ __u32 wait_type;
+ /* Output: Number of times the thread woke before returning. */
+ __u32 wakes;
+ /* Ensure that we're 8-byte aligned and 8 byte length for 32/64 bit
+ * compatibility.
+ */
+ __u32 reserved_1;
+};
+
+#define VSOC_COND_WAIT _IOWR(0xF5, 7, struct vsoc_cond_wait)
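+
+/*
+ * Illustrative guest-side sketch (not part of the ABI). region_fd,
+ * word_offset and expected are hypothetical; only the struct fields and
+ * ioctl numbers defined in this header are real.
+ *
+ *   struct vsoc_cond_wait w = {
+ *           .offset = word_offset,
+ *           .value = expected,
+ *           .wait_type = VSOC_WAIT_IF_EQUAL,
+ *   };
+ *   ret = ioctl(region_fd, VSOC_COND_WAIT, &w);
+ *
+ * On return, w.wakes reports how many times the thread woke before the
+ * call completed.
+ */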
+
+/* Wake any local threads waiting at the offset given in arg */
+#define VSOC_COND_WAKE _IO(0xF5, 8)
+
+#endif /* _UAPI_LINUX_VSOC_SHM_H */
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
new file mode 100644
index 000000000000..587c66d709b9
--- /dev/null
+++ b/drivers/staging/android/vsoc.c
@@ -0,0 +1,1169 @@
+/*
+ * drivers/android/staging/vsoc.c
+ *
+ * Android Virtual System on a Chip (VSoC) driver
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Author: ghartman@google.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory
+ * Copyright 2009 Cam Macdonell <cam@cs.ualberta.ca>
+ *
+ * Based on cirrusfb.c and 8139cp.c:
+ * Copyright 1999-2001 Jeff Garzik
+ * Copyright 2001-2004 Jeff Garzik
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/futex.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include "uapi/vsoc_shm.h"
+
+#define VSOC_DEV_NAME "vsoc"
+
+/*
+ * Description of the ivshmem-doorbell PCI device used by QEmu. These
+ * constants follow docs/specs/ivshmem-spec.txt, which can be found in
+ * the QEmu repository. This was last reconciled with the version that
+ * came out with 2.8
+ */
+
+/*
+ * These constants are the KVM Inter-VM shared memory device
+ * register offsets
+ */
+enum {
+ INTR_MASK = 0x00, /* Interrupt Mask */
+ INTR_STATUS = 0x04, /* Interrupt Status */
+ IV_POSITION = 0x08, /* VM ID */
+ DOORBELL = 0x0c, /* Doorbell */
+};
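+
+/*
+ * Note on the doorbell (a sketch of the register format, not a description
+ * of how this driver issues interrupts): per ivshmem-spec.txt, a write to
+ * the DOORBELL register encodes the destination peer ID in the high 16 bits
+ * and the interrupt vector in the low 16 bits, e.g. with hypothetical
+ * peer_id and vector values:
+ *
+ *   writel((peer_id << 16) | (vector & 0xffff), regs + DOORBELL);
+ */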
+
+static const int REGISTER_BAR; /* Equal to 0 */
+static const int MAX_REGISTER_BAR_LEN = 0x100;
+/*
+ * The MSI-x BAR is not used directly.
+ *
+ * static const int MSI_X_BAR = 1;
+ */
+static const int SHARED_MEMORY_BAR = 2;
+
+struct vsoc_region_data {
+ char name[VSOC_DEVICE_NAME_SZ + 1];
+ wait_queue_head_t interrupt_wait_queue;
+ /* TODO(b/73664181): Use multiple futex wait queues */
+ wait_queue_head_t futex_wait_queue;
+ /* Flag indicating that an interrupt has been signalled by the host. */
+ atomic_t *incoming_signalled;
+ /* Flag indicating the guest has signalled the host. */
+ atomic_t *outgoing_signalled;
+ int irq_requested;
+ int device_created;
+};
+
+struct vsoc_device {
+ /* Kernel virtual address of REGISTER_BAR. */
+ void __iomem *regs;
+ /* Physical address of SHARED_MEMORY_BAR. */
+ phys_addr_t shm_phys_start;
+ /* Kernel virtual address of SHARED_MEMORY_BAR. */
+ void *kernel_mapped_shm;
+ /* Size of the entire shared memory window in bytes. */
+ size_t shm_size;
+ /*
+ * Pointer to the virtual address of the shared memory layout structure.
+ * This is probably identical to kernel_mapped_shm, but saving this
+ * here saves a lot of annoying casts.
+ */
+ struct vsoc_shm_layout_descriptor *layout;
+ /*
+ * Points to a table of region descriptors in the kernel's virtual
+ * address space. Calculated from
+ * vsoc_shm_layout_descriptor.vsoc_region_desc_offset
+ */
+ struct vsoc_device_region *regions;
+ /* Head of a list of permissions that have been granted. */
+ struct list_head permissions;
+ struct pci_dev *dev;
+ /* Per-region (and therefore per-interrupt) information. */
+ struct vsoc_region_data *regions_data;
+ /*
+ * Table of msi-x entries. This has to be separated from struct
+ * vsoc_region_data because the kernel deals with them as an array.
+ */
+ struct msix_entry *msix_entries;
+ /*
+ * Flags that indicate what we've initialized. These are used to do an
+ * orderly cleanup of the device.
+ */
+ char enabled_device;
+ char requested_regions;
+ char cdev_added;
+ char class_added;
+ char msix_enabled;
+ /* Mutex that protects the permission list */
+ struct mutex mtx;
+ /* Major number assigned by the kernel */
+ int major;
+
+ struct cdev cdev;
+ struct class *class;
+};
+
+static struct vsoc_device vsoc_dev;
+
+/*
+ * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions.
+ */
+
+struct fd_scoped_permission_node {
+ struct fd_scoped_permission permission;
+ struct list_head list;
+};
+
+struct vsoc_private_data {
+ struct fd_scoped_permission_node *fd_scoped_permission_node;
+};
+
+static long vsoc_ioctl(struct file *, unsigned int, unsigned long);
+static int vsoc_mmap(struct file *, struct vm_area_struct *);
+static int vsoc_open(struct inode *, struct file *);
+static int vsoc_release(struct inode *, struct file *);
+static ssize_t vsoc_read(struct file *, char *, size_t, loff_t *);
+static ssize_t vsoc_write(struct file *, const char *, size_t, loff_t *);
+static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
+static int do_create_fd_scoped_permission(
+ struct vsoc_device_region *region_p,
+ struct fd_scoped_permission_node *np,
+ struct fd_scoped_permission_arg *__user arg);
+static void do_destroy_fd_scoped_permission(
+ struct vsoc_device_region *owner_region_p,
+ struct fd_scoped_permission *perm);
+static long do_vsoc_describe_region(struct file *,
+ struct vsoc_device_region __user *);
+static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off);
+
+/**
+ * Validate arguments on entry points to the driver.
+ */
+inline int vsoc_validate_inode(struct inode *inode)
+{
+ if (iminor(inode) >= vsoc_dev.layout->region_count) {
+ dev_err(&vsoc_dev.dev->dev,
+ "describe_region: invalid region %d\n", iminor(inode));
+ return -ENODEV;
+ }
+ return 0;
+}
+
+inline int vsoc_validate_filep(struct file *filp)
+{
+ int ret = vsoc_validate_inode(file_inode(filp));
+
+ if (ret)
+ return ret;
+ if (!filp->private_data) {
+ dev_err(&vsoc_dev.dev->dev,
+ "No private data on fd, region %d\n",
+ iminor(file_inode(filp)));
+ return -EBADFD;
+ }
+ return 0;
+}
+
+/* Converts from shared memory offset to virtual address */
+static inline void *shm_off_to_virtual_addr(__u32 offset)
+{
+ return vsoc_dev.kernel_mapped_shm + offset;
+}
+
+/* Converts from shared memory offset to physical address */
+static inline phys_addr_t shm_off_to_phys_addr(__u32 offset)
+{
+ return vsoc_dev.shm_phys_start + offset;
+}
+
+/**
+ * Convenience functions to obtain the region from the inode or file.
+ * Dangerous to call before validating the inode/file.
+ */
+static inline struct vsoc_device_region *vsoc_region_from_inode(
+ struct inode *inode)
+{
+ return &vsoc_dev.regions[iminor(inode)];
+}
+
+static inline struct vsoc_device_region *vsoc_region_from_filep(
+ struct file *inode)
+{
+ return vsoc_region_from_inode(file_inode(inode));
+}
+
+static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r)
+{
+ return r->region_end_offset - r->region_begin_offset;
+}
+
+static const struct file_operations vsoc_ops = {
+ .owner = THIS_MODULE,
+ .open = vsoc_open,
+ .mmap = vsoc_mmap,
+ .read = vsoc_read,
+ .unlocked_ioctl = vsoc_ioctl,
+ .compat_ioctl = vsoc_ioctl,
+ .write = vsoc_write,
+ .llseek = vsoc_lseek,
+ .release = vsoc_release,
+};
+
+static const struct pci_device_id vsoc_id_table[] = {
+ {0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0},
+};
+
+MODULE_DEVICE_TABLE(pci, vsoc_id_table);
+
+static void vsoc_remove_device(struct pci_dev *pdev);
+static int vsoc_probe_device(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static struct pci_driver vsoc_pci_driver = {
+ .name = "vsoc",
+ .id_table = vsoc_id_table,
+ .probe = vsoc_probe_device,
+ .remove = vsoc_remove_device,
+};
+
+static int do_create_fd_scoped_permission(
+ struct vsoc_device_region *region_p,
+ struct fd_scoped_permission_node *np,
+ struct fd_scoped_permission_arg __user *arg)
+{
+ struct file *managed_filp;
+ s32 managed_fd;
+ atomic_t *owner_ptr = NULL;
+ struct vsoc_device_region *managed_region_p;
+
+ if (copy_from_user(&np->permission, &arg->perm, sizeof(np->permission)) ||
+ copy_from_user(&managed_fd,
+ &arg->managed_region_fd, sizeof(managed_fd))) {
+ return -EFAULT;
+ }
+ managed_filp = fdget(managed_fd).file;
+ /* Check that it's a valid fd. */
+ if (!managed_filp || vsoc_validate_filep(managed_filp))
+ return -EPERM;
+ /* EEXIST if the given fd already has a permission. */
+ if (((struct vsoc_private_data *)managed_filp->private_data)->
+ fd_scoped_permission_node)
+ return -EEXIST;
+ managed_region_p = vsoc_region_from_filep(managed_filp);
+ /* Check that the provided region is managed by this one */
+ if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p)
+ return -EPERM;
+ /* The area must be well formed and have non-zero size */
+ if (np->permission.begin_offset >= np->permission.end_offset)
+ return -EINVAL;
+ /* The area must fit in the memory window */
+ if (np->permission.end_offset >
+ vsoc_device_region_size(managed_region_p))
+ return -ERANGE;
+ /* The area must be in the region data section */
+ if (np->permission.begin_offset <
+ managed_region_p->offset_of_region_data)
+ return -ERANGE;
+ /* The area must be page aligned */
+ if (!PAGE_ALIGNED(np->permission.begin_offset) ||
+ !PAGE_ALIGNED(np->permission.end_offset))
+ return -EINVAL;
+ /* Owner offset must be naturally aligned in the window */
+ if (np->permission.owner_offset &
+ (sizeof(np->permission.owner_offset) - 1))
+ return -EINVAL;
+ /* The owner flag must reside in the owner memory */
+ if (np->permission.owner_offset + sizeof(np->permission.owner_offset) >
+ vsoc_device_region_size(region_p))
+ return -ERANGE;
+ /* The owner flag must reside in the data section */
+ if (np->permission.owner_offset < region_p->offset_of_region_data)
+ return -EINVAL;
+ /* The owner value must change to claim the memory */
+ if (np->permission.owned_value == VSOC_REGION_FREE)
+ return -EINVAL;
+ owner_ptr =
+ (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset +
+ np->permission.owner_offset);
+ /* We've already verified that this is in the shared memory window, so
+ * it should be safe to write to this address.
+ */
+ if (atomic_cmpxchg(owner_ptr,
+ VSOC_REGION_FREE,
+ np->permission.owned_value) != VSOC_REGION_FREE) {
+ return -EBUSY;
+ }
+ ((struct vsoc_private_data *)managed_filp->private_data)->
+ fd_scoped_permission_node = np;
+ /* The file offset needs to be adjusted if the calling
+ * process did any read/write operations on the fd
+ * before creating the permission.
+ */
+ if (managed_filp->f_pos) {
+ if (managed_filp->f_pos > np->permission.end_offset) {
+ /* If the offset is beyond the permission end, set it
+ * to the end.
+ */
+ managed_filp->f_pos = np->permission.end_offset;
+ } else {
+ /* If the offset is within the permission interval
+ * keep it there otherwise reset it to zero.
+ */
+ if (managed_filp->f_pos < np->permission.begin_offset) {
+ managed_filp->f_pos = 0;
+ } else {
+ managed_filp->f_pos -=
+ np->permission.begin_offset;
+ }
+ }
+ }
+ return 0;
+}
+
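For illustration, a minimal userspace sketch of the contract enforced above: the permission window must be page aligned, non-empty and inside the managed region's data section, and the owner word in the managing region is claimed with a value other than VSOC_REGION_FREE. The uapi header path, the ioctl request encodings and the exact field types are assumptions here; only the struct and ioctl names come from this file.

/* Hypothetical userspace sketch; header path and field types are assumed. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vsoc_shm.h>	/* assumed uapi header for the structs below */

int grant_window(int manager_fd, int managed_fd,
		 __u32 begin, __u32 end, __u32 owner_off, __u32 owned_value)
{
	struct fd_scoped_permission_arg arg = {
		.perm = {
			/* Page-aligned, non-empty window inside the managed
			 * region's data section (see the checks above). */
			.begin_offset = begin,
			.end_offset = end,
			/* Owner word lives in the managing region's data
			 * section and must not be VSOC_REGION_FREE. */
			.owner_offset = owner_off,
			.owned_value = owned_value,
		},
		.managed_region_fd = managed_fd,
	};

	if (ioctl(manager_fd, VSOC_CREATE_FD_SCOPED_PERMISSION, &arg) < 0) {
		perror("VSOC_CREATE_FD_SCOPED_PERMISSION");
		return -1;
	}
	/* The permission is torn down when managed_fd is finally released
	 * (see vsoc_release() later in this file). */
	return 0;
}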
+static void do_destroy_fd_scoped_permission_node(
+ struct vsoc_device_region *owner_region_p,
+ struct fd_scoped_permission_node *node)
+{
+ if (node) {
+ do_destroy_fd_scoped_permission(owner_region_p,
+ &node->permission);
+ mutex_lock(&vsoc_dev.mtx);
+ list_del(&node->list);
+ mutex_unlock(&vsoc_dev.mtx);
+ kfree(node);
+ }
+}
+
+static void do_destroy_fd_scoped_permission(
+ struct vsoc_device_region *owner_region_p,
+ struct fd_scoped_permission *perm)
+{
+ atomic_t *owner_ptr = NULL;
+ int prev = 0;
+
+ if (!perm)
+ return;
+ owner_ptr = (atomic_t *)shm_off_to_virtual_addr(
+ owner_region_p->region_begin_offset + perm->owner_offset);
+ prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE);
+ if (prev != perm->owned_value)
+ dev_err(&vsoc_dev.dev->dev,
+ "%x-%x: owner (%s) %x: expected to be %x was %x",
+ perm->begin_offset, perm->end_offset,
+ owner_region_p->device_name, perm->owner_offset,
+ perm->owned_value, prev);
+}
+
+static long do_vsoc_describe_region(struct file *filp,
+ struct vsoc_device_region __user *dest)
+{
+ struct vsoc_device_region *region_p;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ region_p = vsoc_region_from_filep(filp);
+ if (copy_to_user(dest, region_p, sizeof(*region_p)))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * Implements the inner logic of cond_wait. Copies to and from userspace are
+ * done in the helper function below.
+ */
+static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
+{
+ DEFINE_WAIT(wait);
+ u32 region_number = iminor(file_inode(filp));
+ struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
+ struct hrtimer_sleeper timeout, *to = NULL;
+ int ret = 0;
+ struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
+ atomic_t *address = NULL;
+ struct timespec ts;
+
+ /* Ensure that the offset is aligned */
+ if (arg->offset & (sizeof(uint32_t) - 1))
+ return -EADDRNOTAVAIL;
+ /* Ensure that the offset is within shared memory */
+ if (((uint64_t)arg->offset) + region_p->region_begin_offset +
+ sizeof(uint32_t) > region_p->region_end_offset)
+ return -E2BIG;
+ address = shm_off_to_virtual_addr(region_p->region_begin_offset +
+ arg->offset);
+
+ /* Ensure that the type of wait is valid */
+ switch (arg->wait_type) {
+ case VSOC_WAIT_IF_EQUAL:
+ break;
+ case VSOC_WAIT_IF_EQUAL_TIMEOUT:
+ to = &timeout;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (to) {
+ /* Copy the user-supplied timespec into the kernel structure.
+ * We do things this way to flatten differences between 32 bit
+ * and 64 bit timespecs.
+ */
+ ts.tv_sec = arg->wake_time_sec;
+ ts.tv_nsec = arg->wake_time_nsec;
+
+ if (!timespec_valid(&ts))
+ return -EINVAL;
+ hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
+ hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts),
+ current->timer_slack_ns);
+
+ hrtimer_init_sleeper(to, current);
+ }
+
+ while (1) {
+ prepare_to_wait(&data->futex_wait_queue, &wait,
+ TASK_INTERRUPTIBLE);
+ /*
+ * Check the sentinel value after prepare_to_wait. If the value
+ * changes after this check the writer will call signal,
+ * changing the task state from INTERRUPTIBLE to RUNNING. That
+ * will ensure that schedule() will eventually schedule this
+ * task.
+ */
+ if (atomic_read(address) != arg->value) {
+ ret = 0;
+ break;
+ }
+ if (to) {
+ hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
+ if (likely(to->task))
+ freezable_schedule();
+ hrtimer_cancel(&to->timer);
+ if (!to->task) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ } else {
+ freezable_schedule();
+ }
+ /* Count the number of times that we woke up. This is useful
+ * for unit testing.
+ */
+ ++arg->wakes;
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ }
+ finish_wait(&data->futex_wait_queue, &wait);
+ if (to)
+ destroy_hrtimer_on_stack(&to->timer);
+ return ret;
+}
+
+/**
+ * Handles the details of copying from/to userspace to ensure that the copies
+ * happen on all of the return paths of cond_wait.
+ */
+static int do_vsoc_cond_wait(struct file *filp,
+ struct vsoc_cond_wait __user *untrusted_in)
+{
+ struct vsoc_cond_wait arg;
+ int rval = 0;
+
+ if (copy_from_user(&arg, untrusted_in, sizeof(arg)))
+ return -EFAULT;
+ /* wakes is an out parameter. Initialize it to something sensible. */
+ arg.wakes = 0;
+ rval = handle_vsoc_cond_wait(filp, &arg);
+ if (copy_to_user(untrusted_in, &arg, sizeof(arg)))
+ return -EFAULT;
+ return rval;
+}
+
+static int do_vsoc_cond_wake(struct file *filp, uint32_t offset)
+{
+ struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
+ u32 region_number = iminor(file_inode(filp));
+ struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
+ /* Ensure that the offset is aligned */
+ if (offset & (sizeof(uint32_t) - 1))
+ return -EADDRNOTAVAIL;
+ /* Ensure that the offset is within shared memory */
+ if (((uint64_t)offset) + region_p->region_begin_offset +
+ sizeof(uint32_t) > region_p->region_end_offset)
+ return -E2BIG;
+ /*
+ * TODO(b/73664181): Use multiple futex wait queues.
+ * We need to wake every sleeper when the condition changes. Typically
+ * only a single thread will be waiting on the condition, but there
+ * are exceptions. The worst case is about 10 threads.
+ */
+ wake_up_interruptible_all(&data->futex_wait_queue);
+ return 0;
+}
+
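To make the futex-style protocol above concrete, here is a hedged userspace sketch: a waiter sleeps while a 4-byte-aligned word in the region still holds an expected value, optionally bounded by an absolute CLOCK_MONOTONIC deadline, and a writer changes the word and then issues VSOC_COND_WAKE with the same offset. The header path and field types are assumptions; the ioctl names, wait types and field names are taken from the code above.

/* Hypothetical userspace sketch; header path and field types are assumed. */
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <sys/ioctl.h>
#include <linux/vsoc_shm.h>

#define NSEC_PER_SEC 1000000000L

/* Sleep while the word at 'offset' (relative to the region) equals 'expected'. */
static int wait_on_word(int region_fd, uint32_t offset, uint32_t expected,
			long timeout_ns)
{
	struct vsoc_cond_wait wait;
	struct timespec now;

	memset(&wait, 0, sizeof(wait));
	wait.offset = offset;			/* must be 4-byte aligned */
	wait.value = expected;
	if (timeout_ns > 0) {
		/* The driver expects an absolute CLOCK_MONOTONIC wake time. */
		clock_gettime(CLOCK_MONOTONIC, &now);
		wait.wake_time_sec = now.tv_sec +
			(now.tv_nsec + timeout_ns) / NSEC_PER_SEC;
		wait.wake_time_nsec = (now.tv_nsec + timeout_ns) % NSEC_PER_SEC;
		wait.wait_type = VSOC_WAIT_IF_EQUAL_TIMEOUT;
	} else {
		wait.wait_type = VSOC_WAIT_IF_EQUAL;
	}
	/* wait.wakes is an out parameter counting how often the waiter woke. */
	return ioctl(region_fd, VSOC_COND_WAIT, &wait);
}

/* Writer side: store the new value into shared memory first, then wake. */
static int wake_word(int region_fd, uint32_t offset)
{
	return ioctl(region_fd, VSOC_COND_WAKE, offset);
}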
+static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int rv = 0;
+ struct vsoc_device_region *region_p;
+ u32 reg_num;
+ struct vsoc_region_data *reg_data;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ region_p = vsoc_region_from_filep(filp);
+ reg_num = iminor(file_inode(filp));
+ reg_data = vsoc_dev.regions_data + reg_num;
+ switch (cmd) {
+ case VSOC_CREATE_FD_SCOPED_PERMISSION:
+ {
+ struct fd_scoped_permission_node *node = NULL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ /* We can't allocate memory for the permission */
+ if (!node)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&node->list);
+ rv = do_create_fd_scoped_permission(
+ region_p,
+ node,
+ (struct fd_scoped_permission_arg __user *)arg);
+ if (!rv) {
+ mutex_lock(&vsoc_dev.mtx);
+ list_add(&node->list, &vsoc_dev.permissions);
+ mutex_unlock(&vsoc_dev.mtx);
+ } else {
+ kfree(node);
+ return rv;
+ }
+ }
+ break;
+
+ case VSOC_GET_FD_SCOPED_PERMISSION:
+ {
+ struct fd_scoped_permission_node *node =
+ ((struct vsoc_private_data *)filp->private_data)->
+ fd_scoped_permission_node;
+ if (!node)
+ return -ENOENT;
+ if (copy_to_user
+ ((struct fd_scoped_permission __user *)arg,
+ &node->permission, sizeof(node->permission)))
+ return -EFAULT;
+ }
+ break;
+
+ case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST:
+ if (!atomic_xchg(
+ reg_data->outgoing_signalled,
+ 1)) {
+ writel(reg_num, vsoc_dev.regs + DOORBELL);
+ return 0;
+ } else {
+ return -EBUSY;
+ }
+ break;
+
+ case VSOC_SEND_INTERRUPT_TO_HOST:
+ writel(reg_num, vsoc_dev.regs + DOORBELL);
+ return 0;
+
+ case VSOC_WAIT_FOR_INCOMING_INTERRUPT:
+ wait_event_interruptible(
+ reg_data->interrupt_wait_queue,
+ (atomic_read(reg_data->incoming_signalled) != 0));
+ break;
+
+ case VSOC_DESCRIBE_REGION:
+ return do_vsoc_describe_region(
+ filp,
+ (struct vsoc_device_region __user *)arg);
+
+ case VSOC_SELF_INTERRUPT:
+ atomic_set(reg_data->incoming_signalled, 1);
+ wake_up_interruptible(&reg_data->interrupt_wait_queue);
+ break;
+
+ case VSOC_COND_WAIT:
+ return do_vsoc_cond_wait(filp,
+ (struct vsoc_cond_wait __user *)arg);
+ case VSOC_COND_WAKE:
+ return do_vsoc_cond_wake(filp, arg);
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
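A short hedged sketch of the remaining ioctls dispatched above, describing a region and exchanging doorbell interrupts with the host; error handling is elided and the uapi header path is an assumption.

/* Hypothetical userspace sketch; error handling elided. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vsoc_shm.h>	/* assumed uapi header */

static void poke_host_and_wait(int region_fd)
{
	struct vsoc_device_region desc;

	/* Snapshot of the region descriptor kept in shared memory. */
	if (ioctl(region_fd, VSOC_DESCRIBE_REGION, &desc) == 0)
		printf("region 0x%x..0x%x, data at +0x%x\n",
		       desc.region_begin_offset, desc.region_end_offset,
		       desc.offset_of_region_data);

	/* Ring the host doorbell only if it is not already pending;
	 * VSOC_MAYBE_SEND_INTERRUPT_TO_HOST returns -EBUSY otherwise. */
	if (ioctl(region_fd, VSOC_MAYBE_SEND_INTERRUPT_TO_HOST) < 0)
		ioctl(region_fd, VSOC_SEND_INTERRUPT_TO_HOST);

	/* Block until the host signals this region back. */
	ioctl(region_fd, VSOC_WAIT_FOR_INCOMING_INTERRUPT);
}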
+static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
+ loff_t *poffset)
+{
+ __u32 area_off;
+ void *area_p;
+ ssize_t area_len;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ area_len = vsoc_get_area(filp, &area_off);
+ area_p = shm_off_to_virtual_addr(area_off);
+ area_p += *poffset;
+ area_len -= *poffset;
+ if (area_len <= 0)
+ return 0;
+ if (area_len < len)
+ len = area_len;
+ if (copy_to_user(buffer, area_p, len))
+ return -EFAULT;
+ *poffset += len;
+ return len;
+}
+
+static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin)
+{
+ ssize_t area_len = 0;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ area_len = vsoc_get_area(filp, NULL);
+ switch (origin) {
+ case SEEK_SET:
+ break;
+
+ case SEEK_CUR:
+ if (offset > 0 && offset + filp->f_pos < 0)
+ return -EOVERFLOW;
+ offset += filp->f_pos;
+ break;
+
+ case SEEK_END:
+ if (offset > 0 && offset + area_len < 0)
+ return -EOVERFLOW;
+ offset += area_len;
+ break;
+
+ case SEEK_DATA:
+ if (offset >= area_len)
+ return -EINVAL;
+ if (offset < 0)
+ offset = 0;
+ break;
+
+ case SEEK_HOLE:
+ /* Next hole is always the end of the region, unless offset is
+ * beyond that
+ */
+ if (offset < area_len)
+ offset = area_len;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (offset < 0 || offset > area_len)
+ return -EINVAL;
+ filp->f_pos = offset;
+
+ return offset;
+}
+
+static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
+ size_t len, loff_t *poffset)
+{
+ __u32 area_off;
+ void *area_p;
+ ssize_t area_len;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ area_len = vsoc_get_area(filp, &area_off);
+ area_p = shm_off_to_virtual_addr(area_off);
+ area_p += *poffset;
+ area_len -= *poffset;
+ if (area_len <= 0)
+ return 0;
+ if (area_len < len)
+ len = area_len;
+ if (copy_from_user(area_p, buffer, len))
+ return -EFAULT;
+ *poffset += len;
+ return len;
+}
+
+static irqreturn_t vsoc_interrupt(int irq, void *region_data_v)
+{
+ struct vsoc_region_data *region_data =
+ (struct vsoc_region_data *)region_data_v;
+ int reg_num = region_data - vsoc_dev.regions_data;
+
+ if (unlikely(!region_data))
+ return IRQ_NONE;
+
+ if (unlikely(reg_num < 0 ||
+ reg_num >= vsoc_dev.layout->region_count)) {
+ dev_err(&vsoc_dev.dev->dev,
+ "invalid irq @%p reg_num=0x%04x\n",
+ region_data, reg_num);
+ return IRQ_NONE;
+ }
+ if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) {
+ dev_err(&vsoc_dev.dev->dev,
+ "irq not aligned @%p reg_num=0x%04x\n",
+ region_data, reg_num);
+ return IRQ_NONE;
+ }
+ wake_up_interruptible(&region_data->interrupt_wait_queue);
+ return IRQ_HANDLED;
+}
+
+static int vsoc_probe_device(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int result;
+ int i;
+ resource_size_t reg_size;
+ dev_t devt;
+
+ vsoc_dev.dev = pdev;
+ result = pci_enable_device(pdev);
+ if (result) {
+ dev_err(&pdev->dev,
+ "pci_enable_device failed %s: error %d\n",
+ pci_name(pdev), result);
+ return result;
+ }
+ vsoc_dev.enabled_device = 1;
+ result = pci_request_regions(pdev, "vsoc");
+ if (result < 0) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.requested_regions = 1;
+ /* Set up the control registers in BAR 0 */
+ reg_size = pci_resource_len(pdev, REGISTER_BAR);
+ if (reg_size > MAX_REGISTER_BAR_LEN)
+ vsoc_dev.regs =
+ pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN);
+ else
+ vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size);
+
+ if (!vsoc_dev.regs) {
+ dev_err(&pdev->dev,
+ "cannot ioremap registers of size %zu\n",
+ (size_t)reg_size);
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+
+ /* Map the shared memory in BAR 2 */
+ vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
+ vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);
+
+ dev_info(&pdev->dev, "shared memory @ DMA %p size=0x%zx\n",
+ (void *)vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
+ /* TODO(ghartman): ioremap_wc should work here */
+ vsoc_dev.kernel_mapped_shm = ioremap_nocache(
+ vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
+ if (!vsoc_dev.kernel_mapped_shm) {
+ dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+
+ vsoc_dev.layout =
+ (struct vsoc_shm_layout_descriptor *)vsoc_dev.kernel_mapped_shm;
+ dev_info(&pdev->dev, "major_version: %d\n",
+ vsoc_dev.layout->major_version);
+ dev_info(&pdev->dev, "minor_version: %d\n",
+ vsoc_dev.layout->minor_version);
+ dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size);
+ dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count);
+ if (vsoc_dev.layout->major_version !=
+ CURRENT_VSOC_LAYOUT_MAJOR_VERSION) {
+ dev_err(&vsoc_dev.dev->dev,
+ "driver supports only major_version %d\n",
+ CURRENT_VSOC_LAYOUT_MAJOR_VERSION);
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count,
+ VSOC_DEV_NAME);
+ if (result) {
+ dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.major = MAJOR(devt);
+ cdev_init(&vsoc_dev.cdev, &vsoc_ops);
+ vsoc_dev.cdev.owner = THIS_MODULE;
+ result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count);
+ if (result) {
+ dev_err(&vsoc_dev.dev->dev, "cdev_add error\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.cdev_added = 1;
+ vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
+ if (IS_ERR(vsoc_dev.class)) {
+ dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
+ vsoc_remove_device(pdev);
+ return PTR_ERR(vsoc_dev.class);
+ }
+ vsoc_dev.class_added = 1;
+ vsoc_dev.regions = (struct vsoc_device_region *)
+ (vsoc_dev.kernel_mapped_shm +
+ vsoc_dev.layout->vsoc_region_desc_offset);
+ vsoc_dev.msix_entries = kcalloc(
+ vsoc_dev.layout->region_count,
+ sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL);
+ if (!vsoc_dev.msix_entries) {
+ dev_err(&vsoc_dev.dev->dev,
+ "unable to allocate msix_entries\n");
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ vsoc_dev.regions_data = kcalloc(
+ vsoc_dev.layout->region_count,
+ sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL);
+ if (!vsoc_dev.regions_data) {
+ dev_err(&vsoc_dev.dev->dev,
+ "unable to allocate regions' data\n");
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i)
+ vsoc_dev.msix_entries[i].entry = i;
+
+ result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries,
+ vsoc_dev.layout->region_count);
+ if (result) {
+ dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result);
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ /* Check that all regions are well formed */
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+ const struct vsoc_device_region *region = vsoc_dev.regions + i;
+
+ if (!PAGE_ALIGNED(region->region_begin_offset) ||
+ !PAGE_ALIGNED(region->region_end_offset)) {
+ dev_err(&vsoc_dev.dev->dev,
+ "region %d not aligned (%x:%x)", i,
+ region->region_begin_offset,
+ region->region_end_offset);
+ vsoc_remove_device(pdev);
+ return -EFAULT;
+ }
+ if (region->region_begin_offset >= region->region_end_offset ||
+ region->region_end_offset > vsoc_dev.shm_size) {
+ dev_err(&vsoc_dev.dev->dev,
+ "region %d offsets are wrong: %x %x %zx",
+ i, region->region_begin_offset,
+ region->region_end_offset, vsoc_dev.shm_size);
+ vsoc_remove_device(pdev);
+ return -EFAULT;
+ }
+ if (region->managed_by >= vsoc_dev.layout->region_count) {
+ dev_err(&vsoc_dev.dev->dev,
+ "region %d has invalid owner: %u",
+ i, region->managed_by);
+ vsoc_remove_device(pdev);
+ return -EFAULT;
+ }
+ }
+ vsoc_dev.msix_enabled = 1;
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+ const struct vsoc_device_region *region = vsoc_dev.regions + i;
+ size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
+ const struct vsoc_signal_table_layout *h_to_g_signal_table =
+ &region->host_to_guest_signal_table;
+ const struct vsoc_signal_table_layout *g_to_h_signal_table =
+ &region->guest_to_host_signal_table;
+
+ vsoc_dev.regions_data[i].name[name_sz] = '\0';
+ memcpy(vsoc_dev.regions_data[i].name, region->device_name,
+ name_sz);
+ dev_info(&pdev->dev, "region %d name=%s\n",
+ i, vsoc_dev.regions_data[i].name);
+ init_waitqueue_head(
+ &vsoc_dev.regions_data[i].interrupt_wait_queue);
+ init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
+ vsoc_dev.regions_data[i].incoming_signalled =
+ vsoc_dev.kernel_mapped_shm +
+ region->region_begin_offset +
+ h_to_g_signal_table->interrupt_signalled_offset;
+ vsoc_dev.regions_data[i].outgoing_signalled =
+ vsoc_dev.kernel_mapped_shm +
+ region->region_begin_offset +
+ g_to_h_signal_table->interrupt_signalled_offset;
+
+ result = request_irq(
+ vsoc_dev.msix_entries[i].vector,
+ vsoc_interrupt, 0,
+ vsoc_dev.regions_data[i].name,
+ vsoc_dev.regions_data + i);
+ if (result) {
+ dev_info(&pdev->dev,
+ "request_irq failed irq=%d vector=%d\n",
+ i, vsoc_dev.msix_entries[i].vector);
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ vsoc_dev.regions_data[i].irq_requested = 1;
+ if (IS_ERR(device_create(vsoc_dev.class, NULL,
+ MKDEV(vsoc_dev.major, i),
+ NULL, vsoc_dev.regions_data[i].name))) {
+ dev_err(&vsoc_dev.dev->dev, "device_create failed\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.regions_data[i].device_created = 1;
+ }
+ return 0;
+}
+
+/*
+ * This should undo all of the allocations in the probe function in reverse
+ * order.
+ *
+ * Notes:
+ *
+ * The device may have been partially initialized, so double check
+ * that the allocations happened.
+ *
+ * This function may be called multiple times, so mark resources as freed
+ * as they are deallocated.
+ */
+static void vsoc_remove_device(struct pci_dev *pdev)
+{
+ int i;
+ /*
+ * pdev is the first thing to be set on probe and the last thing
+ * to be cleared here. If it's NULL then there is no cleanup.
+ */
+ if (!pdev || !vsoc_dev.dev)
+ return;
+ dev_info(&pdev->dev, "remove_device\n");
+ if (vsoc_dev.regions_data) {
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+ if (vsoc_dev.regions_data[i].device_created) {
+ device_destroy(vsoc_dev.class,
+ MKDEV(vsoc_dev.major, i));
+ vsoc_dev.regions_data[i].device_created = 0;
+ }
+ if (vsoc_dev.regions_data[i].irq_requested)
+ free_irq(vsoc_dev.msix_entries[i].vector, NULL);
+ vsoc_dev.regions_data[i].irq_requested = 0;
+ }
+ kfree(vsoc_dev.regions_data);
+ vsoc_dev.regions_data = NULL;
+ }
+ if (vsoc_dev.msix_enabled) {
+ pci_disable_msix(pdev);
+ vsoc_dev.msix_enabled = 0;
+ }
+ kfree(vsoc_dev.msix_entries);
+ vsoc_dev.msix_entries = NULL;
+ vsoc_dev.regions = NULL;
+ if (vsoc_dev.class_added) {
+ class_destroy(vsoc_dev.class);
+ vsoc_dev.class_added = 0;
+ }
+ if (vsoc_dev.cdev_added) {
+ cdev_del(&vsoc_dev.cdev);
+ vsoc_dev.cdev_added = 0;
+ }
+ if (vsoc_dev.major && vsoc_dev.layout) {
+ unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
+ vsoc_dev.layout->region_count);
+ vsoc_dev.major = 0;
+ }
+ vsoc_dev.layout = NULL;
+ if (vsoc_dev.kernel_mapped_shm) {
+ pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
+ vsoc_dev.kernel_mapped_shm = NULL;
+ }
+ if (vsoc_dev.regs) {
+ pci_iounmap(pdev, vsoc_dev.regs);
+ vsoc_dev.regs = NULL;
+ }
+ if (vsoc_dev.requested_regions) {
+ pci_release_regions(pdev);
+ vsoc_dev.requested_regions = 0;
+ }
+ if (vsoc_dev.enabled_device) {
+ pci_disable_device(pdev);
+ vsoc_dev.enabled_device = 0;
+ }
+ /* Do this last: it indicates that the device is not initialized. */
+ vsoc_dev.dev = NULL;
+}
+
+static void __exit vsoc_cleanup_module(void)
+{
+ vsoc_remove_device(vsoc_dev.dev);
+ pci_unregister_driver(&vsoc_pci_driver);
+}
+
+static int __init vsoc_init_module(void)
+{
+ int err = -ENOMEM;
+
+ INIT_LIST_HEAD(&vsoc_dev.permissions);
+ mutex_init(&vsoc_dev.mtx);
+
+ err = pci_register_driver(&vsoc_pci_driver);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static int vsoc_open(struct inode *inode, struct file *filp)
+{
+ /* Can't use vsoc_validate_filep because filp is still incomplete */
+ int ret = vsoc_validate_inode(inode);
+
+ if (ret)
+ return ret;
+ filp->private_data =
+ kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL);
+ if (!filp->private_data)
+ return -ENOMEM;
+ return 0;
+}
+
+static int vsoc_release(struct inode *inode, struct file *filp)
+{
+ struct vsoc_private_data *private_data = NULL;
+ struct fd_scoped_permission_node *node = NULL;
+ struct vsoc_device_region *owner_region_p = NULL;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ private_data = (struct vsoc_private_data *)filp->private_data;
+ if (!private_data)
+ return 0;
+
+ node = private_data->fd_scoped_permission_node;
+ if (node) {
+ owner_region_p = vsoc_region_from_inode(inode);
+ if (owner_region_p->managed_by != VSOC_REGION_WHOLE) {
+ owner_region_p =
+ &vsoc_dev.regions[owner_region_p->managed_by];
+ }
+ do_destroy_fd_scoped_permission_node(owner_region_p, node);
+ private_data->fd_scoped_permission_node = NULL;
+ }
+ kfree(private_data);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+/*
+ * Returns the device relative offset and length of the area specified by the
+ * fd scoped permission. If there is no fd scoped permission set, a default
+ * permission covering the entire region is assumed, unless the region is owned
+ * by another one, in which case the default is a permission with zero size.
+ */
+static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset)
+{
+ __u32 off = 0;
+ ssize_t length = 0;
+ struct vsoc_device_region *region_p;
+ struct fd_scoped_permission_node *node;
+
+ region_p = vsoc_region_from_filep(filp);
+ off = region_p->region_begin_offset;
+ node = ((struct vsoc_private_data *)filp->private_data)->
+ fd_scoped_permission_node;
+ if (node) {
+ off += node->permission.begin_offset;
+ length = node->permission.end_offset -
+ node->permission.begin_offset;
+ } else if (region_p->managed_by == VSOC_REGION_WHOLE) {
+ /* No permission set and the region is not owned by another,
+ * default to full region access.
+ */
+ length = vsoc_device_region_size(region_p);
+ } else {
+ /* return zero length, access is denied. */
+ length = 0;
+ }
+ if (area_offset)
+ *area_offset = off;
+ return length;
+}
+
+static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long len = vma->vm_end - vma->vm_start;
+ __u32 area_off;
+ phys_addr_t mem_off;
+ ssize_t area_len;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ area_len = vsoc_get_area(filp, &area_off);
+ /* Add the requested offset */
+ area_off += (vma->vm_pgoff << PAGE_SHIFT);
+ area_len -= (vma->vm_pgoff << PAGE_SHIFT);
+ if (area_len < len)
+ return -EINVAL;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ mem_off = shm_off_to_phys_addr(area_off);
+ if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT,
+ len, vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+
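As a hedged usage sketch tying vsoc_get_area() and vsoc_mmap() together: the mappable window defaults to the whole region and shrinks to the fd-scoped permission when one is attached to the fd. The device path below is an assumption; nodes are created per region under the region's device_name in vsoc_probe_device().

/* Hypothetical userspace sketch; the /dev path is an assumption. */
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

static volatile uint32_t *map_region(const char *path, size_t len)
{
	int fd = open(path, O_RDWR);
	void *p;

	if (fd < 0)
		return NULL;
	/* Page offset 0 maps the start of the area picked by vsoc_get_area():
	 * the permission window if one exists, otherwise the whole region. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);	/* the mapping holds its own reference */
	return p == MAP_FAILED ? NULL : (volatile uint32_t *)p;
}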
+module_init(vsoc_init_module);
+module_exit(vsoc_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Hartman <ghartman@google.com>");
+MODULE_DESCRIPTION("VSoC interpretation of QEMU's ivshmem device");
+MODULE_VERSION("1.0");
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 013a6240f193..c1ad0aea23b9 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -169,7 +169,7 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
hw->ident_sta_fw.variant) >
HFA384x_FIRMWARE_VERSION(1, 5, 0)) {
if (msg->scantype.data != P80211ENUM_scantype_active)
- word = cpu_to_le16(msg->maxchanneltime.data);
+ word = msg->maxchanneltime.data;
else
word = 0;
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c5547bd711db..6a8300108148 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -589,6 +589,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+ data->irq_enabled = true;
+ data->mode = THERMAL_DEVICE_ENABLED;
+
ret = devm_request_threaded_irq(&pdev->dev, data->irq,
imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
0, "imx_thermal", data);
@@ -600,9 +603,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
return ret;
}
- data->irq_enabled = true;
- data->mode = THERMAL_DEVICE_ENABLED;
-
return 0;
}
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 1246aa6fcab0..737635f0bec0 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
+ mutex_lock(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if ((instance->trip != params->trip_max_desired_temperature) ||
(!cdev_is_power_actor(instance->cdev)))
@@ -532,6 +533,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
instance->cdev->updated = false;
thermal_cdev_update(instance->cdev);
}
+ mutex_unlock(&tz->lock);
}
/**
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 20a41f7de76f..6713fd1958e7 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -627,6 +627,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
* we just disable hotplug, the
* pci-tunnels stay alive.
*/
+ .thaw_noirq = nhi_resume_noirq,
.restore_noirq = nhi_resume_noirq,
};
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index dc5c0e6cdb84..4686e93aaf94 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -87,32 +87,35 @@ static void do_rw_io(struct goldfish_tty *qtty,
}
static void goldfish_tty_rw(struct goldfish_tty *qtty,
- unsigned long addr,
+ const void *address_ptr,
unsigned int count,
int is_write)
{
dma_addr_t dma_handle;
enum dma_data_direction dma_dir;
+ uintptr_t address;
+ address = (uintptr_t)address_ptr;
dma_dir = (is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
if (qtty->version > 0) {
/*
* Goldfish TTY for Ranchu platform uses
* physical addresses and DMA for read/write operations
*/
- unsigned long addr_end = addr + count;
+ uintptr_t address_end = address + count;
- while (addr < addr_end) {
- unsigned long pg_end = (addr & PAGE_MASK) + PAGE_SIZE;
- unsigned long next =
- pg_end < addr_end ? pg_end : addr_end;
- unsigned long avail = next - addr;
+ while (address < address_end) {
+ uintptr_t page_end = (address & PAGE_MASK) + PAGE_SIZE;
+ uintptr_t next = page_end < address_end ?
+ page_end : address_end;
+ uintptr_t avail = next - address;
/*
* Map the buffer's virtual address to the DMA address
* so the buffer can be accessed by the device.
*/
- dma_handle = dma_map_single(qtty->dev, (void *)addr,
+ dma_handle = dma_map_single(qtty->dev, (void *)address,
avail, dma_dir);
if (dma_mapping_error(qtty->dev, dma_handle)) {
@@ -127,31 +130,30 @@ static void goldfish_tty_rw(struct goldfish_tty *qtty,
*/
dma_unmap_single(qtty->dev, dma_handle, avail, dma_dir);
- addr += avail;
+ address += avail;
}
} else {
/*
* Old style Goldfish TTY used on the Goldfish platform
* uses virtual addresses.
*/
- do_rw_io(qtty, addr, count, is_write);
+ do_rw_io(qtty, address, count, is_write);
}
+
}
static void goldfish_tty_do_write(int line, const char *buf,
unsigned int count)
{
struct goldfish_tty *qtty = &goldfish_ttys[line];
- unsigned long address = (unsigned long)(void *)buf;
- goldfish_tty_rw(qtty, address, count, 1);
+ goldfish_tty_rw(qtty, buf, count, 1);
}
static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
{
struct goldfish_tty *qtty = dev_id;
void __iomem *base = qtty->base;
- unsigned long address;
unsigned char *buf;
u32 count;
@@ -160,9 +162,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
return IRQ_NONE;
count = tty_prepare_flip_string(&qtty->port, &buf, count);
-
- address = (unsigned long)(void *)buf;
- goldfish_tty_rw(qtty, address, count, 0);
+ goldfish_tty_rw(qtty, buf, count, 0);
tty_schedule_flip(&qtty->port);
return IRQ_HANDLED;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 9aff37186246..6060c3e8925e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -137,6 +137,9 @@ struct gsm_dlci {
struct mutex mutex;
/* Link layer */
+ int mode;
+#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
+#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
spinlock_t lock; /* Protects the internal state */
struct timer_list t1; /* Retransmit timer for SABM and UA */
int retries;
@@ -1380,7 +1383,13 @@ retry:
ctrl->data = data;
ctrl->len = clen;
gsm->pending_cmd = ctrl;
- gsm->cretries = gsm->n2;
+
+ /* If DLCI0 is in ADM mode skip retries, it won't respond */
+ if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
+ gsm->cretries = 1;
+ else
+ gsm->cretries = gsm->n2;
+
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
gsm_control_transmit(gsm, ctrl);
spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1467,6 +1476,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
* in which case an opening port goes back to closed and a closing port
* is simply put into closed state (any further frames from the other
* end will get a DM response)
+ *
+ * Some control dlci can stay in ADM mode with other dlci working just
+ * fine. In that case we can just keep the control dlci open after the
+ * DLCI_OPENING retries time out.
*/
static void gsm_dlci_t1(unsigned long data)
@@ -1480,8 +1493,16 @@ static void gsm_dlci_t1(unsigned long data)
if (dlci->retries) {
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
- } else
+ } else if (!dlci->addr && gsm->control == (DM | PF)) {
+ if (debug & 8)
+ pr_info("DLCI %d opening in ADM mode.\n",
+ dlci->addr);
+ dlci->mode = DLCI_MODE_ADM;
+ gsm_dlci_open(dlci);
+ } else {
gsm_dlci_close(dlci);
+ }
+
break;
case DLCI_CLOSING:
dlci->retries--;
@@ -1499,8 +1520,8 @@ static void gsm_dlci_t1(unsigned long data)
* @dlci: DLCI to open
*
* Commence opening a DLCI from the Linux side. We issue SABM messages
- * to the modem which should then reply with a UA, at which point we
- * will move into open state. Opening is done asynchronously with retry
+ * to the modem which should then reply with a UA or ADM, at which point
+ * we will move into open state. Opening is done asynchronously with retry
* running off timers and the responses.
*/
@@ -2870,11 +2891,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
static int gsm_carrier_raised(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ struct gsm_mux *gsm = dlci->gsm;
+
/* Not yet open so no carrier info */
if (dlci->state != DLCI_OPEN)
return 0;
if (debug & 2)
return 1;
+
+ /*
+ * Basic mode with control channel in ADM mode may not respond
+ * to CMD_MSC at all and modem_rx is empty.
+ */
+ if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
+ !dlci->modem_rx)
+ return 1;
+
return dlci->modem_rx & TIOCM_CD;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 41dda25da049..190e5dc15738 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2238,6 +2238,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
}
if (tty_hung_up_p(file))
break;
+ /*
+ * Abort readers for ttys which never actually
+ * get hung up. See __tty_hangup().
+ */
+ if (test_bit(TTY_HUPPING, &tty->flags))
+ break;
if (!timeout)
break;
if (file->f_flags & O_NONBLOCK) {
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index e8dd296fb25b..c4383573cf66 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -608,6 +608,10 @@ static int omap_8250_startup(struct uart_port *port)
up->lsr_saved_flags = 0;
up->msr_saved_flags = 0;
+ /* Disable DMA for console UART */
+ if (uart_console(port))
+ up->dma = NULL;
+
if (up->dma) {
ret = serial8250_request_dma(up);
if (ret) {
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 5da2f1406546..4d02ff24cffe 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -3,7 +3,7 @@
* MSM 7k High speed uart driver
*
* Copyright (c) 2008 Google Inc.
- * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
* Modified: Nick Pelly <npelly@google.com>
*
* All source code in this file is licensed under the following license
@@ -2656,6 +2656,7 @@ static int msm_hs_startup(struct uart_port *uport)
int ret;
int rfr_level;
unsigned long flags;
+ u32 irq_type;
unsigned int data;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
struct circ_buf *tx_buf = &uport->state->xmit;
@@ -2676,8 +2677,11 @@ static int msm_hs_startup(struct uart_port *uport)
msm_hs_resource_vote(msm_uport);
if (is_use_low_power_wakeup(msm_uport)) {
+ irq_type = irq_get_trigger_type(msm_uport->wakeup.irq);
+ if (irq_type == IRQ_TYPE_NONE)
+ irq_type = IRQ_TYPE_EDGE_FALLING;
ret = request_irq(msm_uport->wakeup.irq, msm_hs_wakeup_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ irq_type | IRQF_ONESHOT,
"msm_hs_wakeup", msm_uport);
if (unlikely(ret)) {
MSM_HS_ERR("%s():Err getting uart wakeup_irq %d\n",
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index fcf803ffad19..cdd2f942317c 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -884,14 +884,19 @@ static int sccnxp_probe(struct platform_device *pdev)
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- if (PTR_ERR(clk) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ ret = PTR_ERR(clk);
+ if (ret == -EPROBE_DEFER)
goto err_out;
- }
+ uartclk = 0;
+ } else {
+ clk_prepare_enable(clk);
+ uartclk = clk_get_rate(clk);
+ }
+
+ if (!uartclk) {
dev_notice(&pdev->dev, "Using default clock frequency\n");
uartclk = s->chip->freq_std;
- } else
- uartclk = clk_get_rate(clk);
+ }
/* Check input frequency */
if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) {
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 3eb57eb532f1..02147361eaa9 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -20,6 +20,7 @@
#include <linux/gpio/consumer.h>
#include <linux/termios.h>
#include <linux/serial_core.h>
+#include <linux/module.h>
#include "serial_mctrl_gpio.h"
@@ -193,6 +194,7 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
return gpios;
}
+EXPORT_SYMBOL_GPL(mctrl_gpio_init);
void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
{
@@ -247,3 +249,6 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
disable_irq(gpios->irq[i]);
}
}
+EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index fc7711c75b01..8dd822feb972 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1457,7 +1457,16 @@ static void sci_free_dma(struct uart_port *port)
if (s->chan_rx)
sci_rx_dma_release(s, false);
}
-#else
+
+static void sci_flush_buffer(struct uart_port *port)
+{
+ /*
+ * In uart_flush_buffer(), the xmit circular buffer has just been
+ * cleared, so we have to reset tx_dma_len accordingly.
+ */
+ to_sci_port(port)->tx_dma_len = 0;
+}
+#else /* !CONFIG_SERIAL_SH_SCI_DMA */
static inline void sci_request_dma(struct uart_port *port)
{
}
@@ -1465,7 +1474,9 @@ static inline void sci_request_dma(struct uart_port *port)
static inline void sci_free_dma(struct uart_port *port)
{
}
-#endif
+
+#define sci_flush_buffer NULL
+#endif /* !CONFIG_SERIAL_SH_SCI_DMA */
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
@@ -2205,6 +2216,7 @@ static struct uart_ops sci_uart_ops = {
.break_ctl = sci_break_ctl,
.startup = sci_startup,
.shutdown = sci_shutdown,
+ .flush_buffer = sci_flush_buffer,
.set_termios = sci_set_termios,
.pm = sci_pm,
.type = sci_type,
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index a638c1738547..198451fa9e5d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -702,6 +702,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
return;
}
+ /*
+ * Some console devices aren't actually hung up for technical and
+ * historical reasons, which can lead to indefinite interruptible
+ * sleep in n_tty_read(). The following explicitly tells
+ * n_tty_read() to abort readers.
+ */
+ set_bit(TTY_HUPPING, &tty->flags);
+
/* inuse_filps is protected by the single tty lock,
this really needs to change if we want to flush the
workqueue with the lock held */
@@ -757,6 +765,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
* can't yet guarantee all that.
*/
set_bit(TTY_HUPPED, &tty->flags);
+ clear_bit(TTY_HUPPING, &tty->flags);
tty_unlock(tty);
if (f)
@@ -3145,7 +3154,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
kref_init(&tty->kref);
tty->magic = TTY_MAGIC;
- tty_ldisc_init(tty);
+ if (tty_ldisc_init(tty)) {
+ kfree(tty);
+ return NULL;
+ }
tty->session = NULL;
tty->pgrp = NULL;
mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 9bee25cfa0be..d9e013dc2c08 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -168,12 +168,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
return ERR_CAST(ldops);
}
- ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
- if (ld == NULL) {
- put_ldops(ldops);
- return ERR_PTR(-ENOMEM);
- }
-
+ /*
+ * There is no way to handle allocation failure of only 16 bytes.
+ * Let's simplify error handling and save more memory.
+ */
+ ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
ld->ops = ldops;
ld->tty = tty;
@@ -804,12 +803,13 @@ void tty_ldisc_release(struct tty_struct *tty)
* the tty structure is not completely set up when this call is made.
*/
-void tty_ldisc_init(struct tty_struct *tty)
+int tty_ldisc_init(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
if (IS_ERR(ld))
- panic("n_tty: init_tty");
+ return PTR_ERR(ld);
tty->ldisc = ld;
+ return 0;
}
/**
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 939c6ad71068..57ee43512992 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -851,7 +851,7 @@ static inline void ci_role_destroy(struct ci_hdrc *ci)
{
ci_hdrc_gadget_destroy(ci);
ci_hdrc_host_destroy(ci);
- if (ci->is_otg)
+ if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
ci_hdrc_otg_destroy(ci);
}
@@ -951,27 +951,35 @@ static int ci_hdrc_probe(struct platform_device *pdev)
/* initialize role(s) before the interrupt is requested */
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
ret = ci_hdrc_host_init(ci);
- if (ret)
- dev_info(dev, "doesn't support host\n");
+ if (ret) {
+ if (ret == -ENXIO)
+ dev_info(dev, "doesn't support host\n");
+ else
+ goto deinit_phy;
+ }
}
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
ret = ci_hdrc_gadget_init(ci);
- if (ret)
- dev_info(dev, "doesn't support gadget\n");
+ if (ret) {
+ if (ret == -ENXIO)
+ dev_info(dev, "doesn't support gadget\n");
+ else
+ goto deinit_host;
+ }
}
if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
dev_err(dev, "no supported roles\n");
ret = -ENODEV;
- goto deinit_phy;
+ goto deinit_gadget;
}
if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) {
ret = ci_hdrc_otg_init(ci);
if (ret) {
dev_err(dev, "init otg fails, ret = %d\n", ret);
- goto stop;
+ goto deinit_gadget;
}
}
@@ -1036,7 +1044,12 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci_extcon_unregister(ci);
stop:
- ci_role_destroy(ci);
+ if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
+ ci_hdrc_otg_destroy(ci);
+deinit_gadget:
+ ci_hdrc_gadget_destroy(ci);
+deinit_host:
+ ci_hdrc_host_destroy(ci);
deinit_phy:
ci_usb_phy_exit(ci);
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 72d1109f13eb..619e5446cbe8 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -242,8 +242,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
if (!udev->parent)
rc = hcd_bus_suspend(udev, msg);
- /* Non-root devices don't need to do anything for FREEZE or PRETHAW */
- else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
+ /*
+ * Non-root USB2 devices don't need to do anything for FREEZE
+ * or PRETHAW. USB3 devices don't support global suspend and
+ * needs to be selectively suspended.
+ */
+ else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
+ && (udev->speed < USB_SPEED_SUPER))
rc = 0;
else
rc = usb_port_suspend(udev, msg);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 592f45e6dbac..8d732e9f74fa 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2242,25 +2242,26 @@ usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
}
dma_addr_t
-usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
- struct usb_host_endpoint *ep)
+usb_hcd_get_dcba_dma_addr(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!HCD_RH_RUNNING(hcd))
return 0;
- return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
+ return hcd->driver->get_dcba_dma_addr(hcd, udev);
}
-int usb_hcd_get_controller_id(struct usb_device *udev)
+dma_addr_t
+usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!HCD_RH_RUNNING(hcd))
- return -EINVAL;
+ return 0;
- return hcd->driver->get_core_id(hcd);
+ return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
}
#ifdef CONFIG_PM
@@ -2395,6 +2396,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered) {
+ pm_wakeup_event(&hcd->self.root_hub->dev, 0);
set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
queue_work(pm_wq, &hcd->wakeup_work);
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5df314dd5f3c..cc7ab666d650 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -643,12 +643,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
unsigned int portnum)
{
struct usb_hub *hub;
+ struct usb_port *port_dev;
if (!hdev)
return;
hub = usb_hub_to_struct_hub(hdev);
if (hub) {
+ port_dev = hub->ports[portnum - 1];
+ if (port_dev && port_dev->child)
+ pm_wakeup_event(&port_dev->child->dev, 0);
+
set_bit(portnum, hub->wakeup_bits);
kick_hub_wq(hub);
}
@@ -3372,8 +3377,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = hub_port_status(hub, port1, &portstatus, &portchange);
- if (status == 0 && !port_is_suspended(hub, portstatus))
+ if (status == 0 && !port_is_suspended(hub, portstatus)) {
+ if (portchange & USB_PORT_STAT_C_SUSPEND)
+ pm_wakeup_event(&udev->dev, 0);
goto SuspendCleared;
+ }
/* see 7.1.7.7; affects power usage, but not budgeting */
if (hub_is_superspeed(hub->hdev))
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 4f1c6f8d4352..40ce175655e6 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -45,6 +45,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* HP v222w 16GB Mini USB Drive */
+ { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 65bf86f18a34..062677f8e91d 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -697,24 +697,25 @@ usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
}
EXPORT_SYMBOL(usb_get_sec_event_ring_dma_addr);
-dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
- struct usb_host_endpoint *ep)
+dma_addr_t
+usb_get_dcba_dma_addr(struct usb_device *dev)
{
if (dev->state == USB_STATE_NOTATTACHED)
return 0;
- return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
+ return usb_hcd_get_dcba_dma_addr(dev);
}
-EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
+EXPORT_SYMBOL(usb_get_dcba_dma_addr);
-int usb_get_controller_id(struct usb_device *dev)
+dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+ struct usb_host_endpoint *ep)
{
if (dev->state == USB_STATE_NOTATTACHED)
- return -EINVAL;
+ return 0;
- return usb_hcd_get_controller_id(dev);
+ return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
}
-EXPORT_SYMBOL(usb_get_controller_id);
+EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
/*-------------------------------------------------------------------*/
/*
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 2be268d2423d..03a926ebf34b 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -112,6 +112,10 @@ static int kdwc3_probe(struct platform_device *pdev)
dev->dma_mask = &kdwc3_dma_mask;
kdwc->clk = devm_clk_get(kdwc->dev, "usb");
+ if (IS_ERR(kdwc->clk)) {
+ dev_err(kdwc->dev, "unable to get usb clock\n");
+ return PTR_ERR(kdwc->clk);
+ }
error = clk_prepare_enable(kdwc->clk);
if (error < 0) {
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 2cd600a58fd7..3f59a2f8b84f 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -3487,6 +3487,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
if (on) {
dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
+ pm_runtime_get_sync(mdwc->dev);
mdwc->hs_phy->flags |= PHY_HOST_MODE;
if (dwc->maximum_speed == USB_SPEED_SUPER) {
mdwc->ss_phy->flags |= PHY_HOST_MODE;
@@ -3495,7 +3496,6 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
}
usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
- pm_runtime_get_sync(mdwc->dev);
dbg_event(0xFF, "StrtHost gync",
atomic_read(&mdwc->dev->power.usage_count));
if (!IS_ERR(mdwc->vbus_reg))
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index d2c0c1a8d979..68230adf2449 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -167,7 +167,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc3 device\n");
- return ret;
+ goto err;
}
pci_set_drvdata(pci, dwc3);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 1ffde9c5408c..9edc01692142 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -68,18 +68,27 @@ __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
static int __must_check
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
+static LIST_HEAD(inst_list);
+
/* ffs instance status */
-static DEFINE_MUTEX(ffs_ep_lock);
-static bool ffs_inst_exist;
-static struct f_fs_opts *g_opts;
+#define INST_NAME_SIZE 16
-/* Free instance structures */
-static void ffs_inst_clean(struct f_fs_opts *opts);
-static void ffs_inst_clean_delay(void);
-static int ffs_inst_exist_check(void);
+struct ffs_inst_status {
+ char inst_name[INST_NAME_SIZE];
+ struct list_head list;
+ struct mutex ffs_lock;
+ bool inst_exist;
+ struct f_fs_opts *opts;
+ struct ffs_data *ffs_data;
+};
-/* Global ffs_data pointer */
-static struct ffs_data *g_ffs_data;
+/* Free instance structures */
+static void ffs_inst_clean(struct f_fs_opts *opts,
+ const char *inst_name);
+static void ffs_inst_clean_delay(const char *inst_name);
+static int ffs_inst_exist_check(const char *inst_name);
+static struct ffs_inst_status *name_to_inst_status(
+ const char *inst_name, bool create_inst);
/* The function structure ***************************************************/
@@ -300,7 +309,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
ffs->state, ffs->setup_state, ffs->flags);
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(ffs->dev_name);
if (ret < 0)
return ret;
@@ -490,7 +499,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
ffs->state, ffs->setup_state, ffs->flags);
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(ffs->dev_name);
if (ret < 0)
return ret;
@@ -601,7 +610,7 @@ static int ffs_ep0_open(struct inode *inode, struct file *file)
ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(ffs->dev_name);
if (ret < 0)
return ret;
@@ -643,7 +652,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(ffs->dev_name);
if (ret < 0)
return ret;
@@ -668,7 +677,7 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(ffs->dev_name);
if (ret < 0)
return ret;
@@ -799,6 +808,10 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
ffs_log("enter: epfile name %s epfile err %d (%s)", epfile->name,
atomic_read(&epfile->error), io_data->read ? "READ" : "WRITE");
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
smp_mb__before_atomic();
retry:
if (atomic_read(&epfile->error))
@@ -1085,7 +1098,7 @@ ffs_epfile_open(struct inode *inode, struct file *file)
ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
epfile->ffs->setup_state, epfile->ffs->flags);
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
if (ret < 0)
return ret;
@@ -1143,16 +1156,11 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;
- int ret;
ENTER();
ffs_log("enter");
- ret = ffs_inst_exist_check();
- if (ret < 0)
- return ret;
-
if (!is_sync_kiocb(kiocb)) {
p = kmalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -1189,16 +1197,11 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;
- int ret;
ENTER();
ffs_log("enter");
- ret = ffs_inst_exist_check();
- if (ret < 0)
- return ret;
-
if (!is_sync_kiocb(kiocb)) {
p = kmalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -1275,7 +1278,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
epfile->ffs->setup_state, epfile->ffs->flags);
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
if (ret < 0)
return ret;
@@ -1583,6 +1586,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
int ret;
void *ffs_dev;
struct ffs_data *ffs;
+ struct ffs_inst_status *inst_status;
ENTER();
@@ -1612,6 +1616,18 @@ ffs_fs_mount(struct file_system_type *t, int flags,
ffs->private_data = ffs_dev;
data.ffs_data = ffs;
+ inst_status = name_to_inst_status(ffs->dev_name, false);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to find instance (%s)\n",
+ ffs->dev_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Store ffs to global status structure */
+ ffs_dev_lock();
+ inst_status->ffs_data = ffs;
+ ffs_dev_unlock();
+
rv = mount_nodev(t, flags, &data, ffs_sb_fill);
if (IS_ERR(rv) && data.ffs_data) {
ffs_release_dev(data.ffs_data);
@@ -1711,6 +1727,9 @@ static void ffs_data_opened(struct ffs_data *ffs)
static void ffs_data_put(struct ffs_data *ffs)
{
+ struct ffs_inst_status *inst_status;
+ const char *dev_name;
+
ENTER();
ffs_log("enter");
@@ -1718,16 +1737,20 @@ static void ffs_data_put(struct ffs_data *ffs)
smp_mb__before_atomic();
if (unlikely(atomic_dec_and_test(&ffs->ref))) {
pr_info("%s(): freeing\n", __func__);
- /* Clear g_ffs_data */
- ffs_dev_lock();
- g_ffs_data = NULL;
- ffs_dev_unlock();
+ /* Clear ffs from global structure */
+ inst_status = name_to_inst_status(ffs->dev_name, false);
+ if (!IS_ERR(inst_status)) {
+ ffs_dev_lock();
+ inst_status->ffs_data = NULL;
+ ffs_dev_unlock();
+ }
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
waitqueue_active(&ffs->ep0req_completion.wait));
- kfree(ffs->dev_name);
+ dev_name = ffs->dev_name;
kfree(ffs);
- ffs_inst_clean_delay();
+ ffs_inst_clean_delay(dev_name);
+ kfree(dev_name);
}
ffs_log("exit");
@@ -1792,11 +1815,6 @@ static struct ffs_data *ffs_data_new(void)
/* XXX REVISIT need to update it in some places, or do we? */
ffs->ev.can_stall = 1;
- /* Store ffs to g_ffs_data */
- ffs_dev_lock();
- g_ffs_data = ffs;
- ffs_dev_unlock();
-
ffs_log("exit");
return ffs;
@@ -3684,79 +3702,146 @@ static struct config_item_type ffs_func_type = {
/* Function registration interface ******************************************/
-static int ffs_inst_exist_check(void)
+static struct ffs_inst_status *name_to_inst_status(
+ const char *inst_name, bool create_inst)
+{
+ struct ffs_inst_status *inst_status;
+
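+ /*
+  * Case-insensitive prefix match: only strlen(inst_name) characters are
+  * compared, so a lookup for e.g. "adb" would also match an entry
+  * registered as "adb2".
+  */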
+ list_for_each_entry(inst_status, &inst_list, list) {
+ if (!strncasecmp(inst_status->inst_name,
+ inst_name, strlen(inst_name)))
+ return inst_status;
+ }
+
+ if (!create_inst)
+ return ERR_PTR(-ENODEV);
+
+ inst_status = kzalloc(sizeof(struct ffs_inst_status),
+ GFP_KERNEL);
+ if (!inst_status)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&inst_status->ffs_lock);
+ snprintf(inst_status->inst_name, INST_NAME_SIZE, "%s", inst_name);
+ list_add_tail(&inst_status->list, &inst_list);
+
+ return inst_status;
+}
+
+static int ffs_inst_exist_check(const char *inst_name)
{
- mutex_lock(&ffs_ep_lock);
+ struct ffs_inst_status *inst_status;
- if (unlikely(ffs_inst_exist == false)) {
- mutex_unlock(&ffs_ep_lock);
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
pr_err_ratelimited(
- "%s: f_fs instance freed already.\n",
- __func__);
+ "%s: failed to find instance (%s)\n",
+ __func__, inst_name);
+ return -ENODEV;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+
+ if (unlikely(inst_status->inst_exist == false)) {
+ mutex_unlock(&inst_status->ffs_lock);
+ pr_err_ratelimited(
+ "%s: f_fs instance (%s) has been freed already.\n",
+ __func__, inst_name);
return -ENODEV;
}
- mutex_unlock(&ffs_ep_lock);
+ mutex_unlock(&inst_status->ffs_lock);
return 0;
}
-static void ffs_inst_clean(struct f_fs_opts *opts)
+static void ffs_inst_clean(struct f_fs_opts *opts,
+ const char *inst_name)
{
- g_opts = NULL;
+ struct ffs_inst_status *inst_status;
+
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
+ pr_err_ratelimited(
+ "%s: failed to find instance (%s)\n",
+ __func__, inst_name);
+ return;
+ }
+
+ inst_status->opts = NULL;
+
ffs_dev_lock();
_ffs_free_dev(opts->dev);
ffs_dev_unlock();
kfree(opts);
}
-static void ffs_inst_clean_delay(void)
+static void ffs_inst_clean_delay(const char *inst_name)
{
- mutex_lock(&ffs_ep_lock);
+ struct ffs_inst_status *inst_status;
- if (unlikely(ffs_inst_exist == false)) {
- if (g_opts) {
- ffs_inst_clean(g_opts);
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
+ pr_err_ratelimited(
+ "%s: failed to find (%s) instance\n",
+ __func__, inst_name);
+ return;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+
+ if (unlikely(inst_status->inst_exist == false)) {
+ if (inst_status->opts) {
+ ffs_inst_clean(inst_status->opts, inst_name);
pr_err_ratelimited("%s: Delayed free memory\n",
__func__);
}
- mutex_unlock(&ffs_ep_lock);
+ mutex_unlock(&inst_status->ffs_lock);
return;
}
- mutex_unlock(&ffs_ep_lock);
+ mutex_unlock(&inst_status->ffs_lock);
}
static void ffs_free_inst(struct usb_function_instance *f)
{
struct f_fs_opts *opts;
+ struct ffs_inst_status *inst_status;
opts = to_f_fs_opts(f);
- mutex_lock(&ffs_ep_lock);
+ inst_status = name_to_inst_status(opts->dev->name, false);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to find (%s) instance\n",
+ opts->dev->name);
+ return;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
if (opts->dev->ffs_data
&& atomic_read(&opts->dev->ffs_data->opened)) {
- ffs_inst_exist = false;
- mutex_unlock(&ffs_ep_lock);
- ffs_log("%s: Dev is open, free mem when dev close\n",
- __func__);
+ inst_status->inst_exist = false;
+ mutex_unlock(&inst_status->ffs_lock);
+ ffs_log("Dev is open, free mem when dev (%s) close\n",
+ opts->dev->name);
return;
}
- ffs_inst_clean(opts);
- ffs_inst_exist = false;
- g_opts = NULL;
- mutex_unlock(&ffs_ep_lock);
+ ffs_inst_clean(opts, opts->dev->name);
+ inst_status->inst_exist = false;
+ mutex_unlock(&inst_status->ffs_lock);
}
#define MAX_INST_NAME_LEN 40
static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
- struct f_fs_opts *opts;
+ struct f_fs_opts *opts, *opts_prev;
+ struct ffs_data *ffs_data_tmp;
char *ptr;
const char *tmp;
int name_len, ret;
+ struct ffs_inst_status *inst_status;
name_len = strlen(name) + 1;
if (name_len > MAX_INST_NAME_LEN)
@@ -3766,13 +3851,22 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
if (!ptr)
return -ENOMEM;
- mutex_lock(&ffs_ep_lock);
- if (g_opts) {
- mutex_unlock(&ffs_ep_lock);
- ffs_log("%s: prev inst do not freed yet\n", __func__);
+ inst_status = name_to_inst_status(ptr, true);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to create status struct for (%s) instance\n",
+ ptr);
+ return -EINVAL;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+ opts_prev = inst_status->opts;
+ if (opts_prev) {
+ mutex_unlock(&inst_status->ffs_lock);
+ ffs_log("instance (%s): prev inst do not freed yet\n",
+ inst_status->inst_name);
return -EBUSY;
}
- mutex_unlock(&ffs_ep_lock);
+ mutex_unlock(&inst_status->ffs_lock);
opts = to_f_fs_opts(fi);
tmp = NULL;
@@ -3794,8 +3888,9 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
* ffs_private_data also need to update new allocated opts->dev
* address.
*/
- if (g_ffs_data)
- opts->dev->ffs_data = g_ffs_data;
+ ffs_data_tmp = inst_status->ffs_data;
+ if (ffs_data_tmp)
+ opts->dev->ffs_data = ffs_data_tmp;
if (opts->dev->ffs_data)
opts->dev->ffs_data->private_data = opts->dev;
@@ -3804,10 +3899,10 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
kfree(tmp);
- mutex_lock(&ffs_ep_lock);
- ffs_inst_exist = true;
- g_opts = opts;
- mutex_unlock(&ffs_ep_lock);
+ mutex_lock(&inst_status->ffs_lock);
+ inst_status->inst_exist = true;
+ inst_status->opts = opts;
+ mutex_unlock(&inst_status->ffs_lock);
return 0;
}
@@ -4212,6 +4307,20 @@ module_init(ffs_init);
static void __exit ffs_exit(void)
{
+ struct ffs_inst_status *inst_status, *inst_status_tmp = NULL;
+
+ list_for_each_entry(inst_status, &inst_list, list) {
+ if (inst_status_tmp) {
+ list_del(&inst_status_tmp->list);
+ kfree(inst_status_tmp);
+ }
+ inst_status_tmp = inst_status;
+ }
+ if (inst_status_tmp) {
+ list_del(&inst_status_tmp->list);
+ kfree(inst_status_tmp);
+ }
+
if (ffs_ipc_log) {
ipc_log_context_destroy(ffs_ipc_log);
ffs_ipc_log = NULL;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 408c8eca2bbe..b8ff368e4464 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -233,10 +233,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
- if (device_property_read_u32(pdev->dev.parent, "usb-core-id",
- &xhci->core_id))
- xhci->core_id = -EINVAL;
-
hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
@@ -437,7 +433,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
- .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7e76573c8236..4954e22a421b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4999,6 +4999,17 @@ dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
return 0;
}
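+
+/*
+ * Returns the DMA address recorded in the DCBAA slot entry for @udev, or 0
+ * when the controller is halted or the DCBAA has not been allocated.
+ */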
+static dma_addr_t xhci_get_dcba_dma_addr(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!(xhci->xhc_state & XHCI_STATE_HALTED) && xhci->dcbaa)
+ return xhci->dcbaa->dev_context_ptrs[udev->slot_id];
+
+ return 0;
+}
+
dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
struct usb_device *udev, struct usb_host_endpoint *ep)
{
@@ -5024,13 +5035,6 @@ dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
return 0;
}
-int xhci_get_core_id(struct usb_hcd *hcd)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
- return xhci->core_id;
-}
-
static const struct hc_driver xhci_hc_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",
@@ -5094,7 +5098,7 @@ static const struct hc_driver xhci_hc_driver = {
.sec_event_ring_cleanup = xhci_sec_event_ring_cleanup,
.get_sec_event_ring_dma_addr = xhci_get_sec_event_ring_dma_addr,
.get_xfer_ring_dma_addr = xhci_get_xfer_ring_dma_addr,
- .get_core_id = xhci_get_core_id,
+ .get_dcba_dma_addr = xhci_get_dcba_dma_addr,
};
void xhci_init_driver(struct hc_driver *drv,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ac637dc6e3cc..c665806983be 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1519,8 +1519,6 @@ struct xhci_hcd {
/* secondary interrupter */
struct xhci_intr_reg __iomem **sec_ir_set;
- int core_id;
-
/* Cached register copies of read-only HC data */
__u32 hcs_params1;
__u32 hcs_params2;
@@ -1948,7 +1946,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
-int xhci_get_core_id(struct usb_hcd *hcd);
#ifdef CONFIG_PM
int xhci_bus_suspend(struct usb_hcd *hcd);
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 10d30afe4a3c..a0d1417362cd 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -114,15 +114,19 @@ static int service_tx_status_request(
}
is_in = epnum & USB_DIR_IN;
- if (is_in) {
- epnum &= 0x0f;
+ epnum &= 0x0f;
+ if (epnum >= MUSB_C_NUM_EPS) {
+ handled = -EINVAL;
+ break;
+ }
+
+ if (is_in)
ep = &musb->endpoints[epnum].ep_in;
- } else {
+ else
ep = &musb->endpoints[epnum].ep_out;
- }
regs = musb->endpoints[epnum].regs;
- if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
+ if (!ep->desc) {
handled = -EINVAL;
break;
}
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 584ae8cbaf1c..77c3ebe860c5 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
- Fundamental Software dongle.
- Google USB serial devices
- HP4x calculators
+ - Libtransistor USB console
- a number of Motorola phones
- Motorola Tetra devices
- Novatel Wireless GPS receivers
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 64a4427678b0..32cadca198b2 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -210,6 +210,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a224c7a3ce09..3e5b189a79b4 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1911,7 +1911,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
return ftdi_jtag_probe(serial);
if (udev->product &&
- (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+ (!strcmp(udev->product, "Arrow USB Blaster") ||
+ !strcmp(udev->product, "BeagleBone/XDS100V2") ||
!strcmp(udev->product, "SNAP Connect E10")))
return ftdi_jtag_probe(serial);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 6aa7ff2c1cf7..2674da40d9cd 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -66,6 +66,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
0x01) }
DEVICE(google, GOOGLE_IDS);
+/* Libtransistor USB console */
+#define LIBTRANSISTOR_IDS() \
+ { USB_DEVICE(0x1209, 0x8b00) }
+DEVICE(libtransistor, LIBTRANSISTOR_IDS);
+
/* ViVOpay USB Serial Driver */
#define VIVOPAY_IDS() \
{ USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
@@ -113,6 +118,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
&funsoft_device,
&flashloader_device,
&google_device,
+ &libtransistor_device,
&vivopay_device,
&moto_modem_device,
&motorola_tetra_device,
@@ -129,6 +135,7 @@ static const struct usb_device_id id_table[] = {
FUNSOFT_IDS(),
FLASHLOADER_IDS(),
GOOGLE_IDS(),
+ LIBTRANSISTOR_IDS(),
VIVOPAY_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 091e8ec7a6c0..962bb6376b0c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1953,6 +1953,8 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag)
bcb->CDB[0] = 0xEF;
result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
+ if (us->srb != NULL)
+ scsi_set_resid(us->srb, 0);
info->BIN_FLAG = flag;
kfree(buf);
@@ -2306,21 +2308,22 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
{
- int result = 0;
+ int result = USB_STOR_XFER_GOOD;
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
/*US_DEBUG(usb_stor_show_command(us, srb)); */
scsi_set_resid(srb, 0);
- if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) {
+ if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready)))
result = ene_init(us);
- } else {
+ if (result == USB_STOR_XFER_GOOD) {
+ result = USB_STOR_TRANSPORT_ERROR;
if (info->SD_Status.Ready)
result = sd_scsi_irp(us, srb);
if (info->MS_Status.Ready)
result = ms_scsi_irp(us, srb);
}
- return 0;
+ return result;
}
static struct scsi_host_template ene_ub6250_host_template;
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 325b4c05acdd..f761e02e75c9 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -201,7 +201,12 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
if (!bid)
return -ENODEV;
+ /* device_attach() callers should hold parent lock for USB */
+ if (bid->udev->dev.parent)
+ device_lock(bid->udev->dev.parent);
ret = device_attach(&bid->udev->dev);
+ if (bid->udev->dev.parent)
+ device_unlock(bid->udev->dev.parent);
if (ret < 0) {
dev_err(&bid->udev->dev, "rebind failed\n");
return ret;
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index f875ccaa55f9..0fc5ace57c0e 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -248,7 +248,7 @@ enum usbip_side {
#define SDEV_EVENT_ERROR_SUBMIT (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define SDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
-#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
+#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index fe2b470d7ec6..c55c632a3b24 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -752,6 +752,62 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
return 0;
}
+static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ __le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
+ offset + PCI_EXP_DEVCTL);
+ int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /*
+ * The FLR bit is virtualized, if set and the device supports PCIe
+ * FLR, issue a reset_function. Regardless, clear the bit, the spec
+ * requires it to be always read as zero. NB, reset_function might
+ * not use a PCIe FLR, we don't have that level of granularity.
+ */
+ if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
+ u32 cap;
+ int ret;
+
+ *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
+
+ ret = pci_user_read_config_dword(vdev->pdev,
+ pos - offset + PCI_EXP_DEVCAP,
+ &cap);
+
+ if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+ pci_try_reset_function(vdev->pdev);
+ }
+
+ /*
+ * MPS is virtualized to the user, writes do not change the physical
+ * register since determining a proper MPS value requires a system wide
+ * device view. The MRRS is largely independent of MPS, but since the
+ * user does not have that system-wide view, they might set a safe, but
+ * inefficiently low value. Here we allow writes through to hardware,
+ * but we set the floor to the physical device MPS setting, so that
+ * we can at least use full TLPs, as defined by the MPS value.
+ *
+ * NB, if any devices actually depend on an artificially low MRRS
+ * setting, this will need to be revisited, perhaps with a quirk
+ * through pcie_set_readrq().
+ */
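+ /*
+  * Worked example (illustrative values only): a guest write of READRQ
+  * field value 1 decodes to 128 << 1 = 256 bytes; if the physical
+  * device MPS is 512, the max() below raises the request size to 512
+  * so full-sized TLPs can still be used.
+  */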
+ if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
+ readrq = 128 <<
+ ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
+ readrq = max(readrq, pcie_get_mps(vdev->pdev));
+
+ pcie_set_readrq(vdev->pdev, readrq);
+ }
+
+ return count;
+}
+
/* Permissions for PCI Express capability */
static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
{
@@ -759,26 +815,67 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
return -ENOMEM;
+ perm->writefn = vfio_exp_config_write;
+
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
/*
- * Allow writes to device control fields (includes FLR!)
- * but not to devctl_phantom which could confuse IOMMU
- * or to the ARI bit in devctl2 which is set at probe time
+ * Allow writes to device control fields, except devctl_phantom,
+ * which could confuse IOMMU, MPS, which can break communication
+ * with other physical devices, and the ARI bit in devctl2, which
+ * is set at probe time. FLR and MRRS get virtualized via our
+ * writefn.
*/
- p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM);
+ p_setw(perm, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
+ PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
return 0;
}
+static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /*
+ * The FLR bit is virtualized, if set and the device supports AF
+ * FLR, issue a reset_function. Regardless, clear the bit, the spec
+ * requires it to be always read as zero. NB, reset_function might
+ * not use an AF FLR, we don't have that level of granularity.
+ */
+ if (*ctrl & PCI_AF_CTRL_FLR) {
+ u8 cap;
+ int ret;
+
+ *ctrl &= ~PCI_AF_CTRL_FLR;
+
+ ret = pci_user_read_config_byte(vdev->pdev,
+ pos - offset + PCI_AF_CAP,
+ &cap);
+
+ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+ pci_try_reset_function(vdev->pdev);
+ }
+
+ return count;
+}
+
/* Permissions for Advanced Function capability */
static int __init init_pci_cap_af_perm(struct perm_bits *perm)
{
if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
return -ENOMEM;
+ perm->writefn = vfio_af_config_write;
+
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
- p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR);
+ p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
return 0;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ad2146a9ab2d..675819a1af37 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -173,8 +173,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
if (mask & POLLERR) {
- if (poll->wqh)
- remove_wait_queue(poll->wqh, &poll->wait);
+ vhost_poll_stop(poll);
ret = -EINVAL;
}
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index d357a616b05e..8a9e8acf6c0e 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1563,12 +1563,16 @@ static int mdp3_get_metadata(struct msm_fb_data_type *mfd,
}
break;
case metadata_op_get_ion_fd:
- if (mfd->fb_ion_handle) {
+ if (mfd->fb_ion_handle && mfd->fb_ion_client) {
+ get_dma_buf(mfd->fbmem_buf);
metadata->data.fbmem_ionfd =
- dma_buf_fd(mfd->fbmem_buf, 0);
- if (metadata->data.fbmem_ionfd < 0)
+ ion_share_dma_buf_fd(mfd->fb_ion_client,
+ mfd->fb_ion_handle);
+ if (metadata->data.fbmem_ionfd < 0) {
+ dma_buf_put(mfd->fbmem_buf);
pr_err("fd allocation failed. fd = %d\n",
- metadata->data.fbmem_ionfd);
+ metadata->data.fbmem_ionfd);
+ }
}
break;
default:
diff --git a/drivers/video/fbdev/msm/msm_dba/adv7533.c b/drivers/video/fbdev/msm/msm_dba/adv7533.c
index 086946e806d2..15fe77d05091 100644
--- a/drivers/video/fbdev/msm/msm_dba/adv7533.c
+++ b/drivers/video/fbdev/msm/msm_dba/adv7533.c
@@ -191,7 +191,7 @@ static struct adv7533_reg_cfg adv7533_video_en[] = {
static struct adv7533_reg_cfg adv7533_video_disable[] = {
/* Timing Generator Disable */
- {I2C_ADDR_CEC_DSI, 0x27, 0x4B, 0},
+ {I2C_ADDR_CEC_DSI, 0x27, 0x0B, 0},
/* SPDIF disable */
{I2C_ADDR_MAIN, 0x0B, 0x00, 0},
/* Gate CEC Clock */
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 728cb6b23c42..7d8dfc7f1269 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -298,8 +298,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
*/
static int vfb_set_par(struct fb_info *info)
{
+ switch (info->var.bits_per_pixel) {
+ case 1:
+ info->fix.visual = FB_VISUAL_MONO01;
+ break;
+ case 8:
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ case 16:
+ case 24:
+ case 32:
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ break;
+ }
+
info->fix.line_length = get_line_length(info->var.xres_virtual,
info->var.bits_per_pixel);
+
return 0;
}
@@ -540,6 +555,8 @@ static int vfb_probe(struct platform_device *dev)
goto err2;
platform_set_drvdata(dev, info);
+ vfb_set_par(info);
+
fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
videomemorysize >> 10);
return 0;
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 016bd9355190..aa93df5833dc 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -450,7 +450,7 @@ static bool watchdog_is_running(void)
is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
&& (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
- & F71808FG_FLAG_WD_EN);
+ & BIT(F71808FG_FLAG_WD_EN));
superio_exit(watchdog.sioaddr);
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 7a54c6a867c8..500098cdb960 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -746,7 +746,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t m
autofs4_del_active(dentry);
- inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
+ inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
if (!inode)
return -ENOMEM;
d_add(dentry, inode);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e767f347f2b1..88bee6703cc0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2534,7 +2534,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
if (!uptodate) {
ClearPageUptodate(page);
SetPageError(page);
- ret = ret < 0 ? ret : -EIO;
+ ret = err < 0 ? err : -EIO;
mapping_set_error(page->mapping, ret);
}
return 0;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 49a0d6b027c1..76dacd5307b9 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -673,6 +673,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
goto mknod_out;
}
+ if (!S_ISCHR(mode) && !S_ISBLK(mode))
+ goto mknod_out;
+
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
goto mknod_out;
@@ -681,10 +684,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
- kfree(full_path);
rc = -ENOMEM;
- free_xid(xid);
- return rc;
+ goto mknod_out;
}
if (backup_cred(cifs_sb))
@@ -731,7 +732,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
pdev->minor = cpu_to_le64(MINOR(device_number));
rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
&bytes_written, iov, 1);
- } /* else if (S_ISFIFO) */
+ }
tcon->ses->server->ops->close(xid, tcon, &fid);
d_drop(direntry);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 744be3c146f5..0141aba9eca6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -589,7 +589,7 @@ cifs_relock_file(struct cifsFileInfo *cfile)
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
- down_read(&cinode->lock_sem);
+ down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
if (cinode->can_cache_brlcks) {
/* can cache locks - no need to relock */
up_read(&cinode->lock_sem);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 33b1bc21a120..807e989f436a 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -999,15 +999,19 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
goto tcon_exit;
}
- if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
+ switch (rsp->ShareType) {
+ case SMB2_SHARE_TYPE_DISK:
cifs_dbg(FYI, "connection to disk share\n");
- else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
+ break;
+ case SMB2_SHARE_TYPE_PIPE:
tcon->ipc = true;
cifs_dbg(FYI, "connection to pipe share\n");
- } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
- tcon->print = true;
+ break;
+ case SMB2_SHARE_TYPE_PRINT:
+ tcon->ipc = true;
cifs_dbg(FYI, "connection to printer\n");
- } else {
+ break;
+ default:
cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
rc = -EOPNOTSUPP;
goto tcon_error_exit;
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index a91ed46fe503..c7cf565c434e 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -25,15 +25,8 @@
#include <linux/namei.h>
#include "fscrypt_private.h"
-/*
- * Call fscrypt_decrypt_page on every single page, reusing the encryption
- * context.
- */
-static void completion_pages(struct work_struct *work)
+static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
{
- struct fscrypt_ctx *ctx =
- container_of(work, struct fscrypt_ctx, r.work);
- struct bio *bio = ctx->r.bio;
struct bio_vec *bv;
int i;
@@ -45,22 +38,38 @@ static void completion_pages(struct work_struct *work)
if (ret) {
WARN_ON_ONCE(1);
SetPageError(page);
- } else {
+ } else if (done) {
SetPageUptodate(page);
}
- unlock_page(page);
+ if (done)
+ unlock_page(page);
}
+}
+
+void fscrypt_decrypt_bio(struct bio *bio)
+{
+ __fscrypt_decrypt_bio(bio, false);
+}
+EXPORT_SYMBOL(fscrypt_decrypt_bio);
+
+static void completion_pages(struct work_struct *work)
+{
+ struct fscrypt_ctx *ctx =
+ container_of(work, struct fscrypt_ctx, r.work);
+ struct bio *bio = ctx->r.bio;
+
+ __fscrypt_decrypt_bio(bio, true);
fscrypt_release_ctx(ctx);
bio_put(bio);
}
-void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
+void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
{
INIT_WORK(&ctx->r.work, completion_pages);
ctx->r.bio = bio;
- queue_work(fscrypt_read_workqueue, &ctx->r.work);
+ fscrypt_enqueue_decrypt_work(&ctx->r.work);
}
-EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
+EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 732a786cce9d..0758d32ad01b 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -27,6 +27,7 @@
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
+#include <crypto/skcipher.h>
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
@@ -44,12 +45,18 @@ static mempool_t *fscrypt_bounce_page_pool = NULL;
static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);
-struct workqueue_struct *fscrypt_read_workqueue;
+static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
+void fscrypt_enqueue_decrypt_work(struct work_struct *work)
+{
+ queue_work(fscrypt_read_workqueue, work);
+}
+EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
+
/**
* fscrypt_release_ctx() - Releases an encryption context
* @ctx: The encryption context to release.
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 6eb434363ff2..b18fa323d1d9 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -12,42 +12,46 @@
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
+#include <crypto/skcipher.h>
#include "fscrypt_private.h"
+static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+{
+ if (str->len == 1 && str->name[0] == '.')
+ return true;
+
+ if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+ return true;
+
+ return false;
+}
+
/**
* fname_encrypt() - encrypt a filename
*
- * The caller must have allocated sufficient memory for the @oname string.
+ * The output buffer must be at least as large as the input buffer.
+ * Any extra space is filled with NUL padding before encryption.
*
* Return: 0 on success, -errno on failure
*/
-static int fname_encrypt(struct inode *inode,
- const struct qstr *iname, struct fscrypt_str *oname)
+int fname_encrypt(struct inode *inode, const struct qstr *iname,
+ u8 *out, unsigned int olen)
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
- struct fscrypt_info *ci = inode->i_crypt_info;
- struct crypto_skcipher *tfm = ci->ci_ctfm;
+ struct crypto_skcipher *tfm = inode->i_crypt_info->ci_ctfm;
int res = 0;
char iv[FS_CRYPTO_BLOCK_SIZE];
struct scatterlist sg;
- int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
- unsigned int lim;
- unsigned int cryptlen;
-
- lim = inode->i_sb->s_cop->max_namelen(inode);
- if (iname->len <= 0 || iname->len > lim)
- return -EIO;
/*
* Copy the filename to the output buffer for encrypting in-place and
* pad it with the needed number of NUL bytes.
*/
- cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
- cryptlen = round_up(cryptlen, padding);
- cryptlen = min(cryptlen, lim);
- memcpy(oname->name, iname->name, iname->len);
- memset(oname->name + iname->len, 0, cryptlen - iname->len);
+ if (WARN_ON(olen < iname->len))
+ return -ENOBUFS;
+ memcpy(out, iname->name, iname->len);
+ memset(out + iname->len, 0, olen - iname->len);
/* Initialize the IV */
memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
@@ -62,8 +66,8 @@ static int fname_encrypt(struct inode *inode,
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &wait);
- sg_init_one(&sg, oname->name, cryptlen);
- skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);
+ sg_init_one(&sg, out, olen);
+ skcipher_request_set_crypt(req, &sg, &sg, olen, iv);
/* Do the encryption */
res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
@@ -74,7 +78,6 @@ static int fname_encrypt(struct inode *inode,
return res;
}
- oname->len = cryptlen;
return 0;
}
@@ -187,50 +190,52 @@ static int digest_decode(const char *src, int len, char *dst)
return cp - dst;
}
-u32 fscrypt_fname_encrypted_size(const struct inode *inode, u32 ilen)
+bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
+ u32 max_len, u32 *encrypted_len_ret)
{
- int padding = 32;
- struct fscrypt_info *ci = inode->i_crypt_info;
-
- if (ci)
- padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
- ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
- return round_up(ilen, padding);
+ int padding = 4 << (inode->i_crypt_info->ci_flags &
+ FS_POLICY_FLAGS_PAD_MASK);
+ u32 encrypted_len;
+
+ if (orig_len > max_len)
+ return false;
+ encrypted_len = max(orig_len, (u32)FS_CRYPTO_BLOCK_SIZE);
+ encrypted_len = round_up(encrypted_len, padding);
+ *encrypted_len_ret = min(encrypted_len, max_len);
+ return true;
}
-EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
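+/*
+ * Worked example (illustrative values only): with a policy pad value of 2 the
+ * padding is 4 << 2 = 16 bytes, so a 10-byte name is first raised to
+ * FS_CRYPTO_BLOCK_SIZE (16) and then rounded up to 16, while a 20-byte name
+ * becomes 32, both subject to the max_len clamp.
+ */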
/**
- * fscrypt_fname_crypto_alloc_obuff() -
+ * fscrypt_fname_alloc_buffer - allocate a buffer for presented filenames
+ *
+ * Allocate a buffer that is large enough to hold any decrypted or encoded
+ * filename (null-terminated), for the given maximum encrypted filename length.
*
- * Allocates an output buffer that is sufficient for the crypto operation
- * specified by the context and the direction.
+ * Return: 0 on success, -errno on failure
*/
int fscrypt_fname_alloc_buffer(const struct inode *inode,
- u32 ilen, struct fscrypt_str *crypto_str)
+ u32 max_encrypted_len,
+ struct fscrypt_str *crypto_str)
{
- u32 olen = fscrypt_fname_encrypted_size(inode, ilen);
const u32 max_encoded_len =
max_t(u32, BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE),
1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name)));
+ u32 max_presented_len;
- crypto_str->len = olen;
- olen = max(olen, max_encoded_len);
+ max_presented_len = max(max_encoded_len, max_encrypted_len);
- /*
- * Allocated buffer can hold one more character to null-terminate the
- * string
- */
- crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
- if (!(crypto_str->name))
+ crypto_str->name = kmalloc(max_presented_len + 1, GFP_NOFS);
+ if (!crypto_str->name)
return -ENOMEM;
+ crypto_str->len = max_presented_len;
return 0;
}
EXPORT_SYMBOL(fscrypt_fname_alloc_buffer);
/**
- * fscrypt_fname_crypto_free_buffer() -
+ * fscrypt_fname_free_buffer - free the buffer for presented filenames
*
- * Frees the buffer allocated for crypto operation.
+ * Free the buffer allocated by fscrypt_fname_alloc_buffer().
*/
void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
{
@@ -297,35 +302,6 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
/**
- * fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
- * space
- *
- * The caller must have allocated sufficient memory for the @oname string.
- *
- * Return: 0 on success, -errno on failure
- */
-int fscrypt_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct fscrypt_str *oname)
-{
- if (fscrypt_is_dot_dotdot(iname)) {
- oname->name[0] = '.';
- oname->name[iname->len - 1] = '.';
- oname->len = iname->len;
- return 0;
- }
- if (inode->i_crypt_info)
- return fname_encrypt(inode, iname, oname);
- /*
- * Without a proper key, a user is not allowed to modify the filenames
- * in a directory. Consequently, a user space name cannot be mapped to
- * a disk-space name
- */
- return -ENOKEY;
-}
-EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
-
-/**
* fscrypt_setup_filename() - prepare to search a possibly encrypted directory
* @dir: the directory that will be searched
* @iname: the user-provided filename being searched for
@@ -368,11 +344,17 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
return ret;
if (dir->i_crypt_info) {
- ret = fscrypt_fname_alloc_buffer(dir, iname->len,
- &fname->crypto_buf);
- if (ret)
- return ret;
- ret = fname_encrypt(dir, iname, &fname->crypto_buf);
+ if (!fscrypt_fname_encrypted_size(dir, iname->len,
+ dir->i_sb->s_cop->max_namelen(dir),
+ &fname->crypto_buf.len))
+ return -ENAMETOOLONG;
+ fname->crypto_buf.name = kmalloc(fname->crypto_buf.len,
+ GFP_NOFS);
+ if (!fname->crypto_buf.name)
+ return -ENOMEM;
+
+ ret = fname_encrypt(dir, iname, fname->crypto_buf.name,
+ fname->crypto_buf.len);
if (ret)
goto errout;
fname->disk_name.name = fname->crypto_buf.name;
@@ -424,7 +406,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
return 0;
errout:
- fscrypt_fname_free_buffer(&fname->crypto_buf);
+ kfree(fname->crypto_buf.name);
return ret;
}
EXPORT_SYMBOL(fscrypt_setup_filename);
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index c3ad415cd14f..426aa1b27f17 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -49,6 +49,15 @@ struct fscrypt_context {
#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct fscrypt_symlink_data {
+ __le16 len;
+ char encrypted_path[1];
+} __packed;
+
/*
* A pointer to this structure is stored in the file system's in-core
* representation of an inode.
@@ -81,9 +90,23 @@ static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
bio->bi_rw = op | op_flags;
}
+static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
+ u32 filenames_mode)
+{
+ if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
+ filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
+ return true;
+
+ if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
+ filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
+ return true;
+
+ return false;
+}
+
/* crypto.c */
+extern struct kmem_cache *fscrypt_info_cachep;
extern int fscrypt_initialize(unsigned int cop_flags);
-extern struct workqueue_struct *fscrypt_read_workqueue;
extern int fscrypt_do_page_crypto(const struct inode *inode,
fscrypt_direction_t rw, u64 lblk_num,
struct page *src_page,
@@ -93,6 +116,13 @@ extern int fscrypt_do_page_crypto(const struct inode *inode,
extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
gfp_t gfp_flags);
+/* fname.c */
+extern int fname_encrypt(struct inode *inode, const struct qstr *iname,
+ u8 *out, unsigned int olen);
+extern bool fscrypt_fname_encrypted_size(const struct inode *inode,
+ u32 orig_len, u32 max_len,
+ u32 *encrypted_len_ret);
+
/* keyinfo.c */
extern void __exit fscrypt_essiv_cleanup(void);
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 9f5fb2eb9cf7..bc010e4609ef 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -110,3 +110,159 @@ int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry)
return 0;
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup);
+
+int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
+{
+ int err;
+
+ /*
+ * To calculate the size of the encrypted symlink target we need to know
+ * the amount of NUL padding, which is determined by the flags set in
+ * the encryption policy which will be inherited from the directory.
+ * The easiest way to get access to this is to just load the directory's
+ * fscrypt_info, since we'll need it to create the dir_entry anyway.
+ *
+ * Note: in test_dummy_encryption mode, @dir may be unencrypted.
+ */
+ err = fscrypt_get_encryption_info(dir);
+ if (err)
+ return err;
+ if (!fscrypt_has_encryption_key(dir))
+ return -ENOKEY;
+
+ /*
+ * Calculate the size of the encrypted symlink and verify it won't
+ * exceed max_len. Note that for historical reasons, encrypted symlink
+ * targets are prefixed with the ciphertext length, despite this
+ * actually being redundant with i_size. This decreases by 2 bytes the
+ * longest symlink target we can accept.
+ *
+ * We could recover 1 byte by not counting a null terminator, but
+ * counting it (even though it is meaningless for ciphertext) is simpler
+ * for now since filesystems will assume it is there and subtract it.
+ */
+ if (!fscrypt_fname_encrypted_size(dir, len,
+ max_len - sizeof(struct fscrypt_symlink_data),
+ &disk_link->len))
+ return -ENAMETOOLONG;
+ disk_link->len += sizeof(struct fscrypt_symlink_data);
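+ /*
+  * Worked example (illustrative values only): a 100-byte target with
+  * 16-byte NUL padding encrypts to 112 bytes; adding the 3-byte
+  * fscrypt_symlink_data header (2-byte length plus the byte counted for
+  * the NUL terminator) gives disk_link->len = 115.
+  */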
+
+ disk_link->name = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fscrypt_prepare_symlink);
+
+int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+ unsigned int len, struct fscrypt_str *disk_link)
+{
+ int err;
+ struct qstr iname = QSTR_INIT(target, len);
+ struct fscrypt_symlink_data *sd;
+ unsigned int ciphertext_len;
+
+ err = fscrypt_require_key(inode);
+ if (err)
+ return err;
+
+ if (disk_link->name) {
+ /* filesystem-provided buffer */
+ sd = (struct fscrypt_symlink_data *)disk_link->name;
+ } else {
+ sd = kmalloc(disk_link->len, GFP_NOFS);
+ if (!sd)
+ return -ENOMEM;
+ }
+ ciphertext_len = disk_link->len - sizeof(*sd);
+ sd->len = cpu_to_le16(ciphertext_len);
+
+ err = fname_encrypt(inode, &iname, sd->encrypted_path, ciphertext_len);
+ if (err) {
+ if (!disk_link->name)
+ kfree(sd);
+ return err;
+ }
+ /*
+ * Null-terminating the ciphertext doesn't make sense, but we still
+ * count the null terminator in the length, so we might as well
+ * initialize it just in case the filesystem writes it out.
+ */
+ sd->encrypted_path[ciphertext_len] = '\0';
+
+ if (!disk_link->name)
+ disk_link->name = (unsigned char *)sd;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fscrypt_encrypt_symlink);
+
+/**
+ * fscrypt_get_symlink - get the target of an encrypted symlink
+ * @inode: the symlink inode
+ * @caddr: the on-disk contents of the symlink
+ * @max_size: size of @caddr buffer
+ *
+ * If the symlink's encryption key is available, we decrypt its target.
+ * Otherwise, we encode its target for presentation.
+ *
+ * This may sleep, so the filesystem must have dropped out of RCU mode already.
+ *
+ * Return: the presentable symlink target or an ERR_PTR()
+ */
+void *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+ unsigned int max_size)
+{
+ const struct fscrypt_symlink_data *sd;
+ struct fscrypt_str cstr, pstr;
+ int err;
+
+ /* This is for encrypted symlinks only */
+ if (WARN_ON(!IS_ENCRYPTED(inode)))
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Try to set up the symlink's encryption key, but we can continue
+ * regardless of whether the key is available or not.
+ */
+ err = fscrypt_get_encryption_info(inode);
+ if (err)
+ return ERR_PTR(err);
+
+ /*
+ * For historical reasons, encrypted symlink targets are prefixed with
+ * the ciphertext length, even though this is redundant with i_size.
+ */
+
+ if (max_size < sizeof(*sd))
+ return ERR_PTR(-EUCLEAN);
+ sd = caddr;
+ cstr.name = (unsigned char *)sd->encrypted_path;
+ cstr.len = le16_to_cpu(sd->len);
+
+ if (cstr.len == 0)
+ return ERR_PTR(-EUCLEAN);
+
+ if (cstr.len + sizeof(*sd) - 1 > max_size)
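+ /* sizeof(*sd) already counts one byte of encrypted_path, so the header
+  * overhead here is sizeof(*sd) - 1, i.e. just the 2-byte length prefix.
+  */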
+ return ERR_PTR(-EUCLEAN);
+
+ err = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
+ if (err)
+ return ERR_PTR(err);
+
+ err = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
+ if (err)
+ goto err_kfree;
+
+ err = -EUCLEAN;
+ if (pstr.name[0] == '\0')
+ goto err_kfree;
+
+ pstr.name[pstr.len] = '\0';
+ return pstr.name;
+
+err_kfree:
+ kfree(pstr.name);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(fscrypt_get_symlink);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 444c65ed6db8..7c00331da5df 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -13,6 +13,7 @@
#include <linux/ratelimit.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
+#include <crypto/skcipher.h>
#include "fscrypt_private.h"
static struct crypto_shash *essiv_hash_tfm;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index f97110461c19..c57a94f1c198 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -242,8 +242,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
*/
ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
sb->s_blocksize * 8, bh->b_data);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
- ext4_group_desc_csum_set(sb, block_group, gdp);
return 0;
}
@@ -322,6 +320,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_grpblk_t offset;
ext4_grpblk_t next_zero_bit;
+ ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
ext4_fsblk_t blk;
ext4_fsblk_t group_first_block;
@@ -339,20 +338,25 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
/* check whether block bitmap block number is set */
blk = ext4_block_bitmap(sb, desc);
offset = blk - group_first_block;
- if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;
/* check whether the inode bitmap block number is set */
blk = ext4_inode_bitmap(sb, desc);
offset = blk - group_first_block;
- if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;
/* check whether the inode table block number is set */
blk = ext4_inode_table(sb, desc);
offset = blk - group_first_block;
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
+ return blk;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
EXT4_B2C(sbi, offset));
@@ -418,6 +422,7 @@ struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
struct ext4_group_desc *desc;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh;
ext4_fsblk_t bitmap_blk;
int err;
@@ -426,6 +431,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
if (!desc)
return ERR_PTR(-EFSCORRUPTED);
bitmap_blk = ext4_block_bitmap(sb, desc);
+ if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+ ext4_error(sb, "Invalid block bitmap block %llu in "
+ "block_group %u", bitmap_blk, block_group);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, "Cannot get buffer for block bitmap - "
@@ -447,6 +458,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
+ set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
if (err) {
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index cfb978fd3ec4..359ef3774f4b 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5372,8 +5372,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
stop = le32_to_cpu(extent->ee_block);
/*
- * In case of left shift, Don't start shifting extents until we make
- * sure the hole is big enough to accommodate the shift.
+ * For left shifts, make sure the hole on the left is big enough to
+ * accommodate the shift. For right shifts, make sure the last extent
+ * won't be shifted beyond EXT_MAX_BLOCKS.
*/
if (SHIFT == SHIFT_LEFT) {
path = ext4_find_extent(inode, start - 1, &path,
@@ -5393,9 +5394,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
if ((start == ex_start && shift > ex_start) ||
(shift > start - ex_end)) {
- ext4_ext_drop_refs(path);
- kfree(path);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ if (shift > EXT_MAX_BLOCKS -
+ (stop + ext4_ext_get_actual_len(extent))) {
+ ret = -EINVAL;
+ goto out;
}
}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index a8b1749d79a8..debf0707789d 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -460,7 +460,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
int i, num;
unsigned long nr_pages;
- num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
(pgoff_t)num);
if (nr_pages == 0)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 5388207d2832..9fe55b7d4c2c 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -63,44 +63,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
-/* Initializes an uninitialized inode bitmap */
-static int ext4_init_inode_bitmap(struct super_block *sb,
- struct buffer_head *bh,
- ext4_group_t block_group,
- struct ext4_group_desc *gdp)
-{
- struct ext4_group_info *grp;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- J_ASSERT_BH(bh, buffer_locked(bh));
-
- /* If checksum is bad mark all blocks and inodes use to prevent
- * allocation, essentially implementing a per-group read-only flag. */
- if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
- grp = ext4_get_group_info(sb, block_group);
- if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
- percpu_counter_sub(&sbi->s_freeclusters_counter,
- grp->bb_free);
- set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
- if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
- int count;
- count = ext4_free_inodes_count(sb, gdp);
- percpu_counter_sub(&sbi->s_freeinodes_counter,
- count);
- }
- set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
- return -EFSBADCRC;
- }
-
- memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
- ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
- bh->b_data);
- ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
- ext4_group_desc_csum_set(sb, block_group, gdp);
-
- return 0;
-}
-
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
if (uptodate) {
@@ -157,6 +119,7 @@ static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
struct ext4_group_desc *desc;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh = NULL;
ext4_fsblk_t bitmap_blk;
int err;
@@ -166,6 +129,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
return ERR_PTR(-EFSCORRUPTED);
bitmap_blk = ext4_inode_bitmap(sb, desc);
+ if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+ ext4_error(sb, "Invalid inode bitmap blk %llu in "
+ "block_group %u", bitmap_blk, block_group);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, "Cannot read inode bitmap - "
@@ -184,17 +153,14 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
ext4_lock_group(sb, block_group);
if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
- err = ext4_init_inode_bitmap(sb, bh, block_group, desc);
+ memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+ ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
+ sb->s_blocksize * 8, bh->b_data);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
- if (err) {
- ext4_error(sb, "Failed to init inode bitmap for group "
- "%u: %d", block_group, err);
- goto out;
- }
return bh;
}
ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 280d67fe33a7..820d692bc931 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -377,7 +377,7 @@ out:
static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
unsigned int len)
{
- int ret, size;
+ int ret, size, no_expand;
struct ext4_inode_info *ei = EXT4_I(inode);
if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
@@ -387,15 +387,14 @@ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
if (size < len)
return -ENOSPC;
- down_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_lock_xattr(inode, &no_expand);
if (ei->i_inline_off)
ret = ext4_update_inline_data(handle, inode, len);
else
ret = ext4_create_inline_data(handle, inode, len);
- up_write(&EXT4_I(inode)->xattr_sem);
-
+ ext4_write_unlock_xattr(inode, &no_expand);
return ret;
}
@@ -537,7 +536,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
struct inode *inode,
unsigned flags)
{
- int ret, needed_blocks;
+ int ret, needed_blocks, no_expand;
handle_t *handle = NULL;
int retries = 0, sem_held = 0;
struct page *page = NULL;
@@ -577,7 +576,7 @@ retry:
goto out;
}
- down_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_lock_xattr(inode, &no_expand);
sem_held = 1;
/* If some one has already done this for us, just exit. */
if (!ext4_has_inline_data(inode)) {
@@ -613,7 +612,7 @@ retry:
page_cache_release(page);
page = NULL;
ext4_orphan_add(handle, inode);
- up_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_unlock_xattr(inode, &no_expand);
sem_held = 0;
ext4_journal_stop(handle);
handle = NULL;
@@ -639,7 +638,7 @@ out:
page_cache_release(page);
}
if (sem_held)
- up_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_unlock_xattr(inode, &no_expand);
if (handle)
ext4_journal_stop(handle);
brelse(iloc.bh);
@@ -732,7 +731,7 @@ convert:
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
unsigned copied, struct page *page)
{
- int ret;
+ int ret, no_expand;
void *kaddr;
struct ext4_iloc iloc;
@@ -750,7 +749,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
goto out;
}
- down_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_lock_xattr(inode, &no_expand);
BUG_ON(!ext4_has_inline_data(inode));
kaddr = kmap_atomic(page);
@@ -760,7 +759,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
/* clear page dirty so that writepages wouldn't work for us. */
ClearPageDirty(page);
- up_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_unlock_xattr(inode, &no_expand);
brelse(iloc.bh);
out:
return copied;
@@ -771,7 +770,7 @@ ext4_journalled_write_inline_data(struct inode *inode,
unsigned len,
struct page *page)
{
- int ret;
+ int ret, no_expand;
void *kaddr;
struct ext4_iloc iloc;
@@ -781,11 +780,11 @@ ext4_journalled_write_inline_data(struct inode *inode,
return NULL;
}
- down_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_lock_xattr(inode, &no_expand);
kaddr = kmap_atomic(page);
ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
kunmap_atomic(kaddr);
- up_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_unlock_xattr(inode, &no_expand);
return iloc.bh;
}
@@ -1268,7 +1267,7 @@ out:
int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
struct inode *dir, struct inode *inode)
{
- int ret, inline_size;
+ int ret, inline_size, no_expand;
void *inline_start;
struct ext4_iloc iloc;
@@ -1276,7 +1275,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
if (ret)
return ret;
- down_write(&EXT4_I(dir)->xattr_sem);
+ ext4_write_lock_xattr(dir, &no_expand);
if (!ext4_has_inline_data(dir))
goto out;
@@ -1322,7 +1321,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
out:
ext4_mark_inode_dirty(handle, dir);
- up_write(&EXT4_I(dir)->xattr_sem);
+ ext4_write_unlock_xattr(dir, &no_expand);
brelse(iloc.bh);
return ret;
}
@@ -1682,7 +1681,7 @@ int ext4_delete_inline_entry(handle_t *handle,
struct buffer_head *bh,
int *has_inline_data)
{
- int err, inline_size;
+ int err, inline_size, no_expand;
struct ext4_iloc iloc;
void *inline_start;
@@ -1690,7 +1689,7 @@ int ext4_delete_inline_entry(handle_t *handle,
if (err)
return err;
- down_write(&EXT4_I(dir)->xattr_sem);
+ ext4_write_lock_xattr(dir, &no_expand);
if (!ext4_has_inline_data(dir)) {
*has_inline_data = 0;
goto out;
@@ -1725,7 +1724,7 @@ int ext4_delete_inline_entry(handle_t *handle,
ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size);
out:
- up_write(&EXT4_I(dir)->xattr_sem);
+ ext4_write_unlock_xattr(dir, &no_expand);
brelse(iloc.bh);
if (err != -ENOENT)
ext4_std_error(dir->i_sb, err);
@@ -1824,11 +1823,11 @@ out:
int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
{
- int ret;
+ int ret, no_expand;
- down_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_lock_xattr(inode, &no_expand);
ret = ext4_destroy_inline_data_nolock(handle, inode);
- up_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_unlock_xattr(inode, &no_expand);
return ret;
}
@@ -1913,7 +1912,7 @@ out:
void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
{
handle_t *handle;
- int inline_size, value_len, needed_blocks;
+ int inline_size, value_len, needed_blocks, no_expand;
size_t i_size;
void *value = NULL;
struct ext4_xattr_ibody_find is = {
@@ -1930,7 +1929,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
if (IS_ERR(handle))
return;
- down_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_lock_xattr(inode, &no_expand);
if (!ext4_has_inline_data(inode)) {
*has_inline = 0;
ext4_journal_stop(handle);
@@ -1988,7 +1987,7 @@ out_error:
up_write(&EXT4_I(inode)->i_data_sem);
out:
brelse(is.iloc.bh);
- up_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_unlock_xattr(inode, &no_expand);
kfree(value);
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
@@ -2004,7 +2003,7 @@ out:
int ext4_convert_inline_data(struct inode *inode)
{
- int error, needed_blocks;
+ int error, needed_blocks, no_expand;
handle_t *handle;
struct ext4_iloc iloc;
@@ -2026,15 +2025,10 @@ int ext4_convert_inline_data(struct inode *inode)
goto out_free;
}
- down_write(&EXT4_I(inode)->xattr_sem);
- if (!ext4_has_inline_data(inode)) {
- up_write(&EXT4_I(inode)->xattr_sem);
- goto out;
- }
-
- error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
- up_write(&EXT4_I(inode)->xattr_sem);
-out:
+ ext4_write_lock_xattr(inode, &no_expand);
+ if (ext4_has_inline_data(inode))
+ error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
+ ext4_write_unlock_xattr(inode, &no_expand);
ext4_journal_stop(handle);
out_free:
brelse(iloc.bh);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 78701445348f..760c5f40a555 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1545,6 +1545,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
if (invalidate) {
+ if (page_mapped(page))
+ clear_page_dirty_for_io(page);
block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
ClearPageUptodate(page);
}
@@ -3297,29 +3299,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
* case, we allocate an io_end structure to hook to the iocb.
*/
iocb->private = NULL;
- ext4_inode_aio_set(inode, NULL);
- if (!is_sync_kiocb(iocb)) {
- io_end = ext4_init_io_end(inode, GFP_NOFS);
- if (!io_end) {
- ret = -ENOMEM;
- goto retake_lock;
- }
- /*
- * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
- */
- iocb->private = ext4_get_io_end(io_end);
- /*
- * we save the io structure for current async direct
- * IO, so that later ext4_map_blocks() could flag the
- * io structure whether there is a unwritten extents
- * needs to be converted when IO is completed.
- */
- ext4_inode_aio_set(inode, io_end);
- }
-
if (overwrite) {
get_block_func = ext4_get_block_write_nolock;
} else {
+ ext4_inode_aio_set(inode, NULL);
+ if (!is_sync_kiocb(iocb)) {
+ io_end = ext4_init_io_end(inode, GFP_NOFS);
+ if (!io_end) {
+ ret = -ENOMEM;
+ goto retake_lock;
+ }
+ /*
+ * Grab reference for DIO. Will be dropped in
+ * ext4_end_io_dio()
+ */
+ iocb->private = ext4_get_io_end(io_end);
+ /*
+ * we save the io structure for current async direct
+ * IO, so that later ext4_map_blocks() could flag the
+ * io structure whether there is a unwritten extents
+ * needs to be converted when IO is completed.
+ */
+ ext4_inode_aio_set(inode, io_end);
+ }
get_block_func = ext4_get_block_write;
dio_flags = DIO_LOCKING;
}
@@ -4317,6 +4319,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
goto bad_inode;
raw_inode = ext4_raw_inode(&iloc);
+ if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
+ EXT4_ERROR_INODE(inode, "root inode unallocated");
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
+
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
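
The inode.c hunks above do two things worth reading together. First, ext4_ext_direct_IO() now sets up the io_end context only on the non-overwrite branch: an overwrite DIO writes into blocks that are already allocated and initialized, so there are no unwritten extents to convert at completion and nothing for an io_end hook to do. Second, ext4_iget() refuses to instantiate a root inode whose on-disk link count is zero, returning -EFSCORRUPTED instead of handing back an unallocated inode. A condensed sketch of the reordered DIO setup (names and flow copied from the hunk, context lines omitted):

    iocb->private = NULL;
    if (overwrite) {
            /* blocks already allocated and written: no conversion needed */
            get_block_func = ext4_get_block_write_nolock;
    } else {
            ext4_inode_aio_set(inode, NULL);
            if (!is_sync_kiocb(iocb)) {
                    io_end = ext4_init_io_end(inode, GFP_NOFS);
                    if (!io_end) {
                            ret = -ENOMEM;
                            goto retake_lock;
                    }
                    /* reference dropped in ext4_end_io_dio() */
                    iocb->private = ext4_get_io_end(io_end);
                    ext4_inode_aio_set(inode, io_end);
            }
            get_block_func = ext4_get_block_write;
            dio_flags = DIO_LOCKING;
    }
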
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 27ff3706d632..03b874f3fefd 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3875,7 +3875,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
err = ext4_mb_load_buddy(sb, group, &e4b);
if (err) {
- ext4_error(sb, "Error loading buddy information for %u", group);
+ ext4_warning(sb, "Error %d loading buddy information for %u",
+ err, group);
put_bh(bitmap_bh);
return 0;
}
@@ -4032,10 +4033,11 @@ repeat:
BUG_ON(pa->pa_type != MB_INODE_PA);
group = ext4_get_group_number(sb, pa->pa_pstart);
- err = ext4_mb_load_buddy(sb, group, &e4b);
+ err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+ GFP_NOFS|__GFP_NOFAIL);
if (err) {
- ext4_error(sb, "Error loading buddy information for %u",
- group);
+ ext4_error(sb, "Error %d loading buddy information for %u",
+ err, group);
continue;
}
@@ -4291,11 +4293,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
spin_unlock(&lg->lg_prealloc_lock);
list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
+ int err;
group = ext4_get_group_number(sb, pa->pa_pstart);
- if (ext4_mb_load_buddy(sb, group, &e4b)) {
- ext4_error(sb, "Error loading buddy information for %u",
- group);
+ err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+ GFP_NOFS|__GFP_NOFAIL);
+ if (err) {
+ ext4_error(sb, "Error %d loading buddy information for %u",
+ err, group);
continue;
}
ext4_lock_group(sb, group);
@@ -5121,8 +5126,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ret = ext4_mb_load_buddy(sb, group, &e4b);
if (ret) {
- ext4_error(sb, "Error in loading buddy "
- "information for %u", group);
+ ext4_warning(sb, "Error %d loading buddy information for %u",
+ ret, group);
return ret;
}
bitmap = e4b.bd_bitmap;
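
The mballoc.c hunks change the buddy-loading error handling on the preallocation discard paths: the inode- and locality-group discard loops now call ext4_mb_load_buddy_gfp() with GFP_NOFS|__GFP_NOFAIL so the discard cannot abort on a transient allocation failure, every message now carries the errno, and the group-discard and trim paths downgrade ext4_error() to ext4_warning(), presumably because a load failure there does not indicate on-disk corruption. The calling pattern the hunks converge on:

    err = ext4_mb_load_buddy_gfp(sb, group, &e4b, GFP_NOFS | __GFP_NOFAIL);
    if (err) {
            /* still possible for reasons other than -ENOMEM */
            ext4_error(sb, "Error %d loading buddy information for %u",
                       err, group);
            continue;       /* skip this group, keep discarding the rest */
    }
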
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index bc79e2ca4adb..8cff133ff5f3 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2130,6 +2130,8 @@ static int ext4_check_descriptors(struct super_block *sb,
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Block bitmap for group %u overlaps "
"superblock", i);
+ if (!(sb->s_flags & MS_RDONLY))
+ return 0;
}
if (block_bitmap < first_block || block_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2142,6 +2144,8 @@ static int ext4_check_descriptors(struct super_block *sb,
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode bitmap for group %u overlaps "
"superblock", i);
+ if (!(sb->s_flags & MS_RDONLY))
+ return 0;
}
if (inode_bitmap < first_block || inode_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2154,6 +2158,8 @@ static int ext4_check_descriptors(struct super_block *sb,
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode table for group %u overlaps "
"superblock", i);
+ if (!(sb->s_flags & MS_RDONLY))
+ return 0;
}
if (inode_table < first_block ||
inode_table + sbi->s_itb_per_group - 1 > last_block) {
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index c356b49540cb..b16bfb52edb2 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1143,16 +1143,14 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
struct ext4_xattr_block_find bs = {
.s = { .not_found = -ENODATA, },
};
- unsigned long no_expand;
+ int no_expand;
int error;
if (!name)
return -EINVAL;
if (strlen(name) > 255)
return -ERANGE;
- down_write(&EXT4_I(inode)->xattr_sem);
- no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
- ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ ext4_write_lock_xattr(inode, &no_expand);
error = ext4_reserve_inode_write(handle, inode, &is.iloc);
if (error)
@@ -1213,7 +1211,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
ext4_xattr_update_super_block(handle, inode->i_sb);
inode->i_ctime = ext4_current_time(inode);
if (!value)
- ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ no_expand = 0;
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
/*
* The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1227,9 +1225,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
cleanup:
brelse(is.iloc.bh);
brelse(bs.bh);
- if (no_expand == 0)
- ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
- up_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_unlock_xattr(inode, &no_expand);
return error;
}
@@ -1313,12 +1309,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
int error = 0, tried_min_extra_isize = 0;
int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
int isize_diff; /* How much do we need to grow i_extra_isize */
+ int no_expand;
+
+ if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
+ return 0;
- down_write(&EXT4_I(inode)->xattr_sem);
- /*
- * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
- */
- ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
retry:
isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
@@ -1512,8 +1507,7 @@ retry:
}
brelse(bh);
out:
- ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
- up_write(&EXT4_I(inode)->xattr_sem);
+ ext4_write_unlock_xattr(inode, &no_expand);
return 0;
cleanup:
@@ -1525,10 +1519,10 @@ cleanup:
kfree(bs);
brelse(bh);
/*
- * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
- * size expansion failed.
+ * Inode size expansion failed; don't try again
*/
- up_write(&EXT4_I(inode)->xattr_sem);
+ no_expand = 1;
+ ext4_write_unlock_xattr(inode, &no_expand);
return error;
}
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 10b0f7323ed6..cdc413476241 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -101,6 +101,38 @@ extern const struct xattr_handler ext4_xattr_security_handler;
#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
+/*
+ * The EXT4_STATE_NO_EXPAND is overloaded and used for two purposes.
+ * The first is to signal that the inline xattrs and data are
+ * taking up so much space that we might as well not keep trying to
+ * expand it. The second is that xattr_sem is taken for writing, so
+ * we shouldn't try to recurse into the inode expansion. For this
+ * second case, we need to make sure that we save and restore the
+ * NO_EXPAND state flag appropriately.
+ */
+static inline void ext4_write_lock_xattr(struct inode *inode, int *save)
+{
+ down_write(&EXT4_I(inode)->xattr_sem);
+ *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
+}
+
+static inline int ext4_write_trylock_xattr(struct inode *inode, int *save)
+{
+ if (down_write_trylock(&EXT4_I(inode)->xattr_sem) == 0)
+ return 0;
+ *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ return 1;
+}
+
+static inline void ext4_write_unlock_xattr(struct inode *inode, int *save)
+{
+ if (*save == 0)
+ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ up_write(&EXT4_I(inode)->xattr_sem);
+}
+
extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
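
The three inline helpers added above bundle xattr_sem with saving and restoring EXT4_STATE_NO_EXPAND, which is the pattern every converted call site in the inline.c and xattr.c hunks now uses instead of open-coding down_write()/up_write() plus the state flag. The canonical caller shape, as in ext4_destroy_inline_data() above:

    int ret, no_expand;

    /* takes xattr_sem for write, remembers whether NO_EXPAND was set,
     * and sets it to block recursive expansion while the lock is held */
    ext4_write_lock_xattr(inode, &no_expand);
    ret = ext4_destroy_inline_data_nolock(handle, inode);
    /* restores the saved NO_EXPAND state unless no_expand was forced to 1 */
    ext4_write_unlock_xattr(inode, &no_expand);

ext4_expand_extra_isize_ea() instead uses ext4_write_trylock_xattr() so a nested expansion attempt simply backs off with 0, and on failure it sets no_expand = 1 before unlocking so NO_EXPAND stays set and the expansion is not retried.
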
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 3c343e922f6e..760d1ad22722 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -68,6 +68,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
.old_blkaddr = index,
.new_blkaddr = index,
.encrypted_page = NULL,
+ .is_meta = is_meta,
};
if (unlikely(!is_meta))
@@ -163,6 +164,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
REQ_RAHEAD,
.encrypted_page = NULL,
.in_list = false,
+ .is_meta = (type != META_POR),
};
struct blk_plug plug;
@@ -384,7 +386,7 @@ static int f2fs_set_meta_page_dirty(struct page *page)
if (!PageUptodate(page))
SetPageUptodate(page);
if (!PageDirty(page)) {
- f2fs_set_page_dirty_nobuffers(page);
+ __set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
SetPagePrivate(page);
f2fs_trace_pid(page);
@@ -573,13 +575,8 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
struct node_info ni;
int err = acquire_orphan_inode(sbi);
- if (err) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_msg(sbi->sb, KERN_WARNING,
- "%s: orphan failed (ino=%x), run fsck to fix.",
- __func__, ino);
- return err;
- }
+ if (err)
+ goto err_out;
__add_ino_entry(sbi, ino, 0, ORPHAN_INO);
@@ -593,6 +590,11 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
return PTR_ERR(inode);
}
+ err = dquot_initialize(inode);
+ if (err)
+ goto err_out;
+
+ dquot_initialize(inode);
clear_nlink(inode);
/* truncate all the data during iput */
@@ -602,14 +604,18 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
/* ENOMEM was fully retried in f2fs_evict_inode. */
if (ni.blk_addr != NULL_ADDR) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_msg(sbi->sb, KERN_WARNING,
- "%s: orphan failed (ino=%x) by kernel, retry mount.",
- __func__, ino);
- return -EIO;
+ err = -EIO;
+ goto err_out;
}
__remove_ino_entry(sbi, ino, ORPHAN_INO);
return 0;
+
+err_out:
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: orphan failed (ino=%x), run fsck to fix.",
+ __func__, ino);
+ return err;
}
int recover_orphan_inodes(struct f2fs_sb_info *sbi)
@@ -1140,6 +1146,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (cpc->reason & CP_TRIMMED)
__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
if (cpc->reason & CP_UMOUNT)
__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
@@ -1166,6 +1174,39 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
spin_unlock_irqrestore(&sbi->cp_lock, flags);
}
+static void commit_checkpoint(struct f2fs_sb_info *sbi,
+ void *src, block_t blk_addr)
+{
+ struct writeback_control wbc = {
+ .for_reclaim = 0,
+ };
+
+ /*
+ * pagevec_lookup_tag and lock_page again will take
+ * some extra time. Therefore, update_meta_pages and
+ * sync_meta_pages are combined in this function.
+ */
+ struct page *page = grab_meta_page(sbi, blk_addr);
+ int err;
+
+ memcpy(page_address(page), src, PAGE_SIZE);
+ set_page_dirty(page);
+
+ f2fs_wait_on_page_writeback(page, META, true);
+ f2fs_bug_on(sbi, PageWriteback(page));
+ if (unlikely(!clear_page_dirty_for_io(page)))
+ f2fs_bug_on(sbi, 1);
+
+ /* writeout cp pack 2 page */
+ err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
+ f2fs_bug_on(sbi, err);
+
+ f2fs_put_page(page, 0);
+
+ /* submit checkpoint (with barrier if NOBARRIER is not set) */
+ f2fs_submit_merged_write(sbi, META_FLUSH);
+}
+
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1268,16 +1309,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
}
- /* need to wait for end_io results */
- wait_on_all_pages_writeback(sbi);
- if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
-
- /* flush all device cache */
- err = f2fs_flush_device_cache(sbi);
- if (err)
- return err;
-
/* write out checkpoint buffer at block 0 */
update_meta_page(sbi, ckpt, start_blk++);
@@ -1305,26 +1336,26 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
start_blk += NR_CURSEG_NODE_TYPE;
}
- /* writeout checkpoint block */
- update_meta_page(sbi, ckpt, start_blk);
+ /* update user_block_counts */
+ sbi->last_valid_block_count = sbi->total_valid_block_count;
+ percpu_counter_set(&sbi->alloc_valid_block_count, 0);
+
+ /* Here, we have one bio containing the CP pack, except the cp pack 2 page */
+ sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
- /* wait for previous submitted node/meta pages writeback */
+ /* wait for previous submitted meta pages writeback */
wait_on_all_pages_writeback(sbi);
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
- filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);
-
- /* update user_block_counts */
- sbi->last_valid_block_count = sbi->total_valid_block_count;
- percpu_counter_set(&sbi->alloc_valid_block_count, 0);
-
- /* Here, we only have one bio having CP pack */
- sync_meta_pages(sbi, META_FLUSH, LONG_MAX, FS_CP_META_IO);
+ /* flush all device cache */
+ err = f2fs_flush_device_cache(sbi);
+ if (err)
+ return err;
- /* wait for previous submitted meta pages writeback */
+ /* barrier and flush checkpoint cp pack 2 page if it can */
+ commit_checkpoint(sbi, ckpt, start_blk);
wait_on_all_pages_writeback(sbi);
release_ino_entry(sbi, false);
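
The checkpoint hunks above reorder the tail of do_checkpoint() around the new commit_checkpoint() helper: everything in the CP pack except its final page is written and waited on first, the device caches are flushed, and only then is the cp pack 2 page written out behind a META_FLUSH barrier, so the checkpoint only becomes valid once that final page lands on media. Condensed from the hunks (error handling as shown above):

    /* CP pack, minus the cp pack 2 page, goes out in one pass */
    sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
    wait_on_all_pages_writeback(sbi);
    if (unlikely(f2fs_cp_error(sbi)))
            return -EIO;

    /* data and metadata are durable before the commit record */
    err = f2fs_flush_device_cache(sbi);
    if (err)
            return err;

    /* cp pack 2 page: written, flushed and barriered last */
    commit_checkpoint(sbi, ckpt, start_blk);
    wait_on_all_pages_writeback(sbi);
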
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index d5299265feea..a670702cf3ff 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -19,8 +19,6 @@
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
-#include <linux/mm.h>
-#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "f2fs.h"
@@ -30,6 +28,11 @@
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>
+#define NUM_PREALLOC_POST_READ_CTXS 128
+
+static struct kmem_cache *bio_post_read_ctx_cache;
+static mempool_t *bio_post_read_ctx_pool;
+
static bool __is_cp_guaranteed(struct page *page)
{
struct address_space *mapping = page->mapping;
@@ -50,11 +53,77 @@ static bool __is_cp_guaranteed(struct page *page)
return false;
}
-static void f2fs_read_end_io(struct bio *bio)
+/* postprocessing steps for read bios */
+enum bio_post_read_step {
+ STEP_INITIAL = 0,
+ STEP_DECRYPT,
+};
+
+struct bio_post_read_ctx {
+ struct bio *bio;
+ struct work_struct work;
+ unsigned int cur_step;
+ unsigned int enabled_steps;
+};
+
+static void __read_end_io(struct bio *bio)
{
- struct bio_vec *bvec;
+ struct page *page;
+ struct bio_vec *bv;
int i;
+ bio_for_each_segment_all(bv, bio, i) {
+ page = bv->bv_page;
+
+ /* PG_error was set if any post_read step failed */
+ if (bio->bi_error || PageError(page)) {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ } else {
+ SetPageUptodate(page);
+ }
+ unlock_page(page);
+ }
+ if (bio->bi_private)
+ mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+ bio_put(bio);
+}
+
+static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
+
+static void decrypt_work(struct work_struct *work)
+{
+ struct bio_post_read_ctx *ctx =
+ container_of(work, struct bio_post_read_ctx, work);
+
+ fscrypt_decrypt_bio(ctx->bio);
+
+ bio_post_read_processing(ctx);
+}
+
+static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
+{
+ switch (++ctx->cur_step) {
+ case STEP_DECRYPT:
+ if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
+ INIT_WORK(&ctx->work, decrypt_work);
+ fscrypt_enqueue_decrypt_work(&ctx->work);
+ return;
+ }
+ ctx->cur_step++;
+ /* fall-through */
+ default:
+ __read_end_io(ctx->bio);
+ }
+}
+
+static bool f2fs_bio_post_read_required(struct bio *bio)
+{
+ return bio->bi_private && !bio->bi_error;
+}
+
+static void f2fs_read_end_io(struct bio *bio)
+{
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
f2fs_show_injection_info(FAULT_IO);
@@ -62,28 +131,15 @@ static void f2fs_read_end_io(struct bio *bio)
}
#endif
- if (f2fs_bio_encrypted(bio)) {
- if (bio->bi_error) {
- fscrypt_release_ctx(bio->bi_private);
- } else {
- fscrypt_decrypt_bio_pages(bio->bi_private, bio);
- return;
- }
- }
-
- bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
+ if (f2fs_bio_post_read_required(bio)) {
+ struct bio_post_read_ctx *ctx = bio->bi_private;
- if (!bio->bi_error) {
- if (!PageUptodate(page))
- SetPageUptodate(page);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
- unlock_page(page);
+ ctx->cur_step = STEP_INITIAL;
+ bio_post_read_processing(ctx);
+ return;
}
- bio_put(bio);
+
+ __read_end_io(bio);
}
static void f2fs_write_end_io(struct bio *bio)
@@ -174,15 +230,22 @@ static bool __same_bdev(struct f2fs_sb_info *sbi,
*/
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
struct writeback_control *wbc,
- int npages, bool is_read)
+ int npages, bool is_read,
+ enum page_type type, enum temp_type temp)
{
struct bio *bio;
bio = f2fs_bio_alloc(sbi, npages, true);
f2fs_target_device(sbi, blk_addr, bio);
- bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
- bio->bi_private = is_read ? NULL : sbi;
+ if (is_read) {
+ bio->bi_end_io = f2fs_read_end_io;
+ bio->bi_private = NULL;
+ } else {
+ bio->bi_end_io = f2fs_write_end_io;
+ bio->bi_private = sbi;
+ bio->bi_write_hint = io_type_to_rw_hint(sbi, type, temp);
+ }
if (wbc)
wbc_init_bio(wbc, bio);
@@ -195,13 +258,12 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
if (!is_read_io(bio_op(bio))) {
unsigned int start;
- if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
- current->plug && (type == DATA || type == NODE))
- blk_finish_plug(current->plug);
-
if (type != DATA && type != NODE)
goto submit_io;
+ if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
+ blk_finish_plug(current->plug);
+
start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
start %= F2FS_IO_SIZE(sbi);
@@ -376,12 +438,13 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
+ verify_block_addr(fio, fio->new_blkaddr);
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
- 1, is_read_io(fio->op));
+ 1, is_read_io(fio->op), fio->type, fio->temp);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
@@ -421,8 +484,8 @@ next:
}
if (fio->old_blkaddr != NEW_ADDR)
- verify_block_addr(sbi, fio->old_blkaddr);
- verify_block_addr(sbi, fio->new_blkaddr);
+ verify_block_addr(fio, fio->old_blkaddr);
+ verify_block_addr(fio, fio->new_blkaddr);
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
@@ -444,7 +507,8 @@ alloc_new:
goto out_fail;
}
io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
- BIO_MAX_PAGES, false);
+ BIO_MAX_PAGES, false,
+ fio->type, fio->temp);
io->fio = *fio;
}
@@ -472,29 +536,33 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
unsigned nr_pages)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct fscrypt_ctx *ctx = NULL;
struct bio *bio;
-
- if (f2fs_encrypted_file(inode)) {
- ctx = fscrypt_get_ctx(inode, GFP_NOFS);
- if (IS_ERR(ctx))
- return ERR_CAST(ctx);
-
- /* wait the page to be moved by cleaning */
- f2fs_wait_on_block_writeback(sbi, blkaddr);
- }
+ struct bio_post_read_ctx *ctx;
+ unsigned int post_read_steps = 0;
bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
- if (!bio) {
- if (ctx)
- fscrypt_release_ctx(ctx);
+ if (!bio)
return ERR_PTR(-ENOMEM);
- }
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
- bio->bi_private = ctx;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ if (f2fs_encrypted_file(inode))
+ post_read_steps |= 1 << STEP_DECRYPT;
+ if (post_read_steps) {
+ ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
+ if (!ctx) {
+ bio_put(bio);
+ return ERR_PTR(-ENOMEM);
+ }
+ ctx->bio = bio;
+ ctx->enabled_steps = post_read_steps;
+ bio->bi_private = ctx;
+
+ /* wait the page to be moved by cleaning */
+ f2fs_wait_on_block_writeback(sbi, blkaddr);
+ }
+
return bio;
}
@@ -831,13 +899,6 @@ alloc:
return 0;
}
-static inline bool __force_buffered_io(struct inode *inode, int rw)
-{
- return (f2fs_encrypted_file(inode) ||
- (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
- F2FS_I_SB(inode)->s_ndevs);
-}
-
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
@@ -868,9 +929,8 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
map.m_seg_type = NO_CHECK_TYPE;
if (direct_io) {
- /* map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint); */
- map.m_seg_type = rw_hint_to_seg_type(WRITE_LIFE_NOT_SET);
- flag = __force_buffered_io(inode, WRITE) ?
+ map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint);
+ flag = f2fs_force_buffered_io(inode, WRITE) ?
F2FS_GET_BLOCK_PRE_AIO :
F2FS_GET_BLOCK_PRE_DIO;
goto map_blocks;
@@ -1114,6 +1174,31 @@ out:
return err;
}
+bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
+{
+ struct f2fs_map_blocks map;
+ block_t last_lblk;
+ int err;
+
+ if (pos + len > i_size_read(inode))
+ return false;
+
+ map.m_lblk = F2FS_BYTES_TO_BLK(pos);
+ map.m_next_pgofs = NULL;
+ map.m_next_extent = NULL;
+ map.m_seg_type = NO_CHECK_TYPE;
+ last_lblk = F2FS_BLK_ALIGN(pos + len);
+
+ while (map.m_lblk < last_lblk) {
+ map.m_len = last_lblk - map.m_lblk;
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
+ if (err || map.m_len == 0)
+ return false;
+ map.m_lblk += map.m_len;
+ }
+ return true;
+}
+
static int __get_data_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create, int flag,
pgoff_t *next_pgofs, int seg_type)
@@ -1151,8 +1236,7 @@ static int get_data_block_dio(struct inode *inode, sector_t iblock,
return __get_data_block(inode, iblock, bh_result, create,
F2FS_GET_BLOCK_DEFAULT, NULL,
rw_hint_to_seg_type(
- WRITE_LIFE_NOT_SET));
- /* inode->i_write_hint)); */
+ inode->i_write_hint));
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -1499,7 +1583,7 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
if (!f2fs_encrypted_file(inode))
return 0;
- /* wait for GCed encrypted page writeback */
+ /* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
retry_encrypt:
@@ -1649,6 +1733,7 @@ got_it:
goto out_writepage;
set_page_writeback(page);
+ ClearPageError(page);
f2fs_put_dnode(&dn);
if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
@@ -1671,6 +1756,7 @@ got_it:
goto out_writepage;
set_page_writeback(page);
+ ClearPageError(page);
/* LFS mode write path */
write_data_page(&dn, fio);
@@ -2211,8 +2297,8 @@ repeat:
f2fs_wait_on_page_writeback(page, DATA, false);
- /* wait for GCed encrypted page writeback */
- if (f2fs_encrypted_file(inode))
+ /* wait for GCed page writeback via META_MAPPING */
+ if (f2fs_post_read_required(inode))
f2fs_wait_on_block_writeback(sbi, blkaddr);
if (len == PAGE_SIZE || PageUptodate(page))
@@ -2304,15 +2390,18 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
size_t count = iov_iter_count(iter);
int rw = iov_iter_rw(iter);
int err;
+ enum rw_hint hint = iocb->ki_hint;
+ int whint_mode = F2FS_OPTION(sbi).whint_mode;
err = check_direct_IO(inode, iter, offset);
if (err)
return err;
- if (__force_buffered_io(inode, rw))
+ if (f2fs_force_buffered_io(inode, rw))
return 0;
if (trace_android_fs_dataread_start_enabled() &&
@@ -2339,11 +2428,24 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
}
trace_f2fs_direct_IO_enter(inode, offset, count, rw);
- down_read(&F2FS_I(inode)->dio_rwsem[rw]);
+ if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
+ iocb->ki_hint = WRITE_LIFE_NOT_SET;
+
+ if (!down_read_trylock(&F2FS_I(inode)->dio_rwsem[rw])) {
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ iocb->ki_hint = hint;
+ err = -EAGAIN;
+ goto out;
+ }
+ down_read(&F2FS_I(inode)->dio_rwsem[rw]);
+ }
+
err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
up_read(&F2FS_I(inode)->dio_rwsem[rw]);
if (rw == WRITE) {
+ if (whint_mode == WHINT_MODE_OFF)
+ iocb->ki_hint = hint;
if (err > 0) {
f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
err);
@@ -2352,7 +2454,7 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
f2fs_write_failed(mapping, offset + count);
}
}
-
+out:
if (trace_android_fs_dataread_start_enabled() &&
(iov_iter_rw(iter) == READ))
trace_android_fs_dataread_end(inode, offset, count);
@@ -2409,37 +2511,6 @@ int f2fs_release_page(struct page *page, gfp_t wait)
return 1;
}
-/*
- * This was copied from __set_page_dirty_buffers which gives higher performance
- * in very high speed storages. (e.g., pmem)
- */
-void f2fs_set_page_dirty_nobuffers(struct page *page)
-{
- struct address_space *mapping = page->mapping;
- struct mem_cgroup *memcg;
- unsigned long flags;
-
- if (unlikely(!mapping))
- return;
-
- spin_lock(&mapping->private_lock);
- memcg = mem_cgroup_begin_page_stat(page);
- SetPageDirty(page);
- spin_unlock(&mapping->private_lock);
-
- spin_lock_irqsave(&mapping->tree_lock, flags);
- WARN_ON_ONCE(!PageUptodate(page));
- account_page_dirtied(page, mapping, memcg);
- radix_tree_tag_set(&mapping->page_tree,
- page_index(page), PAGECACHE_TAG_DIRTY);
- spin_unlock_irqrestore(&mapping->tree_lock, flags);
-
- mem_cgroup_end_page_stat(memcg);
-
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- return;
-}
-
static int f2fs_set_data_page_dirty(struct page *page)
{
struct address_space *mapping = page->mapping;
@@ -2463,7 +2534,7 @@ static int f2fs_set_data_page_dirty(struct page *page)
}
if (!PageDirty(page)) {
- f2fs_set_page_dirty_nobuffers(page);
+ __set_page_dirty_nobuffers(page);
update_dirty_page(inode, page);
return 1;
}
@@ -2556,3 +2627,27 @@ const struct address_space_operations f2fs_dblock_aops = {
.migratepage = f2fs_migrate_page,
#endif
};
+
+int __init f2fs_init_post_read_processing(void)
+{
+ bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
+ if (!bio_post_read_ctx_cache)
+ goto fail;
+ bio_post_read_ctx_pool =
+ mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
+ bio_post_read_ctx_cache);
+ if (!bio_post_read_ctx_pool)
+ goto fail_free_cache;
+ return 0;
+
+fail_free_cache:
+ kmem_cache_destroy(bio_post_read_ctx_cache);
+fail:
+ return -ENOMEM;
+}
+
+void __exit f2fs_destroy_post_read_processing(void)
+{
+ mempool_destroy(bio_post_read_ctx_pool);
+ kmem_cache_destroy(bio_post_read_ctx_cache);
+}
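
The data.c rework above replaces the fscrypt_ctx plumbing in f2fs_read_end_io() with a small per-bio state machine: bio_post_read_ctx carries enabled_steps as a bitmask of postprocessing stages and cur_step as the walker, and bio_post_read_processing() either queues a work item for the next enabled stage or falls through to __read_end_io(). A standalone toy (plain C, not kernel code) that models just the bitmask walk; STEP_DECRYPT is the only stage the hunks wire up, everything else here is illustrative:

    #include <stdio.h>

    /* toy model of the bio_post_read_ctx step walk */
    enum step { STEP_INITIAL = 0, STEP_DECRYPT, STEP_LAST };

    struct ctx {
            unsigned int cur_step;
            unsigned int enabled_steps;     /* bitmask of stages to run */
    };

    static void run_steps(struct ctx *ctx)
    {
            while (++ctx->cur_step < STEP_LAST) {
                    if (ctx->enabled_steps & (1u << ctx->cur_step))
                            printf("run stage %u\n", ctx->cur_step);
                    /* the kernel version queues a work item here and
                     * resumes the walk from the worker; the toy just
                     * continues to the next stage */
            }
            printf("end_io: unlock pages, free ctx\n");
    }

    int main(void)
    {
            struct ctx encrypted = { STEP_INITIAL, 1u << STEP_DECRYPT };
            struct ctx plain     = { STEP_INITIAL, 0 };

            run_steps(&encrypted);  /* decrypt stage, then end_io */
            run_steps(&plain);      /* straight to end_io */
            return 0;
    }

Drawing the contexts from a dedicated mempool (f2fs_init_post_read_processing() above) gives read submission a reserve of contexts under memory pressure.
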
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 560b707050ca..41d32171bd52 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -361,6 +361,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
struct page *dpage)
{
struct page *page;
+ int dummy_encrypt = DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(dir));
int err;
if (is_inode_flag_set(inode, FI_NEW_INODE)) {
@@ -387,7 +388,8 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
if (err)
goto put_error;
- if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode)) {
+ if ((f2fs_encrypted_inode(dir) || dummy_encrypt) &&
+ f2fs_may_encrypt(inode)) {
err = fscrypt_inherit_context(dir, inode, page, false);
if (err)
goto put_error;
@@ -396,8 +398,6 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
page = get_node_page(F2FS_I_SB(dir), inode->i_ino);
if (IS_ERR(page))
return page;
-
- set_cold_node(inode, page);
}
if (new_name) {
@@ -704,7 +704,8 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
- add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);
+ if (F2FS_OPTION(F2FS_I_SB(dir)).fsync_mode == FSYNC_MODE_STRICT)
+ add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);
if (f2fs_has_inline_dentry(dir))
return f2fs_delete_inline_entry(dentry, page, dir, inode);
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index ff2352a0ed15..d5a861bf2b42 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -460,7 +460,7 @@ static struct extent_node *__insert_extent_tree(struct inode *inode,
struct rb_node *insert_parent)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct rb_node **p = &et->root.rb_node;
+ struct rb_node **p;
struct rb_node *parent = NULL;
struct extent_node *en = NULL;
@@ -706,6 +706,9 @@ void f2fs_drop_extent_tree(struct inode *inode)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et = F2FS_I(inode)->extent_tree;
+ if (!f2fs_may_extent_tree(inode))
+ return;
+
set_inode_flag(inode, FI_NO_EXTENT);
write_lock(&et->lock);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 34595ca42f68..d0bfcfed35e2 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -99,9 +99,10 @@ extern char *fault_name[FAULT_MAX];
#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
#define F2FS_MOUNT_RESERVE_ROOT 0x01000000
-#define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
-#define set_opt(sbi, option) ((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
-#define test_opt(sbi, option) ((sbi)->mount_opt.opt & F2FS_MOUNT_##option)
+#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
+#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
+#define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
+#define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
#define ver_after(a, b) (typecheck(unsigned long long, a) && \
typecheck(unsigned long long, b) && \
@@ -114,7 +115,26 @@ typedef u32 block_t; /*
typedef u32 nid_t;
struct f2fs_mount_info {
- unsigned int opt;
+ unsigned int opt;
+ int write_io_size_bits; /* Write IO size bits */
+ block_t root_reserved_blocks; /* root reserved blocks */
+ kuid_t s_resuid; /* reserved blocks for uid */
+ kgid_t s_resgid; /* reserved blocks for gid */
+ int active_logs; /* # of active logs */
+ int inline_xattr_size; /* inline xattr size */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct f2fs_fault_info fault_info; /* For fault injection */
+#endif
+#ifdef CONFIG_QUOTA
+ /* Names of quota files with journalled quota */
+ char *s_qf_names[MAXQUOTAS];
+ int s_jquota_fmt; /* Format of quota to use */
+#endif
+ /* For which write hints are passed down to block layer */
+ int whint_mode;
+ int alloc_mode; /* segment allocation policy */
+ int fsync_mode; /* fsync policy */
+ bool test_dummy_encryption; /* test dummy encryption */
};
#define F2FS_FEATURE_ENCRYPT 0x0001
@@ -126,6 +146,8 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040
#define F2FS_FEATURE_QUOTA_INO 0x0080
#define F2FS_FEATURE_INODE_CRTIME 0x0100
+#define F2FS_FEATURE_LOST_FOUND 0x0200
+#define F2FS_FEATURE_VERITY 0x0400 /* reserved */
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -509,7 +531,7 @@ static inline void make_dentry_ptr_block(struct inode *inode,
d->inode = inode;
d->max = NR_DENTRY_IN_BLOCK;
d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
- d->bitmap = &t->dentry_bitmap;
+ d->bitmap = t->dentry_bitmap;
d->dentry = t->dentry;
d->filename = t->filename;
}
@@ -635,6 +657,8 @@ enum {
#define FADVISE_ENCRYPT_BIT 0x04
#define FADVISE_ENC_NAME_BIT 0x08
#define FADVISE_KEEP_SIZE_BIT 0x10
+#define FADVISE_HOT_BIT 0x20
+#define FADVISE_VERITY_BIT 0x40 /* reserved */
#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
@@ -649,6 +673,9 @@ enum {
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
#define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
+#define file_is_hot(inode) is_file(inode, FADVISE_HOT_BIT)
+#define file_set_hot(inode) set_file(inode, FADVISE_HOT_BIT)
+#define file_clear_hot(inode) clear_file(inode, FADVISE_HOT_BIT)
#define DEF_DIR_LEVEL 0
@@ -696,6 +723,7 @@ struct f2fs_inode_info {
kprojid_t i_projid; /* id for project quota */
int i_inline_xattr_size; /* inline xattr size */
struct timespec i_crtime; /* inode creation time */
+ struct timespec i_disk_time[4]; /* inode disk times */
};
static inline void get_extent_info(struct extent_info *ext,
@@ -802,7 +830,7 @@ struct f2fs_nm_info {
unsigned int nid_cnt[MAX_NID_STATE]; /* the number of free node id */
spinlock_t nid_list_lock; /* protect nid lists ops */
struct mutex build_lock; /* lock for build free nids */
- unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
+ unsigned char **free_nid_bitmap;
unsigned char *nat_block_bitmap;
unsigned short *free_nid_count; /* free nid count of NAT block */
@@ -1035,6 +1063,7 @@ struct f2fs_io_info {
bool submitted; /* indicate IO submission */
int need_lock; /* indicate we need to lock cp_rwsem */
bool in_list; /* indicate fio is in io_list */
+ bool is_meta; /* indicate borrow meta inode mapping or not */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
};
@@ -1096,10 +1125,34 @@ enum {
MAX_TIME,
};
+enum {
+ WHINT_MODE_OFF, /* not pass down write hints */
+ WHINT_MODE_USER, /* try to pass down hints given by users */
+ WHINT_MODE_FS, /* pass down hints with F2FS policy */
+};
+
+enum {
+ ALLOC_MODE_DEFAULT, /* stay default */
+ ALLOC_MODE_REUSE, /* reuse segments as much as possible */
+};
+
+enum fsync_mode {
+ FSYNC_MODE_POSIX, /* fsync follows posix semantics */
+ FSYNC_MODE_STRICT, /* fsync behaves in line with ext4 */
+};
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#define DUMMY_ENCRYPTION_ENABLED(sbi) \
+ (unlikely(F2FS_OPTION(sbi).test_dummy_encryption))
+#else
+#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
+#endif
+
struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
struct proc_dir_entry *s_proc; /* proc entry */
struct f2fs_super_block *raw_super; /* raw super block pointer */
+ struct rw_semaphore sb_lock; /* lock for raw super block */
int valid_super_block; /* valid super block no */
unsigned long s_flag; /* flags for sbi */
@@ -1119,7 +1172,6 @@ struct f2fs_sb_info {
struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
/* bio ordering for NODE/DATA */
- int write_io_size_bits; /* Write IO size bits */
mempool_t *write_io_dummy; /* Dummy pages */
/* for checkpoint */
@@ -1169,9 +1221,7 @@ struct f2fs_sb_info {
unsigned int total_node_count; /* total node block count */
unsigned int total_valid_node_count; /* valid node block count */
loff_t max_file_blocks; /* max block index of file */
- int active_logs; /* # of active logs */
int dir_level; /* directory level */
- int inline_xattr_size; /* inline xattr size */
unsigned int trigger_ssr_threshold; /* threshold to trigger ssr */
int readdir_ra; /* readahead inode in readdir */
@@ -1181,9 +1231,6 @@ struct f2fs_sb_info {
block_t last_valid_block_count; /* for recovery */
block_t reserved_blocks; /* configurable reserved blocks */
block_t current_reserved_blocks; /* current reserved blocks */
- block_t root_reserved_blocks; /* root reserved blocks */
- kuid_t s_resuid; /* reserved blocks for uid */
- kgid_t s_resgid; /* reserved blocks for gid */
unsigned int nquota_files; /* # of quota sysfile */
@@ -1268,17 +1315,6 @@ struct f2fs_sb_info {
/* Precomputed FS UUID checksum for seeding other checksums */
__u32 s_chksum_seed;
-
- /* For fault injection */
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- struct f2fs_fault_info fault_info;
-#endif
-
-#ifdef CONFIG_QUOTA
- /* Names of quota files with journalled quota */
- char *s_qf_names[MAXQUOTAS];
- int s_jquota_fmt; /* Format of quota to use */
-#endif
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
@@ -1288,7 +1324,7 @@ struct f2fs_sb_info {
__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
- struct f2fs_fault_info *ffi = &sbi->fault_info;
+ struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
if (!ffi->inject_rate)
return false;
@@ -1637,7 +1673,7 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
}
static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
- struct inode *inode)
+ struct inode *inode, bool cap)
{
if (!inode)
return true;
@@ -1645,12 +1681,12 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
return false;
if (IS_NOQUOTA(inode))
return true;
- if (capable(CAP_SYS_RESOURCE))
+ if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
return true;
- if (uid_eq(sbi->s_resuid, current_fsuid()))
+ if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
+ in_group_p(F2FS_OPTION(sbi).s_resgid))
return true;
- if (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) &&
- in_group_p(sbi->s_resgid))
+ if (cap && capable(CAP_SYS_RESOURCE))
return true;
return false;
}
@@ -1685,8 +1721,8 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
avail_user_block_count = sbi->user_block_count -
sbi->current_reserved_blocks;
- if (!__allow_reserved_blocks(sbi, inode))
- avail_user_block_count -= sbi->root_reserved_blocks;
+ if (!__allow_reserved_blocks(sbi, inode, true))
+ avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
diff = sbi->total_valid_block_count - avail_user_block_count;
@@ -1821,6 +1857,12 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
int offset;
+ if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
+ offset = (flag == SIT_BITMAP) ?
+ le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
+ return &ckpt->sit_nat_version_bitmap + offset;
+ }
+
if (__cp_payload(sbi) > 0) {
if (flag == NAT_BITMAP)
return &ckpt->sit_nat_version_bitmap;
@@ -1886,8 +1928,8 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
valid_block_count = sbi->total_valid_block_count +
sbi->current_reserved_blocks + 1;
- if (!__allow_reserved_blocks(sbi, inode))
- valid_block_count += sbi->root_reserved_blocks;
+ if (!__allow_reserved_blocks(sbi, inode, false))
+ valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
if (unlikely(valid_block_count > sbi->user_block_count)) {
spin_unlock(&sbi->stat_lock);
@@ -2489,7 +2531,17 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
}
if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
file_keep_isize(inode) ||
- i_size_read(inode) & PAGE_MASK)
+ i_size_read(inode) & ~PAGE_MASK)
+ return false;
+
+ if (!timespec_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
+ return false;
+ if (!timespec_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
+ return false;
+ if (!timespec_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
+ return false;
+ if (!timespec_equal(F2FS_I(inode)->i_disk_time + 3,
+ &F2FS_I(inode)->i_crtime))
return false;
down_read(&F2FS_I(inode)->i_sem);
@@ -2499,8 +2551,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
return ret;
}
-#define sb_rdonly f2fs_readonly
-static inline int f2fs_readonly(struct super_block *sb)
+static inline bool f2fs_readonly(struct super_block *sb)
{
return sb->s_flags & MS_RDONLY;
}
@@ -2562,15 +2613,6 @@ static inline void *kvzalloc(size_t size, gfp_t flags)
return ret;
}
-enum rw_hint {
- WRITE_LIFE_NOT_SET = 0,
- WRITE_LIFE_NONE = 1, /* RWH_WRITE_LIFE_NONE */
- WRITE_LIFE_SHORT = 2, /* RWH_WRITE_LIFE_SHORT */
- WRITE_LIFE_MEDIUM = 3, /* RWH_WRITE_LIFE_MEDIUM */
- WRITE_LIFE_LONG = 4, /* RWH_WRITE_LIFE_LONG */
- WRITE_LIFE_EXTREME = 5, /* RWH_WRITE_LIFE_EXTREME */
-};
-
static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
@@ -2679,6 +2721,8 @@ void handle_failed_inode(struct inode *inode);
/*
* namei.c
*/
+int update_extension_list(struct f2fs_sb_info *sbi, const char *name,
+ bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
/*
@@ -2851,6 +2895,8 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init create_segment_manager_caches(void);
void destroy_segment_manager_caches(void);
int rw_hint_to_seg_type(enum rw_hint hint);
+enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi, enum page_type type,
+ enum temp_type temp);
/*
* checkpoint.c
@@ -2891,6 +2937,8 @@ void destroy_checkpoint_caches(void);
/*
* data.c
*/
+int f2fs_init_post_read_processing(void);
+void f2fs_destroy_post_read_processing(void);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct inode *inode, nid_t ino, pgoff_t idx,
@@ -2922,7 +2970,6 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
-void f2fs_set_page_dirty_nobuffers(struct page *page);
int __f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc,
enum iostat_type io_type);
@@ -2933,6 +2980,7 @@ int f2fs_release_page(struct page *page, gfp_t wait);
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page, enum migrate_mode mode);
#endif
+bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
/*
* gc.c
@@ -3250,50 +3298,30 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
#endif
}
-static inline bool f2fs_bio_encrypted(struct bio *bio)
-{
- return bio->bi_private != NULL;
-}
-
-static inline int f2fs_sb_has_crypto(struct super_block *sb)
-{
- return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
-}
-
-static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb)
-{
- return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED);
-}
-
-static inline int f2fs_sb_has_extra_attr(struct super_block *sb)
-{
- return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_EXTRA_ATTR);
-}
-
-static inline int f2fs_sb_has_project_quota(struct super_block *sb)
-{
- return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_PRJQUOTA);
-}
-
-static inline int f2fs_sb_has_inode_chksum(struct super_block *sb)
-{
- return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CHKSUM);
-}
-
-static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb)
+/*
+ * Returns true if the reads of the inode's data need to undergo some
+ * postprocessing step, like decryption or authenticity verification.
+ */
+static inline bool f2fs_post_read_required(struct inode *inode)
{
- return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
+ return f2fs_encrypted_file(inode);
}
-static inline int f2fs_sb_has_quota_ino(struct super_block *sb)
-{
- return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_QUOTA_INO);
+#define F2FS_FEATURE_FUNCS(name, flagname) \
+static inline int f2fs_sb_has_##name(struct super_block *sb) \
+{ \
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_##flagname); \
}
-static inline int f2fs_sb_has_inode_crtime(struct super_block *sb)
-{
- return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CRTIME);
-}
+F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
+F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
+F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
+F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
+F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
+F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
+F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
+F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
+F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
#ifdef CONFIG_BLK_DEV_ZONED
static inline int get_blkz_type(struct f2fs_sb_info *sbi,
@@ -3313,7 +3341,7 @@ static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
{
struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
- return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb);
+ return blk_queue_discard(q) || f2fs_sb_has_blkzoned(sbi->sb);
}
static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
@@ -3342,4 +3370,11 @@ static inline bool f2fs_may_encrypt(struct inode *inode)
#endif
}
+static inline bool f2fs_force_buffered_io(struct inode *inode, int rw)
+{
+ return (f2fs_post_read_required(inode) ||
+ (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
+ F2FS_I_SB(inode)->s_ndevs);
+}
+
#endif
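
The f2fs.h hunks collapse the per-feature predicates into the F2FS_FEATURE_FUNCS() generator; each instantiation expands to the same F2FS_HAS_FEATURE() test the removed open-coded helpers performed, e.g.:

    /* F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) expands to: */
    static inline int f2fs_sb_has_encrypt(struct super_block *sb)
    {
            return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
    }

f2fs_post_read_required() likewise gives the read path one predicate to grow behind: today it is just f2fs_encrypted_file(), but the reserved VERITY feature and fadvise bits above suggest further postprocessing steps are expected to hang off the same hook.
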
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 65cda5bc61b7..7587758a285f 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -113,8 +113,8 @@ mapped:
/* fill the page */
f2fs_wait_on_page_writeback(page, DATA, false);
- /* wait for GCed encrypted page writeback */
- if (f2fs_encrypted_file(inode))
+ /* wait for GCed page writeback via META_MAPPING */
+ if (f2fs_post_read_required(inode))
f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);
out_sem:
@@ -166,9 +166,10 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
cp_reason = CP_NODE_NEED_CP;
else if (test_opt(sbi, FASTBOOT))
cp_reason = CP_FASTBOOT_MODE;
- else if (sbi->active_logs == 2)
+ else if (F2FS_OPTION(sbi).active_logs == 2)
cp_reason = CP_SPEC_LOG_NUM;
- else if (need_dentry_mark(sbi, inode->i_ino) &&
+ else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
+ need_dentry_mark(sbi, inode->i_ino) &&
exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO))
cp_reason = CP_RECOVER_DIR;
@@ -481,6 +482,9 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
if (err)
return err;
+
+ filp->f_mode |= FMODE_NOWAIT;
+
return dquot_file_open(inode, filp);
}
@@ -571,7 +575,6 @@ truncate_out:
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int blocksize = inode->i_sb->s_blocksize;
struct dnode_of_data dn;
pgoff_t free_from;
int count = 0, err = 0;
@@ -580,7 +583,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
trace_f2fs_truncate_blocks_enter(inode, from);
- free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
+ free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
if (free_from >= sbi->max_file_blocks)
goto free_partial;
@@ -1354,8 +1357,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
}
out:
- if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
- f2fs_i_size_write(inode, new_size);
+ if (new_size > i_size_read(inode)) {
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ file_set_keep_isize(inode);
+ else
+ f2fs_i_size_write(inode, new_size);
+ }
out_sem:
up_write(&F2FS_I(inode)->i_mmap_sem);
@@ -1709,6 +1716,8 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
inode_lock(inode);
+ down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+
if (f2fs_is_volatile_file(inode))
goto err_out;
@@ -1727,6 +1736,7 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
}
err_out:
+ up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
@@ -1936,7 +1946,7 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- if (!f2fs_sb_has_crypto(inode->i_sb))
+ if (!f2fs_sb_has_encrypt(inode->i_sb))
return -EOPNOTSUPP;
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
@@ -1946,7 +1956,7 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
- if (!f2fs_sb_has_crypto(file_inode(filp)->i_sb))
+ if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
return -EOPNOTSUPP;
return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
@@ -1957,16 +1967,18 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err;
- if (!f2fs_sb_has_crypto(inode->i_sb))
+ if (!f2fs_sb_has_encrypt(inode->i_sb))
return -EOPNOTSUPP;
- if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
- goto got_it;
-
err = mnt_want_write_file(filp);
if (err)
return err;
+ down_write(&sbi->sb_lock);
+
+ if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
+ goto got_it;
+
/* update superblock with uuid */
generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
@@ -1974,15 +1986,16 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
if (err) {
/* undo new data */
memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
- mnt_drop_write_file(filp);
- return err;
+ goto out_err;
}
- mnt_drop_write_file(filp);
got_it:
if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
16))
- return -EFAULT;
- return 0;
+ err = -EFAULT;
+out_err:
+ up_write(&sbi->sb_lock);
+ mnt_drop_write_file(filp);
+ return err;
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
@@ -2043,8 +2056,10 @@ static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
return ret;
end = range.start + range.len;
- if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi))
- return -EINVAL;
+ if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
+ ret = -EINVAL;
+ goto out;
+ }
do_more:
if (!range.sync) {
if (!mutex_trylock(&sbi->gc_mutex)) {
@@ -2685,25 +2700,54 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
- inode_lock(inode);
+ if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+ return -EINVAL;
+
+ if (!inode_trylock(inode)) {
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return -EAGAIN;
+ inode_lock(inode);
+ }
+
ret = generic_write_checks(iocb, from);
if (ret > 0) {
+ bool preallocated = false;
+ size_t target_size = 0;
int err;
if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
set_inode_flag(inode, FI_NO_PREALLOC);
- err = f2fs_preallocate_blocks(iocb, from);
- if (err) {
- clear_inode_flag(inode, FI_NO_PREALLOC);
- inode_unlock(inode);
- return err;
+ if ((iocb->ki_flags & IOCB_NOWAIT) &&
+ (iocb->ki_flags & IOCB_DIRECT)) {
+ if (!f2fs_overwrite_io(inode, iocb->ki_pos,
+ iov_iter_count(from)) ||
+ f2fs_has_inline_data(inode) ||
+ f2fs_force_buffered_io(inode, WRITE)) {
+ inode_unlock(inode);
+ return -EAGAIN;
+ }
+
+ } else {
+ preallocated = true;
+ target_size = iocb->ki_pos + iov_iter_count(from);
+
+ err = f2fs_preallocate_blocks(iocb, from);
+ if (err) {
+ clear_inode_flag(inode, FI_NO_PREALLOC);
+ inode_unlock(inode);
+ return err;
+ }
}
blk_start_plug(&plug);
ret = __generic_file_write_iter(iocb, from);
blk_finish_plug(&plug);
clear_inode_flag(inode, FI_NO_PREALLOC);
+ /* if we couldn't write data, we should deallocate blocks. */
+ if (preallocated && i_size_read(inode) < target_size)
+ f2fs_truncate(inode);
+
if (ret > 0)
f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
}
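
file.c now honors IOCB_NOWAIT in f2fs_file_write_iter(): the inode lock is only try-acquired, a NOWAIT write without IOCB_DIRECT is rejected with -EINVAL, and a non-blocking direct write is admitted only when f2fs_overwrite_io() confirms every block in the range is already allocated and no buffered fallback applies (inline data, or f2fs_force_buffered_io()); anything else returns -EAGAIN so the caller can retry without the flag. On the ordinary path the write also records target_size and truncates preallocated blocks back if the write came up short. Condensed from the hunks above:

    if (!inode_trylock(inode)) {
            if (iocb->ki_flags & IOCB_NOWAIT)
                    return -EAGAIN;         /* would block on the inode lock */
            inode_lock(inode);
    }

    if ((iocb->ki_flags & IOCB_NOWAIT) && (iocb->ki_flags & IOCB_DIRECT)) {
            /* only a pure overwrite can proceed without blocking */
            if (!f2fs_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)) ||
                f2fs_has_inline_data(inode) ||
                f2fs_force_buffered_io(inode, WRITE)) {
                    inode_unlock(inode);
                    return -EAGAIN;
            }
    }

The matching change in f2fs_direct_IO() (data.c above) try-locks dio_rwsem the same way and also returns -EAGAIN under IOCB_NOWAIT rather than sleeping.
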
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index d0de3429c26c..c009b50d69f5 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -76,14 +76,15 @@ static int gc_thread_func(void *data)
* invalidated soon after by user update or deletion.
* So, I'd like to wait some time to collect dirty segments.
*/
- if (!mutex_trylock(&sbi->gc_mutex))
- goto next;
-
if (gc_th->gc_urgent) {
wait_ms = gc_th->urgent_sleep_time;
+ mutex_lock(&sbi->gc_mutex);
goto do_gc;
}
+ if (!mutex_trylock(&sbi->gc_mutex))
+ goto next;
+
if (!is_idle(sbi)) {
increase_sleep_time(gc_th, &wait_ms);
mutex_unlock(&sbi->gc_mutex);
@@ -161,12 +162,17 @@ static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
- if (gc_th && gc_th->gc_idle) {
+ if (!gc_th)
+ return gc_mode;
+
+ if (gc_th->gc_idle) {
if (gc_th->gc_idle == 1)
gc_mode = GC_CB;
else if (gc_th->gc_idle == 2)
gc_mode = GC_GREEDY;
}
+ if (gc_th->gc_urgent)
+ gc_mode = GC_GREEDY;
return gc_mode;
}
@@ -188,11 +194,14 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
}
/* we need to check every dirty segments in the FG_GC case */
- if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
+ if (gc_type != FG_GC &&
+ (sbi->gc_thread && !sbi->gc_thread->gc_urgent) &&
+ p->max_search > sbi->max_victim_search)
p->max_search = sbi->max_victim_search;
- /* let's select beginning hot/small space first */
- if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
+ /* let's select beginning hot/small space first in no_heap mode */
+ if (test_opt(sbi, NOHEAP) &&
+ (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
p->offset = 0;
else
p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
@@ -841,8 +850,8 @@ next_step:
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
- /* if encrypted inode, let's go phase 3 */
- if (f2fs_encrypted_file(inode)) {
+ /* if inode uses special I/O path, let's go phase 3 */
+ if (f2fs_post_read_required(inode)) {
add_gc_inode(gc_list, inode);
continue;
}
@@ -890,7 +899,7 @@ next_step:
start_bidx = start_bidx_of_node(nofs, inode)
+ ofs_in_node;
- if (f2fs_encrypted_file(inode))
+ if (f2fs_post_read_required(inode))
move_data_block(inode, start_bidx, segno, off);
else
move_data_page(inode, start_bidx, gc_type,
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 37ab2159506a..922a213693c1 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -26,7 +26,7 @@ bool f2fs_may_inline_data(struct inode *inode)
if (i_size_read(inode) > MAX_INLINE_DATA(inode))
return false;
- if (f2fs_encrypted_file(inode))
+ if (f2fs_post_read_required(inode))
return false;
return true;
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 10be247ca421..51846fc54fbd 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -284,6 +284,10 @@ static int do_read_inode(struct inode *inode)
fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
}
+ F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
+ F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
+ F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
+ F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
f2fs_put_page(node_page, 1);
stat_inc_inline_xattr(inode);
@@ -439,12 +443,15 @@ void update_inode(struct inode *inode, struct page *node_page)
}
__set_inode_rdev(inode, ri);
- set_cold_node(inode, node_page);
/* deleted inode */
if (inode->i_nlink == 0)
clear_inline_node(node_page);
+ F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
+ F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
+ F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
+ F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
}
void update_inode_page(struct inode *inode)
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 6bb1adb84324..fecae8685d2a 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -78,7 +78,8 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
set_inode_flag(inode, FI_NEW_INODE);
/* If the directory encrypted, then we should encrypt the inode. */
- if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
+ if ((f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
+ f2fs_may_encrypt(inode))
f2fs_set_encrypted_inode(inode);
if (f2fs_sb_has_extra_attr(sbi->sb)) {
@@ -97,7 +98,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
if (f2fs_has_inline_xattr(inode))
- xattr_size = sbi->inline_xattr_size;
+ xattr_size = F2FS_OPTION(sbi).inline_xattr_size;
/* Otherwise, will be 0 */
} else if (f2fs_has_inline_xattr(inode) ||
f2fs_has_inline_dentry(inode)) {
@@ -142,7 +143,7 @@ fail_drop:
return ERR_PTR(err);
}
-static int is_multimedia_file(const unsigned char *s, const char *sub)
+static int is_extension_exist(const unsigned char *s, const char *sub)
{
size_t slen = strlen(s);
size_t sublen = strlen(sub);
@@ -168,19 +169,94 @@ static int is_multimedia_file(const unsigned char *s, const char *sub)
/*
* Set multimedia files as cold files for hot/cold data separation
*/
-static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode,
+static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *inode,
const unsigned char *name)
{
- int i;
- __u8 (*extlist)[8] = sbi->raw_super->extension_list;
+ __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+ int i, cold_count, hot_count;
+
+ down_read(&sbi->sb_lock);
+
+ cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+ hot_count = sbi->raw_super->hot_ext_count;
- int count = le32_to_cpu(sbi->raw_super->extension_count);
- for (i = 0; i < count; i++) {
- if (is_multimedia_file(name, extlist[i])) {
+ for (i = 0; i < cold_count + hot_count; i++) {
+ if (!is_extension_exist(name, extlist[i]))
+ continue;
+ if (i < cold_count)
file_set_cold(inode);
- break;
- }
+ else
+ file_set_hot(inode);
+ break;
+ }
+
+ up_read(&sbi->sb_lock);
+}
+
+int update_extension_list(struct f2fs_sb_info *sbi, const char *name,
+ bool hot, bool set)
+{
+ __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+ int cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+ int hot_count = sbi->raw_super->hot_ext_count;
+ int total_count = cold_count + hot_count;
+ int start, count;
+ int i;
+
+ if (set) {
+ if (total_count == F2FS_MAX_EXTENSION)
+ return -EINVAL;
+ } else {
+ if (!hot && !cold_count)
+ return -EINVAL;
+ if (hot && !hot_count)
+ return -EINVAL;
+ }
+
+ if (hot) {
+ start = cold_count;
+ count = total_count;
+ } else {
+ start = 0;
+ count = cold_count;
+ }
+
+ for (i = start; i < count; i++) {
+ if (strcmp(name, extlist[i]))
+ continue;
+
+ if (set)
+ return -EINVAL;
+
+ memcpy(extlist[i], extlist[i + 1],
+ F2FS_EXTENSION_LEN * (total_count - i - 1));
+ memset(extlist[total_count - 1], 0, F2FS_EXTENSION_LEN);
+ if (hot)
+ sbi->raw_super->hot_ext_count = hot_count - 1;
+ else
+ sbi->raw_super->extension_count =
+ cpu_to_le32(cold_count - 1);
+ return 0;
+ }
+
+ if (!set)
+ return -EINVAL;
+
+ if (hot) {
+ strncpy(extlist[count], name, strlen(name));
+ sbi->raw_super->hot_ext_count = hot_count + 1;
+ } else {
+ char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
+
+ memcpy(buf, &extlist[cold_count],
+ F2FS_EXTENSION_LEN * hot_count);
+ memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
+ strncpy(extlist[cold_count], name, strlen(name));
+ memcpy(&extlist[cold_count + 1], buf,
+ F2FS_EXTENSION_LEN * hot_count);
+ sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
}
+ return 0;
}
static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
@@ -203,7 +279,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
return PTR_ERR(inode);
if (!test_opt(sbi, DISABLE_EXT_IDENTIFY))
- set_cold_files(sbi, inode, dentry->d_name.name);
+ set_file_temperature(sbi, inode, dentry->d_name.name);
inode->i_op = &f2fs_file_inode_operations;
inode->i_fop = &f2fs_file_operations;
@@ -218,8 +294,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
alloc_nid_done(sbi, ino);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
@@ -478,27 +554,16 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
size_t len = strlen(symname);
- struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
- struct fscrypt_symlink_data *sd = NULL;
+ struct fscrypt_str disk_link;
int err;
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- if (f2fs_encrypted_inode(dir)) {
- err = fscrypt_get_encryption_info(dir);
- if (err)
- return err;
-
- if (!fscrypt_has_encryption_key(dir))
- return -ENOKEY;
-
- disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
- sizeof(struct fscrypt_symlink_data));
- }
-
- if (disk_link.len > dir->i_sb->s_blocksize)
- return -ENAMETOOLONG;
+ err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize,
+ &disk_link);
+ if (err)
+ return err;
err = dquot_initialize(dir);
if (err)
@@ -508,7 +573,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (f2fs_encrypted_inode(inode))
+ if (IS_ENCRYPTED(inode))
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
else
inode->i_op = &f2fs_symlink_inode_operations;
@@ -518,44 +583,19 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
- goto out;
+ goto out_handle_failed_inode;
f2fs_unlock_op(sbi);
alloc_nid_done(sbi, inode->i_ino);
- if (f2fs_encrypted_inode(inode)) {
- struct qstr istr = QSTR_INIT(symname, len);
- struct fscrypt_str ostr;
-
- sd = f2fs_kzalloc(sbi, disk_link.len, GFP_NOFS);
- if (!sd) {
- err = -ENOMEM;
- goto err_out;
- }
-
- err = fscrypt_get_encryption_info(inode);
- if (err)
- goto err_out;
-
- if (!fscrypt_has_encryption_key(inode)) {
- err = -ENOKEY;
- goto err_out;
- }
-
- ostr.name = sd->encrypted_path;
- ostr.len = disk_link.len;
- err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
- if (err)
- goto err_out;
-
- sd->len = cpu_to_le16(ostr.len);
- disk_link.name = (char *)sd;
- }
+ err = fscrypt_encrypt_symlink(inode, symname, len, &disk_link);
+ if (err)
+ goto err_out;
err = page_symlink(inode, disk_link.name, disk_link.len);
err_out:
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
/*
* Let's flush symlink data in order to avoid broken symlink as much as
@@ -576,12 +616,14 @@ err_out:
f2fs_unlink(dir, dentry);
}
- kfree(sd);
-
f2fs_balance_fs(sbi, true);
- return err;
-out:
+ goto out_free_encrypted_link;
+
+out_handle_failed_inode:
handle_failed_inode(inode);
+out_free_encrypted_link:
+ if (disk_link.name != (unsigned char *)symname)
+ kfree(disk_link.name);
return err;
}
@@ -616,8 +658,8 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
alloc_nid_done(sbi, inode->i_ino);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
@@ -668,8 +710,8 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
alloc_nid_done(sbi, inode->i_ino);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
@@ -743,10 +785,12 @@ out:
static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
- if (unlikely(f2fs_cp_error(F2FS_I_SB(dir))))
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+
+ if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- if (f2fs_encrypted_inode(dir)) {
+ if (f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
int err = fscrypt_get_encryption_info(dir);
if (err)
return err;
@@ -926,7 +970,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_put_page(old_dir_page, 0);
f2fs_i_links_write(old_dir, false);
}
- add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
+ add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
f2fs_unlock_op(sbi);
@@ -1076,8 +1121,10 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
}
f2fs_mark_inode_dirty_sync(new_dir, false);
- add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO);
- add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) {
+ add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO);
+ add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+ }
f2fs_unlock_op(sbi);
@@ -1127,65 +1174,21 @@ static int f2fs_rename2(struct inode *old_dir, struct dentry *old_dentry,
static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cookie)
{
- struct page *cpage = NULL;
- char *caddr, *paddr = NULL;
- struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
- struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
- struct fscrypt_symlink_data *sd;
struct inode *inode = d_inode(dentry);
- u32 max_size = inode->i_sb->s_blocksize;
- int res;
-
- res = fscrypt_get_encryption_info(inode);
- if (res)
- return ERR_PTR(res);
-
- cpage = read_mapping_page(inode->i_mapping, 0, NULL);
- if (IS_ERR(cpage))
- return ERR_CAST(cpage);
- caddr = page_address(cpage);
-
- /* Symlink is encrypted */
- sd = (struct fscrypt_symlink_data *)caddr;
- cstr.name = sd->encrypted_path;
- cstr.len = le16_to_cpu(sd->len);
-
- /* this is broken symlink case */
- if (unlikely(cstr.len == 0)) {
- res = -ENOENT;
- goto errout;
- }
-
- if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
- /* Symlink data on the disk is corrupted */
- res = -EIO;
- goto errout;
- }
- res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
- if (res)
- goto errout;
-
- res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
- if (res)
- goto errout;
-
- /* this is broken symlink case */
- if (unlikely(pstr.name[0] == 0)) {
- res = -ENOENT;
- goto errout;
- }
+ struct page *page;
+ void *target;
- paddr = pstr.name;
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
- /* Null-terminate the name */
- paddr[pstr.len] = '\0';
+ page = read_mapping_page(inode->i_mapping, 0, NULL);
+ if (IS_ERR(page))
+ return ERR_CAST(page);
- put_page(cpage);
- return *cookie = paddr;
-errout:
- fscrypt_fname_free_buffer(&pstr);
- put_page(cpage);
- return ERR_PTR(res);
+ target = fscrypt_get_symlink(inode, page_address(page),
+ inode->i_sb->s_blocksize);
+ put_page(page);
+ return *cookie = target;
}
const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index c294d0feea08..3871e7d3f69e 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -193,8 +193,8 @@ static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
__free_nat_entry(e);
}
-static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
- struct nat_entry *ne)
+static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
+ struct nat_entry *ne)
{
nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
struct nat_entry_set *head;
@@ -209,15 +209,36 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
head->entry_cnt = 0;
f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
}
+ return head;
+}
+
+static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
+ struct nat_entry *ne)
+{
+ struct nat_entry_set *head;
+ bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
+
+ if (!new_ne)
+ head = __grab_nat_entry_set(nm_i, ne);
+
+ /*
+ * update entry_cnt in the following conditions:
+ * 1. update NEW_ADDR to valid block address;
+ * 2. update old block address to new one;
+ */
+ if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
+ !get_nat_flag(ne, IS_DIRTY)))
+ head->entry_cnt++;
+
+ set_nat_flag(ne, IS_PREALLOC, new_ne);
if (get_nat_flag(ne, IS_DIRTY))
goto refresh_list;
nm_i->dirty_nat_cnt++;
- head->entry_cnt++;
set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
- if (nat_get_blkaddr(ne) == NEW_ADDR)
+ if (new_ne)
list_del_init(&ne->list);
else
list_move_tail(&ne->list, &head->entry_list);
@@ -1076,7 +1097,7 @@ struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
f2fs_wait_on_page_writeback(page, NODE, true);
fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
- set_cold_node(dn->inode, page);
+ set_cold_node(page, S_ISDIR(dn->inode->i_mode));
if (!PageUptodate(page))
SetPageUptodate(page);
if (set_page_dirty(page))
@@ -1754,7 +1775,7 @@ static int f2fs_set_node_page_dirty(struct page *page)
if (!PageUptodate(page))
SetPageUptodate(page);
if (!PageDirty(page)) {
- f2fs_set_page_dirty_nobuffers(page);
+ __set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
SetPagePrivate(page);
f2fs_trace_pid(page);
@@ -2313,6 +2334,7 @@ retry:
if (!PageUptodate(ipage))
SetPageUptodate(ipage);
fill_node_footer(ipage, ino, ino, 0, true);
+ set_cold_node(page, false);
src = F2FS_INODE(page);
dst = F2FS_INODE(ipage);
@@ -2602,8 +2624,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
if (!enabled_nat_bits(sbi, NULL))
return 0;
- nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
- F2FS_BLKSIZE - 1);
+ nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
nm_i->nat_bits = f2fs_kzalloc(sbi,
nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
if (!nm_i->nat_bits)
@@ -2729,12 +2750,20 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
+ int i;
- nm_i->free_nid_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks *
- NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
+ nm_i->free_nid_bitmap = f2fs_kzalloc(sbi, nm_i->nat_blocks *
+ sizeof(unsigned char *), GFP_KERNEL);
if (!nm_i->free_nid_bitmap)
return -ENOMEM;
+ for (i = 0; i < nm_i->nat_blocks; i++) {
+ nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
+ NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
+ if (!nm_i->free_nid_bitmap[i])
+ return -ENOMEM;
+ }
+
nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
GFP_KERNEL);
if (!nm_i->nat_block_bitmap)
@@ -2825,7 +2854,13 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
up_write(&nm_i->nat_tree_lock);
kvfree(nm_i->nat_block_bitmap);
- kvfree(nm_i->free_nid_bitmap);
+ if (nm_i->free_nid_bitmap) {
+ int i;
+
+ for (i = 0; i < nm_i->nat_blocks; i++)
+ kvfree(nm_i->free_nid_bitmap[i]);
+ kfree(nm_i->free_nid_bitmap);
+ }
kvfree(nm_i->free_nid_count);
kfree(nm_i->nat_bitmap);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 081ef0d672bf..b95e49e4a928 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -44,6 +44,7 @@ enum {
HAS_FSYNCED_INODE, /* is the inode fsynced before? */
HAS_LAST_FSYNC, /* has the latest node fsync mark? */
IS_DIRTY, /* this nat entry is dirty? */
+ IS_PREALLOC, /* nat entry is preallocated */
};
/*
@@ -422,12 +423,12 @@ static inline void clear_inline_node(struct page *page)
ClearPageChecked(page);
}
-static inline void set_cold_node(struct inode *inode, struct page *page)
+static inline void set_cold_node(struct page *page, bool is_dir)
{
struct f2fs_node *rn = F2FS_NODE(page);
unsigned int flag = le32_to_cpu(rn->footer.flag);
- if (S_ISDIR(inode->i_mode))
+ if (is_dir)
flag &= ~(0x1 << COLD_BIT_SHIFT);
else
flag |= (0x1 << COLD_BIT_SHIFT);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 210de28c9cd2..4ddc2262baf1 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -242,6 +242,9 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
struct curseg_info *curseg;
struct page *page = NULL;
block_t blkaddr;
+ unsigned int loop_cnt = 0;
+ unsigned int free_blocks = sbi->user_block_count -
+ valid_user_blocks(sbi);
int err = 0;
/* get node pages in the current segment */
@@ -294,6 +297,17 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
if (IS_INODE(page) && is_dent_dnode(page))
entry->last_dentry = blkaddr;
next:
+ /* sanity check in order to detect looped node chain */
+ if (++loop_cnt >= free_blocks ||
+ blkaddr == next_blkaddr_of_node(page)) {
+ f2fs_msg(sbi->sb, KERN_NOTICE,
+ "%s: detect looped node chain, "
+ "blkaddr:%u, next:%u",
+ __func__, blkaddr, next_blkaddr_of_node(page));
+ err = -EINVAL;
+ break;
+ }
+
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
f2fs_put_page(page, 1);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index bf98f6f34b7e..d7bac60ad719 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1491,12 +1491,11 @@ static int issue_discard_thread(void *data)
if (kthread_should_stop())
return 0;
- if (dcc->discard_wake) {
+ if (dcc->discard_wake)
dcc->discard_wake = 0;
- if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
- init_discard_policy(&dpolicy,
- DPOLICY_FORCE, 1);
- }
+
+ if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
+ init_discard_policy(&dpolicy, DPOLICY_FORCE, 1);
sb_start_intwrite(sbi->sb);
@@ -1565,7 +1564,7 @@ static int __issue_discard_async(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
- if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+ if (f2fs_sb_has_blkzoned(sbi->sb) &&
bdev_zoned_model(bdev) != BLK_ZONED_NONE)
return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
@@ -1763,7 +1762,7 @@ find_next:
sbi->blocks_per_seg, cur_pos);
len = next_pos - cur_pos;
- if (f2fs_sb_mounted_blkzoned(sbi->sb) ||
+ if (f2fs_sb_has_blkzoned(sbi->sb) ||
(force && len < cpc->trim_minlen))
goto skip;
@@ -1807,7 +1806,7 @@ void init_discard_policy(struct discard_policy *dpolicy,
} else if (discard_type == DPOLICY_FORCE) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
- dpolicy->io_aware = true;
+ dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_FSTRIM) {
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_UMOUNT) {
@@ -1943,7 +1942,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
sbi->discard_blks--;
/* don't overwrite by SSR to keep node chain */
- if (se->type == CURSEG_WARM_NODE) {
+ if (IS_NODESEG(se->type)) {
if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks++;
}
@@ -2244,11 +2243,17 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
if (sbi->segs_per_sec != 1)
return CURSEG_I(sbi, type)->segno;
- if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
+ if (test_opt(sbi, NOHEAP) &&
+ (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
return 0;
if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
return SIT_I(sbi)->last_victim[ALLOC_NEXT];
+
+ /* find segments from 0 to reuse freed segments */
+ if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
+ return 0;
+
return CURSEG_I(sbi, type)->segno;
}
@@ -2535,6 +2540,101 @@ int rw_hint_to_seg_type(enum rw_hint hint)
}
}
+/* This returns write hints for each segment type. These hints will be
+ * passed down to the block layer. There are mapping tables which depend on
+ * the mount option 'whint_mode'.
+ *
+ * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
+ *
+ * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
+ *
+ * User F2FS Block
+ * ---- ---- -----
+ * META WRITE_LIFE_NOT_SET
+ * HOT_NODE "
+ * WARM_NODE "
+ * COLD_NODE "
+ * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
+ * extension list " "
+ *
+ * -- buffered io
+ * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
+ * WRITE_LIFE_NONE " "
+ * WRITE_LIFE_MEDIUM " "
+ * WRITE_LIFE_LONG " "
+ *
+ * -- direct io
+ * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
+ * WRITE_LIFE_NONE " WRITE_LIFE_NONE
+ * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
+ * WRITE_LIFE_LONG " WRITE_LIFE_LONG
+ *
+ * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
+ *
+ * User F2FS Block
+ * ---- ---- -----
+ * META WRITE_LIFE_MEDIUM;
+ * HOT_NODE WRITE_LIFE_NOT_SET
+ * WARM_NODE "
+ * COLD_NODE WRITE_LIFE_NONE
+ * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
+ * extension list " "
+ *
+ * -- buffered io
+ * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_LONG
+ * WRITE_LIFE_NONE " "
+ * WRITE_LIFE_MEDIUM " "
+ * WRITE_LIFE_LONG " "
+ *
+ * -- direct io
+ * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
+ * WRITE_LIFE_NONE " WRITE_LIFE_NONE
+ * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
+ * WRITE_LIFE_LONG " WRITE_LIFE_LONG
+ */
+
+enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+ enum page_type type, enum temp_type temp)
+{
+ if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
+ if (type == DATA) {
+ if (temp == WARM)
+ return WRITE_LIFE_NOT_SET;
+ else if (temp == HOT)
+ return WRITE_LIFE_SHORT;
+ else if (temp == COLD)
+ return WRITE_LIFE_EXTREME;
+ } else {
+ return WRITE_LIFE_NOT_SET;
+ }
+ } else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
+ if (type == DATA) {
+ if (temp == WARM)
+ return WRITE_LIFE_LONG;
+ else if (temp == HOT)
+ return WRITE_LIFE_SHORT;
+ else if (temp == COLD)
+ return WRITE_LIFE_EXTREME;
+ } else if (type == NODE) {
+ if (temp == WARM || temp == HOT)
+ return WRITE_LIFE_NOT_SET;
+ else if (temp == COLD)
+ return WRITE_LIFE_NONE;
+ } else if (type == META) {
+ return WRITE_LIFE_MEDIUM;
+ }
+ }
+ return WRITE_LIFE_NOT_SET;
+}
+
static int __get_segment_type_2(struct f2fs_io_info *fio)
{
if (fio->type == DATA)
@@ -2567,7 +2667,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
if (is_cold_data(fio->page) || file_is_cold(inode))
return CURSEG_COLD_DATA;
- if (is_inode_flag_set(inode, FI_HOT_DATA))
+ if (file_is_hot(inode) ||
+ is_inode_flag_set(inode, FI_HOT_DATA))
return CURSEG_HOT_DATA;
/* rw_hint_to_seg_type(inode->i_write_hint); */
return CURSEG_WARM_DATA;
@@ -2583,7 +2684,7 @@ static int __get_segment_type(struct f2fs_io_info *fio)
{
int type = 0;
- switch (fio->sbi->active_logs) {
+ switch (F2FS_OPTION(fio->sbi).active_logs) {
case 2:
type = __get_segment_type_2(fio);
break;
@@ -2723,6 +2824,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
+ .temp = HOT,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_NOIDLE | REQ_META | REQ_PRIO,
.old_blkaddr = page->index,
@@ -2769,8 +2871,15 @@ void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
int rewrite_data_page(struct f2fs_io_info *fio)
{
int err;
+ struct f2fs_sb_info *sbi = fio->sbi;
fio->new_blkaddr = fio->old_blkaddr;
+ /* i/o temperature is needed for passing down write hints */
+ __get_segment_type(fio);
+
+ f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi,
+ GET_SEGNO(sbi, fio->new_blkaddr))->type));
+
stat_inc_inplace_blocks(fio->sbi);
err = f2fs_submit_page_bio(fio);
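
The whint_mode tables documented above map per-file lifetime hints to the WRITE_LIFE_* values handed to the block layer. A minimal userspace sketch of the user-based mapping, assuming a file on an f2fs mount with -o whint_mode=user-based; the path is hypothetical, and the F_SET_RW_HINT/RWH_* constants mirror <linux/fcntl.h> (available since Linux 4.13):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#ifndef F_SET_RW_HINT			/* defined in <linux/fcntl.h> since 4.13 */
	#define F_SET_RW_HINT		1036	/* F_LINUX_SPECIFIC_BASE + 12 */
	#define RWH_WRITE_LIFE_SHORT	2
	#endif

	int main(void)
	{
		/* hypothetical file on a mount using -o whint_mode=user-based */
		int fd = open("/mnt/f2fs/hot.db", O_WRONLY | O_CREAT, 0644);
		uint64_t hint = RWH_WRITE_LIFE_SHORT;	/* maps to HOT_DATA in the table */

		if (fd < 0 || fcntl(fd, F_SET_RW_HINT, &hint) < 0) {
			perror("F_SET_RW_HINT");
			return 1;
		}
		/* subsequent buffered writes on fd are issued as WRITE_LIFE_SHORT */
		close(fd);
		return 0;
	}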
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 5d6d3e72be31..96a2d57ba8a4 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -53,13 +53,19 @@
((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
(sbi)->segs_per_sec)) \
-#define MAIN_BLKADDR(sbi) (SM_I(sbi)->main_blkaddr)
-#define SEG0_BLKADDR(sbi) (SM_I(sbi)->seg0_blkaddr)
+#define MAIN_BLKADDR(sbi) \
+ (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
+#define SEG0_BLKADDR(sbi) \
+ (SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi) ((sbi)->total_sections)
-#define TOTAL_SEGS(sbi) (SM_I(sbi)->segment_count)
+#define TOTAL_SEGS(sbi) \
+ (SM_I(sbi) ? SM_I(sbi)->segment_count : \
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)
#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
@@ -596,6 +602,8 @@ static inline int utilization(struct f2fs_sb_info *sbi)
#define DEF_MIN_FSYNC_BLOCKS 8
#define DEF_MIN_HOT_BLOCKS 16
+#define SMALL_VOLUME_SEGMENTS (16 * 512) /* 16GB */
+
enum {
F2FS_IPU_FORCE,
F2FS_IPU_SSR,
@@ -630,10 +638,17 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}
-static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
+static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
{
- BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
- || blk_addr >= MAX_BLKADDR(sbi));
+ struct f2fs_sb_info *sbi = fio->sbi;
+
+ if (PAGE_TYPE_OF_BIO(fio->type) == META &&
+ (!is_read_io(fio->op) || fio->is_meta))
+ BUG_ON(blk_addr < SEG0_BLKADDR(sbi) ||
+ blk_addr >= MAIN_BLKADDR(sbi));
+ else
+ BUG_ON(blk_addr < MAIN_BLKADDR(sbi) ||
+ blk_addr >= MAX_BLKADDR(sbi));
}
/*
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 872f9c1078f0..55b2bad55671 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -60,7 +60,7 @@ char *fault_name[FAULT_MAX] = {
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
unsigned int rate)
{
- struct f2fs_fault_info *ffi = &sbi->fault_info;
+ struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
if (rate) {
atomic_set(&ffi->inject_ops, 0);
@@ -129,6 +129,10 @@ enum {
Opt_jqfmt_vfsold,
Opt_jqfmt_vfsv0,
Opt_jqfmt_vfsv1,
+ Opt_whint,
+ Opt_alloc,
+ Opt_fsync,
+ Opt_test_dummy_encryption,
Opt_err,
};
@@ -182,6 +186,10 @@ static match_table_t f2fs_tokens = {
{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
+ {Opt_whint, "whint_mode=%s"},
+ {Opt_alloc, "alloc_mode=%s"},
+ {Opt_fsync, "fsync_mode=%s"},
+ {Opt_test_dummy_encryption, "test_dummy_encryption"},
{Opt_err, NULL},
};
@@ -202,21 +210,24 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
block_t limit = (sbi->user_block_count << 1) / 1000;
/* limit is 0.2% */
- if (test_opt(sbi, RESERVE_ROOT) && sbi->root_reserved_blocks > limit) {
- sbi->root_reserved_blocks = limit;
+ if (test_opt(sbi, RESERVE_ROOT) &&
+ F2FS_OPTION(sbi).root_reserved_blocks > limit) {
+ F2FS_OPTION(sbi).root_reserved_blocks = limit;
f2fs_msg(sbi->sb, KERN_INFO,
"Reduce reserved blocks for root = %u",
- sbi->root_reserved_blocks);
+ F2FS_OPTION(sbi).root_reserved_blocks);
}
if (!test_opt(sbi, RESERVE_ROOT) &&
- (!uid_eq(sbi->s_resuid,
+ (!uid_eq(F2FS_OPTION(sbi).s_resuid,
make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
- !gid_eq(sbi->s_resgid,
+ !gid_eq(F2FS_OPTION(sbi).s_resgid,
make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
f2fs_msg(sbi->sb, KERN_INFO,
"Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
- from_kuid_munged(&init_user_ns, sbi->s_resuid),
- from_kgid_munged(&init_user_ns, sbi->s_resgid));
+ from_kuid_munged(&init_user_ns,
+ F2FS_OPTION(sbi).s_resuid),
+ from_kgid_munged(&init_user_ns,
+ F2FS_OPTION(sbi).s_resgid));
}
static void init_once(void *foo)
@@ -236,7 +247,7 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
char *qname;
int ret = -EINVAL;
- if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) {
+ if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
f2fs_msg(sb, KERN_ERR,
"Cannot change journaled "
"quota options when quota turned on");
@@ -254,8 +265,8 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
"Not enough memory for storing quotafile name");
return -EINVAL;
}
- if (sbi->s_qf_names[qtype]) {
- if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
+ if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
+ if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
ret = 0;
else
f2fs_msg(sb, KERN_ERR,
@@ -268,7 +279,7 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
"quotafile must be on filesystem root");
goto errout;
}
- sbi->s_qf_names[qtype] = qname;
+ F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
set_opt(sbi, QUOTA);
return 0;
errout:
@@ -280,13 +291,13 @@ static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
- if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) {
+ if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
" when quota turned on");
return -EINVAL;
}
- kfree(sbi->s_qf_names[qtype]);
- sbi->s_qf_names[qtype] = NULL;
+ kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
+ F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
return 0;
}
@@ -302,15 +313,19 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
"Cannot enable project quota enforcement.");
return -1;
}
- if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
- sbi->s_qf_names[PRJQUOTA]) {
- if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
+ if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
+ F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
+ F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
+ if (test_opt(sbi, USRQUOTA) &&
+ F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
clear_opt(sbi, USRQUOTA);
- if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
+ if (test_opt(sbi, GRPQUOTA) &&
+ F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
clear_opt(sbi, GRPQUOTA);
- if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
+ if (test_opt(sbi, PRJQUOTA) &&
+ F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
clear_opt(sbi, PRJQUOTA);
if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
@@ -320,19 +335,19 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
return -1;
}
- if (!sbi->s_jquota_fmt) {
+ if (!F2FS_OPTION(sbi).s_jquota_fmt) {
f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
"not specified");
return -1;
}
}
- if (f2fs_sb_has_quota_ino(sbi->sb) && sbi->s_jquota_fmt) {
+ if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) {
f2fs_msg(sbi->sb, KERN_INFO,
"QUOTA feature is enabled, so ignore jquota_fmt");
- sbi->s_jquota_fmt = 0;
+ F2FS_OPTION(sbi).s_jquota_fmt = 0;
}
- if (f2fs_sb_has_quota_ino(sbi->sb) && sb_rdonly(sbi->sb)) {
+ if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
f2fs_msg(sbi->sb, KERN_INFO,
"Filesystem with quota feature cannot be mounted RDWR "
"without CONFIG_QUOTA");
@@ -403,14 +418,14 @@ static int parse_options(struct super_block *sb, char *options)
q = bdev_get_queue(sb->s_bdev);
if (blk_queue_discard(q)) {
set_opt(sbi, DISCARD);
- } else if (!f2fs_sb_mounted_blkzoned(sb)) {
+ } else if (!f2fs_sb_has_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"mounting with \"discard\" option, but "
"the device does not support discard");
}
break;
case Opt_nodiscard:
- if (f2fs_sb_mounted_blkzoned(sb)) {
+ if (f2fs_sb_has_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"discard is required for zoned block devices");
return -EINVAL;
@@ -440,7 +455,7 @@ static int parse_options(struct super_block *sb, char *options)
if (args->from && match_int(args, &arg))
return -EINVAL;
set_opt(sbi, INLINE_XATTR_SIZE);
- sbi->inline_xattr_size = arg;
+ F2FS_OPTION(sbi).inline_xattr_size = arg;
break;
#else
case Opt_user_xattr:
@@ -480,7 +495,7 @@ static int parse_options(struct super_block *sb, char *options)
return -EINVAL;
if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
return -EINVAL;
- sbi->active_logs = arg;
+ F2FS_OPTION(sbi).active_logs = arg;
break;
case Opt_disable_ext_identify:
set_opt(sbi, DISABLE_EXT_IDENTIFY);
@@ -524,9 +539,9 @@ static int parse_options(struct super_block *sb, char *options)
if (test_opt(sbi, RESERVE_ROOT)) {
f2fs_msg(sb, KERN_INFO,
"Preserve previous reserve_root=%u",
- sbi->root_reserved_blocks);
+ F2FS_OPTION(sbi).root_reserved_blocks);
} else {
- sbi->root_reserved_blocks = arg;
+ F2FS_OPTION(sbi).root_reserved_blocks = arg;
set_opt(sbi, RESERVE_ROOT);
}
break;
@@ -539,7 +554,7 @@ static int parse_options(struct super_block *sb, char *options)
"Invalid uid value %d", arg);
return -EINVAL;
}
- sbi->s_resuid = uid;
+ F2FS_OPTION(sbi).s_resuid = uid;
break;
case Opt_resgid:
if (args->from && match_int(args, &arg))
@@ -550,7 +565,7 @@ static int parse_options(struct super_block *sb, char *options)
"Invalid gid value %d", arg);
return -EINVAL;
}
- sbi->s_resgid = gid;
+ F2FS_OPTION(sbi).s_resgid = gid;
break;
case Opt_mode:
name = match_strdup(&args[0]);
@@ -559,7 +574,7 @@ static int parse_options(struct super_block *sb, char *options)
return -ENOMEM;
if (strlen(name) == 8 &&
!strncmp(name, "adaptive", 8)) {
- if (f2fs_sb_mounted_blkzoned(sb)) {
+ if (f2fs_sb_has_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"adaptive mode is not allowed with "
"zoned block device feature");
@@ -585,7 +600,7 @@ static int parse_options(struct super_block *sb, char *options)
1 << arg, BIO_MAX_PAGES);
return -EINVAL;
}
- sbi->write_io_size_bits = arg;
+ F2FS_OPTION(sbi).write_io_size_bits = arg;
break;
case Opt_fault_injection:
if (args->from && match_int(args, &arg))
@@ -646,13 +661,13 @@ static int parse_options(struct super_block *sb, char *options)
return ret;
break;
case Opt_jqfmt_vfsold:
- sbi->s_jquota_fmt = QFMT_VFS_OLD;
+ F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
break;
case Opt_jqfmt_vfsv0:
- sbi->s_jquota_fmt = QFMT_VFS_V0;
+ F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
break;
case Opt_jqfmt_vfsv1:
- sbi->s_jquota_fmt = QFMT_VFS_V1;
+ F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
break;
case Opt_noquota:
clear_opt(sbi, QUOTA);
@@ -679,6 +694,73 @@ static int parse_options(struct super_block *sb, char *options)
"quota operations not supported");
break;
#endif
+ case Opt_whint:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+ if (strlen(name) == 10 &&
+ !strncmp(name, "user-based", 10)) {
+ F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
+ } else if (strlen(name) == 3 &&
+ !strncmp(name, "off", 3)) {
+ F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
+ } else if (strlen(name) == 8 &&
+ !strncmp(name, "fs-based", 8)) {
+ F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
+ case Opt_alloc:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+
+ if (strlen(name) == 7 &&
+ !strncmp(name, "default", 7)) {
+ F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
+ } else if (strlen(name) == 5 &&
+ !strncmp(name, "reuse", 5)) {
+ F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
+ case Opt_fsync:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+ if (strlen(name) == 5 &&
+ !strncmp(name, "posix", 5)) {
+ F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
+ } else if (strlen(name) == 6 &&
+ !strncmp(name, "strict", 6)) {
+ F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
+ case Opt_test_dummy_encryption:
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ if (!f2fs_sb_has_encrypt(sb)) {
+ f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
+ return -EINVAL;
+ }
+
+ F2FS_OPTION(sbi).test_dummy_encryption = true;
+ f2fs_msg(sb, KERN_INFO,
+ "Test dummy encryption mode enabled");
+#else
+ f2fs_msg(sb, KERN_INFO,
+ "Test dummy encryption mount option ignored");
+#endif
+ break;
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
@@ -699,14 +781,22 @@ static int parse_options(struct super_block *sb, char *options)
}
if (test_opt(sbi, INLINE_XATTR_SIZE)) {
+ if (!f2fs_sb_has_extra_attr(sb) ||
+ !f2fs_sb_has_flexible_inline_xattr(sb)) {
+ f2fs_msg(sb, KERN_ERR,
+ "extra_attr or flexible_inline_xattr "
+ "feature is off");
+ return -EINVAL;
+ }
if (!test_opt(sbi, INLINE_XATTR)) {
f2fs_msg(sb, KERN_ERR,
"inline_xattr_size option should be "
"set with inline_xattr option");
return -EINVAL;
}
- if (!sbi->inline_xattr_size ||
- sbi->inline_xattr_size >= DEF_ADDRS_PER_INODE -
+ if (!F2FS_OPTION(sbi).inline_xattr_size ||
+ F2FS_OPTION(sbi).inline_xattr_size >=
+ DEF_ADDRS_PER_INODE -
F2FS_TOTAL_EXTRA_ATTR_SIZE -
DEF_INLINE_RESERVED_SIZE -
DEF_MIN_INLINE_SIZE) {
@@ -715,6 +805,12 @@ static int parse_options(struct super_block *sb, char *options)
return -EINVAL;
}
}
+
+ /* Do not pass down write hints if the number of active logs is less
+ * than NR_CURSEG_TYPE.
+ */
+ if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
+ F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
return 0;
}
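
The whint_mode=, alloc_mode= and fsync_mode= strings handled above are ordinary mount options and can be combined in one option string. A minimal sketch of mounting with all three (device and mount point are hypothetical); note that parse_options() forces whint_mode back to off whenever active_logs is lower than NR_CURSEG_TYPE:

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* hypothetical device/mountpoint; option names as parsed by parse_options() */
		if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
			  "whint_mode=user-based,alloc_mode=reuse,fsync_mode=strict") < 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}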
@@ -731,7 +827,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
/* Initialize f2fs-specific inode info */
atomic_set(&fi->dirty_pages, 0);
fi->i_current_depth = 1;
- fi->i_advise = 0;
init_rwsem(&fi->i_sem);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
@@ -743,10 +838,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
init_rwsem(&fi->i_mmap_sem);
init_rwsem(&fi->i_xattr_sem);
-#ifdef CONFIG_QUOTA
- memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
- fi->i_reserved_quota = 0;
-#endif
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
@@ -957,7 +1048,7 @@ static void f2fs_put_super(struct super_block *sb)
mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
- kfree(sbi->s_qf_names[i]);
+ kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
destroy_percpu_info(sbi);
for (i = 0; i < NR_PAGE_TYPE; i++)
@@ -1071,8 +1162,9 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_blocks = total_count - start_count;
buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
sbi->current_reserved_blocks;
- if (buf->f_bfree > sbi->root_reserved_blocks)
- buf->f_bavail = buf->f_bfree - sbi->root_reserved_blocks;
+ if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
+ buf->f_bavail = buf->f_bfree -
+ F2FS_OPTION(sbi).root_reserved_blocks;
else
buf->f_bavail = 0;
@@ -1107,10 +1199,10 @@ static inline void f2fs_show_quota_options(struct seq_file *seq,
#ifdef CONFIG_QUOTA
struct f2fs_sb_info *sbi = F2FS_SB(sb);
- if (sbi->s_jquota_fmt) {
+ if (F2FS_OPTION(sbi).s_jquota_fmt) {
char *fmtname = "";
- switch (sbi->s_jquota_fmt) {
+ switch (F2FS_OPTION(sbi).s_jquota_fmt) {
case QFMT_VFS_OLD:
fmtname = "vfsold";
break;
@@ -1124,14 +1216,17 @@ static inline void f2fs_show_quota_options(struct seq_file *seq,
seq_printf(seq, ",jqfmt=%s", fmtname);
}
- if (sbi->s_qf_names[USRQUOTA])
- seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
+ if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
+ seq_show_option(seq, "usrjquota",
+ F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
- if (sbi->s_qf_names[GRPQUOTA])
- seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
+ if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
+ seq_show_option(seq, "grpjquota",
+ F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
- if (sbi->s_qf_names[PRJQUOTA])
- seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]);
+ if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
+ seq_show_option(seq, "prjjquota",
+ F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
@@ -1166,7 +1261,7 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",noinline_xattr");
if (test_opt(sbi, INLINE_XATTR_SIZE))
seq_printf(seq, ",inline_xattr_size=%u",
- sbi->inline_xattr_size);
+ F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
if (test_opt(sbi, POSIX_ACL))
@@ -1202,18 +1297,20 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, "adaptive");
else if (test_opt(sbi, LFS))
seq_puts(seq, "lfs");
- seq_printf(seq, ",active_logs=%u", sbi->active_logs);
+ seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
if (test_opt(sbi, RESERVE_ROOT))
seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
- sbi->root_reserved_blocks,
- from_kuid_munged(&init_user_ns, sbi->s_resuid),
- from_kgid_munged(&init_user_ns, sbi->s_resgid));
+ F2FS_OPTION(sbi).root_reserved_blocks,
+ from_kuid_munged(&init_user_ns,
+ F2FS_OPTION(sbi).s_resuid),
+ from_kgid_munged(&init_user_ns,
+ F2FS_OPTION(sbi).s_resgid));
if (F2FS_IO_SIZE_BITS(sbi))
seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (test_opt(sbi, FAULT_INJECTION))
seq_printf(seq, ",fault_injection=%u",
- sbi->fault_info.inject_rate);
+ F2FS_OPTION(sbi).fault_info.inject_rate);
#endif
#ifdef CONFIG_QUOTA
if (test_opt(sbi, QUOTA))
@@ -1226,15 +1323,37 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",prjquota");
#endif
f2fs_show_quota_options(seq, sbi->sb);
+ if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
+ seq_printf(seq, ",whint_mode=%s", "user-based");
+ else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
+ seq_printf(seq, ",whint_mode=%s", "fs-based");
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ if (F2FS_OPTION(sbi).test_dummy_encryption)
+ seq_puts(seq, ",test_dummy_encryption");
+#endif
+
+ if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
+ seq_printf(seq, ",alloc_mode=%s", "default");
+ else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
+ seq_printf(seq, ",alloc_mode=%s", "reuse");
+ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
+ seq_printf(seq, ",fsync_mode=%s", "posix");
+ else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
+ seq_printf(seq, ",fsync_mode=%s", "strict");
return 0;
}
static void default_options(struct f2fs_sb_info *sbi)
{
/* init some FS parameters */
- sbi->active_logs = NR_CURSEG_TYPE;
- sbi->inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
+ F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
+ F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
+ F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
+ F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
+ F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
+ F2FS_OPTION(sbi).test_dummy_encryption = false;
+ sbi->readdir_ra = 1;
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_XATTR);
@@ -1244,7 +1363,7 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, NOHEAP);
sbi->sb->s_flags |= MS_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
- if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
+ if (f2fs_sb_has_blkzoned(sbi->sb)) {
set_opt_mode(sbi, F2FS_MOUNT_LFS);
set_opt(sbi, DISCARD);
} else {
@@ -1271,16 +1390,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct f2fs_mount_info org_mount_opt;
unsigned long old_sb_flags;
- int err, active_logs;
+ int err;
bool need_restart_gc = false;
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- struct f2fs_fault_info ffi = sbi->fault_info;
-#endif
#ifdef CONFIG_QUOTA
- int s_jquota_fmt;
- char *s_qf_names[MAXQUOTAS];
int i, j;
#endif
@@ -1290,21 +1404,21 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
*/
org_mount_opt = sbi->mount_opt;
old_sb_flags = sb->s_flags;
- active_logs = sbi->active_logs;
#ifdef CONFIG_QUOTA
- s_jquota_fmt = sbi->s_jquota_fmt;
+ org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++) {
- if (sbi->s_qf_names[i]) {
- s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
- GFP_KERNEL);
- if (!s_qf_names[i]) {
+ if (F2FS_OPTION(sbi).s_qf_names[i]) {
+ org_mount_opt.s_qf_names[i] =
+ kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
+ GFP_KERNEL);
+ if (!org_mount_opt.s_qf_names[i]) {
for (j = 0; j < i; j++)
- kfree(s_qf_names[j]);
+ kfree(org_mount_opt.s_qf_names[j]);
return -ENOMEM;
}
} else {
- s_qf_names[i] = NULL;
+ org_mount_opt.s_qf_names[i] = NULL;
}
}
#endif
@@ -1374,7 +1488,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
need_stop_gc = true;
}
- if (*flags & MS_RDONLY) {
+ if (*flags & MS_RDONLY ||
+ F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
writeback_inodes_sb(sb, WB_REASON_SYNC);
sync_inodes_sb(sb);
@@ -1400,7 +1515,7 @@ skip:
#ifdef CONFIG_QUOTA
/* Release old quota file names */
for (i = 0; i < MAXQUOTAS; i++)
- kfree(s_qf_names[i]);
+ kfree(org_mount_opt.s_qf_names[i]);
#endif
/* Update the POSIXACL Flag */
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@@ -1418,18 +1533,14 @@ restore_gc:
}
restore_opts:
#ifdef CONFIG_QUOTA
- sbi->s_jquota_fmt = s_jquota_fmt;
+ F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++) {
- kfree(sbi->s_qf_names[i]);
- sbi->s_qf_names[i] = s_qf_names[i];
+ kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+ F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
}
#endif
sbi->mount_opt = org_mount_opt;
- sbi->active_logs = active_logs;
sb->s_flags = old_sb_flags;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- sbi->fault_info = ffi;
-#endif
return err;
}
@@ -1551,8 +1662,8 @@ static qsize_t *f2fs_get_reserved_space(struct inode *inode)
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
- return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type],
- sbi->s_jquota_fmt, type);
+ return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
+ F2FS_OPTION(sbi).s_jquota_fmt, type);
}
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
@@ -1571,7 +1682,7 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
}
for (i = 0; i < MAXQUOTAS; i++) {
- if (sbi->s_qf_names[i]) {
+ if (F2FS_OPTION(sbi).s_qf_names[i]) {
err = f2fs_quota_on_mount(sbi, i);
if (!err) {
enabled = 1;
@@ -1801,11 +1912,28 @@ static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
void *fs_data)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ /*
+ * Encrypting the root directory is not allowed because fsck
+ * expects lost+found directory to exist and remain unencrypted
+ * if LOST_FOUND feature is enabled.
+ *
+ */
+ if (f2fs_sb_has_lost_found(sbi->sb) &&
+ inode->i_ino == F2FS_ROOT_INO(sbi))
+ return -EPERM;
+
return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
ctx, len, fs_data, XATTR_CREATE);
}
+static bool f2fs_dummy_context(struct inode *inode)
+{
+ return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
+}
+
static unsigned f2fs_max_namelen(struct inode *inode)
{
return S_ISLNK(inode->i_mode) ?
@@ -1816,6 +1944,7 @@ static const struct fscrypt_operations f2fs_cryptops = {
.key_prefix = "f2fs:",
.get_context = f2fs_get_context,
.set_context = f2fs_set_context,
+ .dummy_context = f2fs_dummy_context,
.empty_dir = f2fs_empty_dir,
.max_namelen = f2fs_max_namelen,
};
@@ -1898,7 +2027,6 @@ static int __f2fs_commit_super(struct buffer_head *bh,
lock_buffer(bh);
if (super)
memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
- set_buffer_uptodate(bh);
set_buffer_dirty(bh);
unlock_buffer(bh);
@@ -2185,6 +2313,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->dirty_device = 0;
spin_lock_init(&sbi->dev_lock);
+
+ init_rwsem(&sbi->sb_lock);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
@@ -2210,7 +2340,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
unsigned int n = 0;
int err = -EIO;
- if (!f2fs_sb_mounted_blkzoned(sbi->sb))
+ if (!f2fs_sb_has_blkzoned(sbi->sb))
return 0;
if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
@@ -2338,7 +2468,7 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
}
/* write back-up superblock first */
- bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
+ bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
if (!bh)
return -EIO;
err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
@@ -2349,7 +2479,7 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
return err;
/* write current valid superblock */
- bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+ bh = sb_bread(sbi->sb, sbi->valid_super_block);
if (!bh)
return -EIO;
err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
@@ -2421,7 +2551,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
#ifdef CONFIG_BLK_DEV_ZONED
if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
- !f2fs_sb_mounted_blkzoned(sbi->sb)) {
+ !f2fs_sb_has_blkzoned(sbi->sb)) {
f2fs_msg(sbi->sb, KERN_ERR,
"Zoned block device feature not enabled\n");
return -EINVAL;
@@ -2455,6 +2585,18 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
return 0;
}
+static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_sm_info *sm_i = SM_I(sbi);
+
+ /* adjust parameters according to the volume size */
+ if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
+ F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
+ sm_i->dcc_info->discard_granularity = 1;
+ sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
+ }
+}
+
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
struct f2fs_sb_info *sbi;
@@ -2502,8 +2644,8 @@ try_onemore:
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
- sbi->s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
- sbi->s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
+ F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+ F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sb))
@@ -2516,7 +2658,7 @@ try_onemore:
* devices, but mandatory for host-managed zoned block devices.
*/
#ifndef CONFIG_BLK_DEV_ZONED
- if (f2fs_sb_mounted_blkzoned(sb)) {
+ if (f2fs_sb_has_blkzoned(sb)) {
f2fs_msg(sb, KERN_ERR,
"Zoned block device support is not enabled\n");
err = -EOPNOTSUPP;
@@ -2732,7 +2874,7 @@ try_onemore:
* Turn on quotas which were not enabled for read-only mounts if
* filesystem has quota feature, so that they are updated correctly.
*/
- if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) {
+ if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
err = f2fs_enable_quotas(sb);
if (err) {
f2fs_msg(sb, KERN_ERR,
@@ -2807,6 +2949,8 @@ skip_recovery:
f2fs_join_shrinker(sbi);
+ f2fs_tuning_parameters(sbi);
+
f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
cur_cp_version(F2FS_CKPT(sbi)));
f2fs_update_time(sbi, CP_TIME);
@@ -2815,7 +2959,7 @@ skip_recovery:
free_meta:
#ifdef CONFIG_QUOTA
- if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb))
+ if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
f2fs_quota_off_umount(sbi->sb);
#endif
f2fs_sync_inode_meta(sbi);
@@ -2859,7 +3003,7 @@ free_bio_info:
free_options:
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
- kfree(sbi->s_qf_names[i]);
+ kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
kfree(options);
free_sb_buf:
@@ -2956,8 +3100,13 @@ static int __init init_f2fs_fs(void)
err = f2fs_create_root_stats();
if (err)
goto free_filesystem;
+ err = f2fs_init_post_read_processing();
+ if (err)
+ goto free_root_stats;
return 0;
+free_root_stats:
+ f2fs_destroy_root_stats();
free_filesystem:
unregister_filesystem(&f2fs_fs_type);
free_shrinker:
@@ -2980,6 +3129,7 @@ fail:
static void __exit exit_f2fs_fs(void)
{
+ f2fs_destroy_post_read_processing();
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
unregister_shrinker(&f2fs_shrinker_info);
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index d978c7b6ea04..f33a56d6e6dd 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -58,7 +58,7 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
#ifdef CONFIG_F2FS_FAULT_INJECTION
else if (struct_type == FAULT_INFO_RATE ||
struct_type == FAULT_INFO_TYPE)
- return (unsigned char *)&sbi->fault_info;
+ return (unsigned char *)&F2FS_OPTION(sbi).fault_info;
#endif
return NULL;
}
@@ -92,10 +92,10 @@ static ssize_t features_show(struct f2fs_attr *a,
if (!sb->s_bdev->bd_part)
return snprintf(buf, PAGE_SIZE, "0\n");
- if (f2fs_sb_has_crypto(sb))
+ if (f2fs_sb_has_encrypt(sb))
len += snprintf(buf, PAGE_SIZE - len, "%s",
"encryption");
- if (f2fs_sb_mounted_blkzoned(sb))
+ if (f2fs_sb_has_blkzoned(sb))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "blkzoned");
if (f2fs_sb_has_extra_attr(sb))
@@ -116,6 +116,9 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_inode_crtime(sb))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "inode_crtime");
+ if (f2fs_sb_has_lost_found(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "lost_found");
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -136,6 +139,27 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
if (!ptr)
return -EINVAL;
+ if (!strcmp(a->attr.name, "extension_list")) {
+ __u8 (*extlist)[F2FS_EXTENSION_LEN] =
+ sbi->raw_super->extension_list;
+ int cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+ int hot_count = sbi->raw_super->hot_ext_count;
+ int len = 0, i;
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "cold file extenstion:\n");
+ for (i = 0; i < cold_count; i++)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
+ extlist[i]);
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "hot file extenstion:\n");
+ for (i = cold_count; i < cold_count + hot_count; i++)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
+ extlist[i]);
+ return len;
+ }
+
ui = (unsigned int *)(ptr + a->offset);
return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
@@ -154,6 +178,41 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
if (!ptr)
return -EINVAL;
+ if (!strcmp(a->attr.name, "extension_list")) {
+ const char *name = strim((char *)buf);
+ bool set = true, hot;
+
+ if (!strncmp(name, "[h]", 3))
+ hot = true;
+ else if (!strncmp(name, "[c]", 3))
+ hot = false;
+ else
+ return -EINVAL;
+
+ name += 3;
+
+ if (*name == '!') {
+ name++;
+ set = false;
+ }
+
+ if (strlen(name) >= F2FS_EXTENSION_LEN)
+ return -EINVAL;
+
+ down_write(&sbi->sb_lock);
+
+ ret = update_extension_list(sbi, name, hot, set);
+ if (ret)
+ goto out;
+
+ ret = f2fs_commit_super(sbi, false);
+ if (ret)
+ update_extension_list(sbi, name, hot, !set);
+out:
+ up_write(&sbi->sb_lock);
+ return ret ? ret : count;
+ }
+
ui = (unsigned int *)(ptr + a->offset);
ret = kstrtoul(skip_spaces(buf), 0, &t);
@@ -166,7 +225,7 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
if (a->struct_type == RESERVED_BLOCKS) {
spin_lock(&sbi->stat_lock);
if (t > (unsigned long)(sbi->user_block_count -
- sbi->root_reserved_blocks)) {
+ F2FS_OPTION(sbi).root_reserved_blocks)) {
spin_unlock(&sbi->stat_lock);
return -EINVAL;
}
@@ -236,6 +295,7 @@ enum feat_id {
FEAT_FLEXIBLE_INLINE_XATTR,
FEAT_QUOTA_INO,
FEAT_INODE_CRTIME,
+ FEAT_LOST_FOUND,
};
static ssize_t f2fs_feature_show(struct f2fs_attr *a,
@@ -251,6 +311,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
case FEAT_FLEXIBLE_INLINE_XATTR:
case FEAT_QUOTA_INO:
case FEAT_INODE_CRTIME:
+ case FEAT_LOST_FOUND:
return snprintf(buf, PAGE_SIZE, "supported\n");
}
return 0;
@@ -307,6 +368,7 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list);
#ifdef CONFIG_F2FS_FAULT_INJECTION
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
@@ -329,6 +391,7 @@ F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM);
F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME);
+F2FS_FEATURE_RO_ATTR(lost_found, FEAT_LOST_FOUND);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -357,6 +420,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(iostat_enable),
ATTR_LIST(readdir_ra),
ATTR_LIST(gc_pin_file_thresh),
+ ATTR_LIST(extension_list),
#ifdef CONFIG_F2FS_FAULT_INJECTION
ATTR_LIST(inject_rate),
ATTR_LIST(inject_type),
@@ -383,6 +447,7 @@ static struct attribute *f2fs_feat_attrs[] = {
ATTR_LIST(flexible_inline_xattr),
ATTR_LIST(quota_ino),
ATTR_LIST(inode_crtime),
+ ATTR_LIST(lost_found),
NULL,
};
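
The extension_list store handler above takes a "[h]" or "[c]" prefix to select the hot or cold list and an optional '!' to remove an entry, then persists the change with f2fs_commit_super(). A rough usage sketch under those rules; the sysfs path and extensions are illustrative only:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int write_str(const char *path, const char *s)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror(path);
			return -1;
		}
		if (write(fd, s, strlen(s)) < 0)
			perror(path);
		close(fd);
		return 0;
	}

	int main(void)
	{
		/* hypothetical node; the real one sits under /sys/fs/f2fs/<disk>/ */
		const char *node = "/sys/fs/f2fs/sdb1/extension_list";

		write_str(node, "[h]log");	/* add "log" to the hot extension list */
		write_str(node, "[c]!mp3");	/* drop "mp3" from the cold extension list */
		return 0;
	}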
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 48fe91e86c2a..da43c4a22e1b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -747,11 +747,12 @@ int inode_congested(struct inode *inode, int cong_bits)
*/
if (inode && inode_to_wb_is_valid(inode)) {
struct bdi_writeback *wb;
- bool locked, congested;
+ struct wb_lock_cookie lock_cookie = {};
+ bool congested;
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
congested = wb_congested(wb, cong_bits);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &lock_cookie);
return congested;
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c6a499b7547e..9398d1b70545 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -914,7 +914,7 @@ out:
}
/*
- * This is a variaon of __jbd2_update_log_tail which checks for validity of
+ * This is a variation of __jbd2_update_log_tail which checks for validity of
* provided log tail and locks j_checkpoint_mutex. So it is safe against races
* with other threads updating log tail.
*/
@@ -1384,6 +1384,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
journal_superblock_t *sb = journal->j_superblock;
int ret;
+ if (is_journal_aborted(journal))
+ return -EIO;
+
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
tail_block, tail_tid);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index a2e724053919..f3a31f55f372 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -527,6 +527,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
*/
ret = start_this_handle(journal, handle, GFP_NOFS);
if (ret < 0) {
+ handle->h_journal = journal;
jbd2_journal_free_reserved(handle);
return ret;
}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index d86c5e3176a1..600da1a4df29 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -345,7 +345,7 @@ static void jffs2_put_super (struct super_block *sb)
static void jffs2_kill_sb(struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
- if (!(sb->s_flags & MS_RDONLY))
+ if (c && !(sb->s_flags & MS_RDONLY))
jffs2_stop_garbage_collect_thread(c);
kill_mtd_super(sb);
kfree(c);
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 5f31ebd96c06..a2edb0049eb5 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -129,6 +129,8 @@ lockd(void *vrqstp)
{
int err = 0;
struct svc_rqst *rqstp = vrqstp;
+ struct net *net = &init_net;
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
/* try_to_freeze() is called from svc_recv() */
set_freezable();
@@ -173,6 +175,8 @@ lockd(void *vrqstp)
if (nlmsvc_ops)
nlmsvc_invalidate_all();
nlm_shutdown_hosts();
+ cancel_delayed_work_sync(&ln->grace_period_end);
+ locks_end_grace(&ln->lockd_manager);
return 0;
}
@@ -267,8 +271,6 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
if (ln->nlmsvc_users) {
if (--ln->nlmsvc_users == 0) {
nlm_shutdown_hosts_net(net);
- cancel_delayed_work_sync(&ln->grace_period_end);
- locks_end_grace(&ln->lockd_manager);
svc_shutdown_net(serv, net);
dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
}
diff --git a/fs/namei.c b/fs/namei.c
index e82d2955de00..ea6050b6134a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -219,9 +219,10 @@ getname_kernel(const char * filename)
if (len <= EMBEDDED_NAME_MAX) {
result->name = (char *)result->iname;
} else if (len <= PATH_MAX) {
+ const size_t size = offsetof(struct filename, iname[1]);
struct filename *tmp;
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ tmp = kmalloc(size, GFP_KERNEL);
if (unlikely(!tmp)) {
__putname(result);
return ERR_PTR(-ENOMEM);
diff --git a/fs/namespace.c b/fs/namespace.c
index adbe44dda88f..b3d8d3d8f05f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1036,7 +1036,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
goto out_free;
}
- mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
+ mnt->mnt.mnt_flags = old->mnt.mnt_flags;
+ mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
/* Don't allow unprivileged users to change mount flags */
if (flag & CL_UNPRIVILEGED) {
mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 54313322ee5b..c8e90152b61b 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -461,6 +461,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
goto out_err_free;
/* fh */
+ rc = -EIO;
p = xdr_inline_decode(&stream, 4);
if (!p)
goto out_err_free;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8ef6f70c9e25..0f397e62de5a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3025,6 +3025,7 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
.rpc_resp = &res,
};
int status;
+ int i;
bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
FATTR4_WORD0_FH_EXPIRE_TYPE |
@@ -3090,8 +3091,13 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
server->cache_consistency_bitmask[2] = 0;
+
+ /* Avoid a regression due to buggy server */
+ for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
+ res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
sizeof(server->exclcreat_bitmask));
+
server->acl_bitmask = res.acl_bitmask;
server->fh_expire_type = res.fh_expire_type;
}
@@ -7670,6 +7676,12 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
return -EAGAIN;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ nfs4_schedule_session_recovery(clp->cl_session,
+ task->tk_status);
+ break;
default:
nfs4_schedule_lease_recovery(clp);
}
@@ -7748,7 +7760,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
if (status == 0)
status = task->tk_status;
rpc_put_task(task);
- return 0;
out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9a0b219ff74d..83fba40396ae 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1593,13 +1593,14 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}
-static void nfs4_reclaim_complete(struct nfs_client *clp,
+static int nfs4_reclaim_complete(struct nfs_client *clp,
const struct nfs4_state_recovery_ops *ops,
struct rpc_cred *cred)
{
/* Notify the server we're done reclaiming our state */
if (ops->reclaim_complete)
- (void)ops->reclaim_complete(clp, cred);
+ return ops->reclaim_complete(clp, cred);
+ return 0;
}
static void nfs4_clear_reclaim_server(struct nfs_server *server)
@@ -1646,13 +1647,16 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
const struct nfs4_state_recovery_ops *ops;
struct rpc_cred *cred;
+ int err;
if (!nfs4_state_clear_reclaim_reboot(clp))
return;
ops = clp->cl_mvops->reboot_recovery_ops;
cred = nfs4_get_clid_cred(clp);
- nfs4_reclaim_complete(clp, ops, cred);
+ err = nfs4_reclaim_complete(clp, ops, cred);
put_rpccred(cred);
+ if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
+ set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
}
static void nfs_delegation_clear_all(struct nfs_client *clp)
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index e0e5f7c3c99f..8a459b179183 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
u32 event_mask,
void *data, int data_type)
{
- __u32 marks_mask, marks_ignored_mask;
+ __u32 marks_mask = 0, marks_ignored_mask = 0;
struct path *path = data;
pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
!d_can_lookup(path->dentry))
return false;
- if (inode_mark && vfsmnt_mark) {
- marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
- marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
- } else if (inode_mark) {
- /*
- * if the event is for a child and this inode doesn't care about
- * events on the child, don't send it!
- */
- if ((event_mask & FS_EVENT_ON_CHILD) &&
- !(inode_mark->mask & FS_EVENT_ON_CHILD))
- return false;
- marks_mask = inode_mark->mask;
- marks_ignored_mask = inode_mark->ignored_mask;
- } else if (vfsmnt_mark) {
- marks_mask = vfsmnt_mark->mask;
- marks_ignored_mask = vfsmnt_mark->ignored_mask;
- } else {
- BUG();
+ /*
+ * if the event is for a child and this inode doesn't care about
+ * events on the child, don't send it!
+ */
+ if (inode_mark &&
+ (!(event_mask & FS_EVENT_ON_CHILD) ||
+ (inode_mark->mask & FS_EVENT_ON_CHILD))) {
+ marks_mask |= inode_mark->mask;
+ marks_ignored_mask |= inode_mark->ignored_mask;
+ }
+
+ if (vfsmnt_mark) {
+ marks_mask |= vfsmnt_mark->mask;
+ marks_ignored_mask |= vfsmnt_mark->ignored_mask;
}
if (d_is_dir(path->dentry) &&
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 220b04f04523..985a4cdae06d 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -272,6 +272,16 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
return vfs_getxattr(realpath.dentry, name, value, size);
}
+static bool ovl_can_list(const char *s)
+{
+	/* List all non-trusted xattrs */
+ if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+ return true;
+
+ /* Never list trusted.overlay, list other trusted for superuser only */
+ return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+}
+
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
{
struct path realpath;
@@ -296,7 +306,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
return -EIO;
len -= slen;
- if (ovl_is_private_xattr(s)) {
+ if (!ovl_can_list(s)) {
res -= slen;
memmove(s, s + slen, len);
} else {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 62facaf3971f..072dac45b102 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3090,8 +3090,8 @@ static const struct pid_entry tgid_base_stuff[] = {
ONE("cgroup", S_IRUGO, proc_cgroup_show),
#endif
ONE("oom_score", S_IRUGO, proc_oom_score),
- REG("oom_adj", S_IRUSR, proc_oom_adj_operations),
- REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
+ REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
+ REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
REG("sessionid", S_IRUGO, proc_sessionid_operations),
@@ -3484,8 +3484,8 @@ static const struct pid_entry tid_base_stuff[] = {
ONE("cgroup", S_IRUGO, proc_cgroup_show),
#endif
ONE("oom_score", S_IRUGO, proc_oom_score),
- REG("oom_adj", S_IRUSR, proc_oom_adj_operations),
- REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
+ REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
+ REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
REG("sessionid", S_IRUGO, proc_sessionid_operations),
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 9155a5a0d3b9..b7594b9fa5fa 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -79,6 +79,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
available += global_page_state(NR_SLAB_RECLAIMABLE) -
min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+ /*
+ * Part of the kernel memory, which can be released under memory
+ * pressure.
+ */
+ available += global_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
+ PAGE_SHIFT;
+
if (available < 0)
available = 0;
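
Since NR_INDIRECTLY_RECLAIMABLE_BYTES is tracked in bytes, the shift by PAGE_SHIFT converts it to pages before it is folded into MemAvailable; with 4 KiB pages, for example, 8 MiB of indirectly reclaimable memory contributes 8388608 >> 12 = 2048 pages.
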
diff --git a/fs/proc/uid.c b/fs/proc/uid.c
index 616d99b157c3..040591d341f8 100644
--- a/fs/proc/uid.c
+++ b/fs/proc/uid.c
@@ -286,6 +286,8 @@ static const struct inode_operations proc_uid_inode_operations = {
int __init proc_uid_init(void)
{
proc_uid = proc_mkdir("uid", NULL);
+ if (!proc_uid)
+ return -ENOMEM;
proc_uid->proc_iops = &proc_uid_inode_operations;
proc_uid->proc_fops = &proc_uid_operations;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index a72097b625ef..00985f9db9f7 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2643,7 +2643,7 @@ static int journal_init_dev(struct super_block *super,
if (IS_ERR(journal->j_dev_bd)) {
result = PTR_ERR(journal->j_dev_bd);
journal->j_dev_bd = NULL;
- reiserfs_warning(super,
+ reiserfs_warning(super, "sh-457",
"journal_init_dev: Cannot open '%s': %i",
jdev_name, result);
return result;
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index 13da7e5245bd..642627161cad 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -131,6 +131,8 @@ out:
static void sdcardfs_d_release(struct dentry *dentry)
{
+ if (!dentry || !dentry->d_fsdata)
+ return;
/* release and reset the lower paths */
if (has_graft_path(dentry))
sdcardfs_put_reset_orig_path(dentry);
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 676e394e07be..206f8cbc7d7d 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -41,8 +41,6 @@ void sdcardfs_destroy_dentry_cache(void)
void free_dentry_private_data(struct dentry *dentry)
{
- if (!dentry || !dentry->d_fsdata)
- return;
kmem_cache_free(sdcardfs_dentry_cachep, dentry->d_fsdata);
dentry->d_fsdata = NULL;
}
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index e4fd3fbb05e6..30e0c431a1ea 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -316,7 +316,7 @@ static int sdcardfs_read_super(struct vfsmount *mnt, struct super_block *sb,
sb->s_root = d_make_root(inode);
if (!sb->s_root) {
err = -ENOMEM;
- goto out_iput;
+ goto out_sput;
}
d_set_d_op(sb->s_root, &sdcardfs_ci_dops);
@@ -361,8 +361,7 @@ static int sdcardfs_read_super(struct vfsmount *mnt, struct super_block *sb,
/* no longer needed: free_dentry_private_data(sb->s_root); */
out_freeroot:
dput(sb->s_root);
-out_iput:
- iput(inode);
+ sb->s_root = NULL;
out_sput:
/* drop refs we took earlier */
atomic_dec(&lower_sb->s_active);
@@ -422,7 +421,7 @@ void sdcardfs_kill_sb(struct super_block *sb)
{
struct sdcardfs_sb_info *sbi;
- if (sb->s_magic == SDCARDFS_SUPER_MAGIC) {
+ if (sb->s_magic == SDCARDFS_SUPER_MAGIC && sb->s_fs_info) {
sbi = SDCARDFS_SB(sb);
mutex_lock(&sdcardfs_super_list_lock);
list_del(&sbi->list);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1fd90c079537..0bb6de356451 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1728,8 +1728,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
dbg_save_space_info(c);
- for (i = 0; i < c->jhead_cnt; i++)
- ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ for (i = 0; i < c->jhead_cnt; i++) {
+ err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ if (err)
+ ubifs_ro_mode(c, err);
+ }
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
@@ -1795,8 +1798,11 @@ static void ubifs_put_super(struct super_block *sb)
int err;
/* Synchronize write-buffers */
- for (i = 0; i < c->jhead_cnt; i++)
- ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ for (i = 0; i < c->jhead_cnt; i++) {
+ err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ if (err)
+ ubifs_ro_mode(c, err);
+ }
/*
* We are being cleanly unmounted which means the
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c104d4aed62a..de1414ada5a1 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -193,6 +193,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
set_wb_congested(bdi->wb.congested, sync);
}
+struct wb_lock_cookie {
+ bool locked;
+ unsigned long flags;
+};
+
#ifdef CONFIG_CGROUP_WRITEBACK
/**
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 125bc67319b4..4bc6540d426b 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -374,7 +374,7 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
*
* The caller wants to access the wb associated with @inode but isn't
* holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
@@ -382,12 +382,12 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
* association doesn't change until the transaction is finished with
* unlocked_inode_to_wb_end().
*
- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
- * afterwards and can't sleep during transaction. IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction. IRQs may or may not be disabled on
+ * return.
*/
static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
rcu_read_lock();
@@ -395,10 +395,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
* Paired with store_release in inode_switch_wb_work_fn() and
* ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
- *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
- if (unlikely(*lockedp))
- spin_lock_irq(&inode->i_mapping->tree_lock);
+ if (unlikely(cookie->locked))
+ spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
/*
* Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
@@ -410,12 +410,14 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
/**
* unlocked_inode_to_wb_end - end inode wb access transaction
* @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
*/
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
- if (unlikely(locked))
- spin_unlock_irq(&inode->i_mapping->tree_lock);
+ if (unlikely(cookie->locked))
+ spin_unlock_irqrestore(&inode->i_mapping->tree_lock,
+ cookie->flags);
rcu_read_unlock();
}
@@ -462,12 +464,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
}
static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
return inode_to_wb(inode);
}
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 6d73a04d0150..e7fd9490bcf6 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -56,6 +56,7 @@ struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
unsigned int bi_flags; /* status, command, etc */
+ unsigned short bi_write_hint;
int bi_error;
unsigned long bi_rw; /* bottom bits READ/WRITE,
* top bits priority
diff --git a/include/linux/clk/msm-clock-generic.h b/include/linux/clk/msm-clock-generic.h
index fe019d366d0b..cb2d8787b84f 100644
--- a/include/linux/clk/msm-clock-generic.h
+++ b/include/linux/clk/msm-clock-generic.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -311,6 +311,7 @@ extern struct clk_ops clk_ops_mux_div_clk;
struct virtclk_front {
int id;
struct clk c;
+ u32 flag;
};
extern struct clk_ops virtclk_front_ops;
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 7c92113e20c3..2cd41e0cd394 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -145,10 +145,10 @@ the appropriate macros. */
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
#define MSG_MASK_TBL_CNT 26
-#define APPS_EVENT_LAST_ID 0x0B3F
+#define APPS_EVENT_LAST_ID 0x0C5B
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 121
+#define MSG_SSID_0_LAST 125
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -160,11 +160,11 @@ the appropriate macros. */
#define MSG_SSID_5 4000
#define MSG_SSID_5_LAST 4010
#define MSG_SSID_6 4500
-#define MSG_SSID_6_LAST 4583
+#define MSG_SSID_6_LAST 4584
#define MSG_SSID_7 4600
-#define MSG_SSID_7_LAST 4615
+#define MSG_SSID_7_LAST 4616
#define MSG_SSID_8 5000
-#define MSG_SSID_8_LAST 5033
+#define MSG_SSID_8_LAST 5034
#define MSG_SSID_9 5500
#define MSG_SSID_9_LAST 5516
#define MSG_SSID_10 6000
@@ -264,7 +264,7 @@ static const uint32_t msg_bld_masks_0[] = {
MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10,
MSG_LVL_MED,
MSG_LVL_LOW,
- MSG_LVL_LOW,
+ MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_LOW,
MSG_LVL_LOW,
@@ -317,7 +317,7 @@ static const uint32_t msg_bld_masks_0[] = {
MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
MSG_LVL_MED,
MSG_LVL_HIGH,
- MSG_LVL_LOW,
+ MSG_LVL_MED,
MSG_LVL_HIGH,
MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR,
@@ -486,6 +486,7 @@ static const uint32_t msg_bld_masks_6[] = {
MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_LOW,
+ MSG_LVL_LOW,
MSG_LVL_LOW
};
@@ -505,7 +506,9 @@ static const uint32_t msg_bld_masks_7[] = {
MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_LOW,
- MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW
};
static const uint32_t msg_bld_masks_8[] = {
@@ -525,9 +528,6 @@ static const uint32_t msg_bld_masks_8[] = {
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_MED,
- MSG_LVL_LOW,
- MSG_LVL_LOW,
- MSG_LVL_LOW,
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_MED,
@@ -542,6 +542,10 @@ static const uint32_t msg_bld_masks_8[] = {
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_HIGH,
MSG_LVL_HIGH
};
@@ -644,14 +648,14 @@ static const uint32_t msg_bld_masks_10[] = {
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_LOW,
- MSG_LVL_LOW,
- MSG_LVL_LOW,
- MSG_LVL_LOW,
- MSG_LVL_LOW,
- MSG_LVL_LOW,
- MSG_LVL_LOW,
- MSG_LVL_LOW,
- MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
MSG_LVL_MED
};
@@ -797,7 +801,9 @@ static const uint32_t msg_bld_masks_19[] = {
};
static const uint32_t msg_bld_masks_20[] = {
- MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 |
+ MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 |
+ MSG_MASK_12,
MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_LOW,
@@ -875,7 +881,7 @@ static const uint32_t msg_bld_masks_25[] = {
/* LOG CODES */
static const uint32_t log_code_last_tbl[] = {
0x0, /* EQUIP ID 0 */
- 0x1A11, /* EQUIP ID 1 */
+ 0x1C6A, /* EQUIP ID 1 */
0x0, /* EQUIP ID 2 */
0x0, /* EQUIP ID 3 */
0x4910, /* EQUIP ID 4 */
@@ -885,7 +891,7 @@ static const uint32_t log_code_last_tbl[] = {
0x0, /* EQUIP ID 8 */
0x0, /* EQUIP ID 9 */
0xA38A, /* EQUIP ID 10 */
- 0xB201, /* EQUIP ID 11 */
+ 0xB9FF, /* EQUIP ID 11 */
0x0, /* EQUIP ID 12 */
0xD1FF, /* EQUIP ID 13 */
0x0, /* EQUIP ID 14 */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 073365c9808a..2ebfa01b7091 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -21,6 +21,7 @@
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
+#define F2FS_EXTENSION_LEN 8 /* max size of extension */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
@@ -38,10 +39,10 @@
#define F2FS_MAX_QUOTAS 3
-#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
-#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
-#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */
-#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */
+#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
+#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */
+#define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */
+#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
/* This flag is used by node and meta inodes, and by recovery */
@@ -101,7 +102,7 @@ struct f2fs_super_block {
__u8 uuid[16]; /* 128-bit uuid for volume */
__le16 volume_name[MAX_VOLUME_NAME]; /* volume name */
__le32 extension_count; /* # of extensions below */
- __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
+ __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */
__le32 cp_payload;
__u8 version[VERSION_LEN]; /* the kernel version */
__u8 init_version[VERSION_LEN]; /* the initial kernel version */
@@ -110,12 +111,14 @@ struct f2fs_super_block {
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
struct f2fs_device devs[MAX_DEVICES]; /* device list */
__le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
- __u8 reserved[315]; /* valid reserved region */
+ __u8 hot_ext_count; /* # of hot file extension */
+ __u8 reserved[314]; /* valid reserved region */
} __packed;
/*
* For checkpoint
*/
+#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400
#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
@@ -302,6 +305,10 @@ struct f2fs_node {
*/
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
+#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \
+ ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \
+ BITS_PER_LONG * BITS_PER_LONG)
+
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 20b7b1b41630..250f4d1ce9c5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -20,6 +20,7 @@
#include <linux/rwsem.h>
#include <linux/capability.h>
#include <linux/semaphore.h>
+#include <linux/fcntl.h>
#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
#include <linux/atomic.h>
@@ -143,6 +144,9 @@ typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
+/* File is capable of returning -EAGAIN if I/O will block */
+#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
+
/*
* Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
* that indicates that they should check the contents of the iovec are
@@ -326,9 +330,22 @@ struct page;
struct address_space;
struct writeback_control;
+/*
+ * Write life time hint values.
+ */
+enum rw_hint {
+ WRITE_LIFE_NOT_SET = 0,
+ WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE,
+ WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT,
+ WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM,
+ WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG,
+ WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME,
+};
+
#define IOCB_EVENTFD (1 << 0)
#define IOCB_APPEND (1 << 1)
#define IOCB_DIRECT (1 << 2)
+#define IOCB_NOWAIT (1 << 7)
struct kiocb {
struct file *ki_filp;
@@ -336,6 +353,7 @@ struct kiocb {
void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
void *private;
int ki_flags;
+ enum rw_hint ki_hint;
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -635,6 +653,7 @@ struct inode {
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
unsigned int i_blkbits;
+ enum rw_hint i_write_hint;
blkcnt_t i_blocks;
#ifdef __NEED_I_SIZE_ORDERED
@@ -1073,8 +1092,6 @@ struct file_lock_context {
#define OFFT_OFFSET_MAX INT_LIMIT(off_t)
#endif
-#include <linux/fcntl.h>
-
extern void send_sigio(struct fown_struct *fown, int fd, int band);
#ifdef CONFIG_FILE_LOCKING
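
The hunks above introduce enum rw_hint plus ki_hint on struct kiocb and i_write_hint on struct inode, alongside the bi_write_hint field added to struct bio earlier in this patch. A minimal sketch of how a submission path might propagate the hint is below; the helper itself is hypothetical and not part of this patch.

/*
 * Sketch only: carry the write-life-time hint down to the bio. The field
 * names are the ones introduced above; the helper is illustrative.
 */
static void set_bio_write_hint(struct bio *bio, struct inode *inode,
			       struct kiocb *iocb)
{
	/* Prefer a per-I/O hint from the kiocb, fall back to the inode. */
	if (iocb && iocb->ki_hint != WRITE_LIFE_NOT_SET)
		bio->bi_write_hint = iocb->ki_hint;
	else
		bio->bi_write_hint = inode->i_write_hint;
}
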
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 8641e56b8f8a..9e535af579e8 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -13,42 +13,13 @@
#ifndef _LINUX_FSCRYPT_H
#define _LINUX_FSCRYPT_H
-#include <linux/key.h>
#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/bio.h>
-#include <linux/dcache.h>
-#include <crypto/skcipher.h>
-#include <uapi/linux/fs.h>
#define FS_CRYPTO_BLOCK_SIZE 16
+struct fscrypt_ctx;
struct fscrypt_info;
-struct fscrypt_ctx {
- union {
- struct {
- struct page *bounce_page; /* Ciphertext page */
- struct page *control_page; /* Original page */
- } w;
- struct {
- struct bio *bio;
- struct work_struct work;
- } r;
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
-};
-
-/**
- * For encrypted symlinks, the ciphertext length is stored at the beginning
- * of the string in little-endian format.
- */
-struct fscrypt_symlink_data {
- __le16 len;
- char encrypted_path[1];
-} __packed;
-
struct fscrypt_str {
unsigned char *name;
u32 len;
@@ -67,86 +38,11 @@ struct fscrypt_name {
#define fname_name(p) ((p)->disk_name.name)
#define fname_len(p) ((p)->disk_name.len)
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
-/*
- * crypto opertions for filesystems
- */
-struct fscrypt_operations {
- unsigned int flags;
- const char *key_prefix;
- int (*get_context)(struct inode *, void *, size_t);
- int (*set_context)(struct inode *, const void *, size_t, void *);
- bool (*dummy_context)(struct inode *);
- bool (*empty_dir)(struct inode *);
- unsigned (*max_namelen)(struct inode *);
-};
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
- if (inode->i_sb->s_cop->dummy_context &&
- inode->i_sb->s_cop->dummy_context(inode))
- return true;
- return false;
-}
-
-static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
- u32 filenames_mode)
-{
- if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
- return true;
-
- if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
- return true;
-
- return false;
-}
-
-static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
-{
- if (str->len == 1 && str->name[0] == '.')
- return true;
-
- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
- return true;
-
- return false;
-}
-
#if __FS_HAS_ENCRYPTION
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
- return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
-}
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
- return (inode->i_crypt_info != NULL);
-}
-
#include <linux/fscrypt_supp.h>
-
-#else /* !__FS_HAS_ENCRYPTION */
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
- WARN_ON_ONCE(1);
- return ERR_PTR(-EINVAL);
-}
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
- return 0;
-}
-
+#else
#include <linux/fscrypt_notsupp.h>
-#endif /* __FS_HAS_ENCRYPTION */
+#endif
/**
* fscrypt_require_key - require an inode's encryption key
@@ -287,4 +183,68 @@ static inline int fscrypt_prepare_setattr(struct dentry *dentry,
return 0;
}
+/**
+ * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink
+ * @dir: directory in which the symlink is being created
+ * @target: plaintext symlink target
+ * @len: length of @target excluding null terminator
+ * @max_len: space the filesystem has available to store the symlink target
+ * @disk_link: (out) the on-disk symlink target being prepared
+ *
+ * This function computes the size the symlink target will require on-disk,
+ * stores it in @disk_link->len, and validates it against @max_len. An
+ * encrypted symlink may be longer than the original.
+ *
+ * Additionally, @disk_link->name is set to @target if the symlink will be
+ * unencrypted, but left NULL if the symlink will be encrypted. For encrypted
+ * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the
+ * on-disk target later. (The reason for the two-step process is that some
+ * filesystems need to know the size of the symlink target before creating the
+ * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.)
+ *
+ * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long,
+ * -ENOKEY if the encryption key is missing, or another -errno code if a problem
+ * occurred while setting up the encryption key.
+ */
+static inline int fscrypt_prepare_symlink(struct inode *dir,
+ const char *target,
+ unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
+{
+ if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir))
+ return __fscrypt_prepare_symlink(dir, len, max_len, disk_link);
+
+ disk_link->name = (unsigned char *)target;
+ disk_link->len = len + 1;
+ if (disk_link->len > max_len)
+ return -ENAMETOOLONG;
+ return 0;
+}
+
+/**
+ * fscrypt_encrypt_symlink - encrypt the symlink target if needed
+ * @inode: symlink inode
+ * @target: plaintext symlink target
+ * @len: length of @target excluding null terminator
+ * @disk_link: (in/out) the on-disk symlink target being prepared
+ *
+ * If the symlink target needs to be encrypted, then this function encrypts it
+ * into @disk_link->name. fscrypt_prepare_symlink() must have been called
+ * previously to compute @disk_link->len. If the filesystem did not allocate a
+ * buffer for @disk_link->name after calling fscrypt_prepare_symlink(), then one
+ * will be kmalloc()'ed and the filesystem will be responsible for freeing it.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fscrypt_encrypt_symlink(struct inode *inode,
+ const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link)
+{
+ if (IS_ENCRYPTED(inode))
+ return __fscrypt_encrypt_symlink(inode, target, len, disk_link);
+ return 0;
+}
+
#endif /* _LINUX_FSCRYPT_H */
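
A minimal sketch of the two-step flow the kernel-doc above describes, as a filesystem's symlink path might use it; write_symlink_to_disk() is a placeholder for the filesystem's own on-disk update, and @inode is assumed to have been created between the two calls.

/*
 * Sketch only - not part of this patch. write_symlink_to_disk() stands in
 * for the filesystem's own on-disk update of the symlink body.
 */
static int example_symlink(struct inode *dir, struct inode *inode,
			   const char *target, unsigned int max_len)
{
	struct fscrypt_str disk_link;
	int err;

	/* Computes disk_link.len; leaves disk_link.name NULL if encrypted. */
	err = fscrypt_prepare_symlink(dir, target, strlen(target),
				      max_len, &disk_link);
	if (err)
		return err;

	/*
	 * No-op for unencrypted symlinks; otherwise allocates and fills
	 * disk_link.name, which the filesystem must later kfree().
	 */
	err = fscrypt_encrypt_symlink(inode, target, strlen(target),
				      &disk_link);
	if (err)
		return err;

	return write_symlink_to_disk(inode, disk_link.name, disk_link.len);
}
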
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index c4c6bf2c390e..44bd4fbd3ec5 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -13,7 +13,21 @@
#ifndef _LINUX_FSCRYPT_NOTSUPP_H
#define _LINUX_FSCRYPT_NOTSUPP_H
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return false;
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ return false;
+}
+
/* crypto.c */
+static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
+{
+}
+
static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
gfp_t gfp_flags)
{
@@ -42,6 +56,11 @@ static inline int fscrypt_decrypt_page(const struct inode *inode,
return -EOPNOTSUPP;
}
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
static inline void fscrypt_restore_control_page(struct page *page)
{
@@ -115,16 +134,8 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
return;
}
-static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode,
- u32 ilen)
-{
- /* never happens */
- WARN_ON(1);
- return 0;
-}
-
static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
- u32 ilen,
+ u32 max_encrypted_len,
struct fscrypt_str *crypto_str)
{
return -EOPNOTSUPP;
@@ -143,13 +154,6 @@ static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
return -EOPNOTSUPP;
}
-static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct fscrypt_str *oname)
-{
- return -EOPNOTSUPP;
-}
-
static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len)
{
@@ -160,10 +164,13 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}
/* bio.c */
-static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
- struct bio *bio)
+static inline void fscrypt_decrypt_bio(struct bio *bio)
+{
+}
+
+static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+ struct bio *bio)
{
- return;
}
static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
@@ -207,4 +214,27 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir,
return -EOPNOTSUPP;
}
+static inline int __fscrypt_prepare_symlink(struct inode *dir,
+ unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_encrypt_symlink(struct inode *inode,
+ const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *fscrypt_get_symlink(struct inode *inode,
+ const void *caddr,
+ unsigned int max_size)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 2db5e9706f60..9d1857302b73 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -10,8 +10,55 @@
#ifndef _LINUX_FSCRYPT_SUPP_H
#define _LINUX_FSCRYPT_SUPP_H
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+ unsigned int flags;
+ const char *key_prefix;
+ int (*get_context)(struct inode *, void *, size_t);
+ int (*set_context)(struct inode *, const void *, size_t, void *);
+ bool (*dummy_context)(struct inode *);
+ bool (*empty_dir)(struct inode *);
+ unsigned (*max_namelen)(struct inode *);
+};
+
+struct fscrypt_ctx {
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio;
+ struct work_struct work;
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ u8 flags; /* Flags */
+};
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return (inode->i_crypt_info != NULL);
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ return inode->i_sb->s_cop->dummy_context &&
+ inode->i_sb->s_cop->dummy_context(inode);
+}
+
/* crypto.c */
-extern struct kmem_cache *fscrypt_info_cachep;
+extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
@@ -19,6 +66,12 @@ extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
u64, gfp_t);
extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
unsigned int, u64);
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+ return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+}
+
extern void fscrypt_restore_control_page(struct page *);
extern const struct dentry_operations fscrypt_d_ops;
@@ -54,14 +107,11 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
kfree(fname->crypto_buf.name);
}
-extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
const struct fscrypt_str *, struct fscrypt_str *);
-extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
- struct fscrypt_str *);
#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
@@ -138,7 +188,9 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}
/* bio.c */
-extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_decrypt_bio(struct bio *);
+extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+ struct bio *bio);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
@@ -152,5 +204,13 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir,
struct dentry *new_dentry,
unsigned int flags);
extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
+extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link);
+extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link);
+extern void *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+ unsigned int max_size);
#endif /* _LINUX_FSCRYPT_SUPP_H */
diff --git a/include/linux/habmm.h b/include/linux/habmm.h
index 966c5ee91be4..842cd27fd372 100644
--- a/include/linux/habmm.h
+++ b/include/linux/habmm.h
@@ -214,6 +214,11 @@ int32_t habmm_socket_recvfrom(int32_t handle, void *dst_buff,
*/
#define HABMM_EXP_MEM_TYPE_DMA 0x00000001
+/*
+ * this flag is used for export from dma_buf fd or import to dma_buf fd
+ */
+#define HABMM_EXPIMP_FLAGS_FD 0x00010000
+
#define HAB_MAX_EXPORT_SIZE 0x8000000
/*
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 251a1d382e23..fd86687f8119 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -793,7 +793,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
+int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
@@ -1098,13 +1098,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
*
* @report: the report we want to know the length
*/
-static inline int hid_report_len(struct hid_report *report)
+static inline u32 hid_report_len(struct hid_report *report)
{
/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
return ((report->size - 1) >> 3) + 1 + (report->id > 0);
}
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
/* HID quirks API */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 19db03dbbd00..dd676ba758ee 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -585,7 +585,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
* Returns true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
@@ -596,6 +596,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
protocol != htons(ETH_P_8021AD)))
return false;
+ if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+ return false;
+
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
@@ -613,7 +616,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
*
* Returns features without unsafe ones if the skb has multiple tags.
*/
-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb)) {
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index fe052e234906..bb1018882199 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -465,6 +465,7 @@ struct mlx4_update_qp_params {
u16 rate_val;
};
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index a91b67b18a73..5c93f4a89afa 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -635,8 +635,14 @@ enum {
};
enum {
- CQE_RSS_HTYPE_IP = 0x3 << 6,
- CQE_RSS_HTYPE_L4 = 0x3 << 2,
+ CQE_RSS_HTYPE_IP = 0x3 << 2,
+ /* cqe->rss_hash_type[3:2] - IP destination selected for hash
+ * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
+ */
+ CQE_RSS_HTYPE_L4 = 0x3 << 6,
+ /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
+	 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
+ */
};
enum {
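
The swap above corrects which bits of cqe->rss_hash_type encode the IP and L4 selections. A decoding sketch under that layout follows; these helpers are illustrative and not part of the mlx5 driver API.

/*
 * Sketch only: decode the hash-type fields using the corrected masks.
 * The rss_hash_type field name comes from the comments above.
 */
static inline u8 cqe_l4_hash_type(u8 rss_hash_type)
{
	/* bits [7:6]: 0 = none, 1 = TCP, 2 = UDP, 3 = IPSEC SPI */
	return (rss_hash_type & CQE_RSS_HTYPE_L4) >> 6;
}

static inline u8 cqe_ip_hash_type(u8 rss_hash_type)
{
	/* bits [3:2]: 0 = none, 1 = IPv4, 2 = IPv6 */
	return (rss_hash_type & CQE_RSS_HTYPE_IP) >> 2;
}
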
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d2dcc8727bc8..34fcdede4604 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -241,10 +241,14 @@ extern pgprot_t protection_map[16];
* ->fault function. The vma's ->fault is responsible for returning a bitmask
* of VM_FAULT_xxx flags that give details about how the fault was handled.
*
+ * MM layer fills up gfp_mask for page allocations but fault handler might
+ * alter it if its implementation requires a different allocation context.
+ *
* pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
unsigned int flags; /* FAULT_FLAG_xxx flags */
+ gfp_t gfp_mask; /* gfp mask to be used for allocations */
pgoff_t pgoff; /* Logical page offset based on vma */
void __user *virtual_address; /* Faulting virtual address */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0db2f3cb1b6c..f09c5b28ed70 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -176,6 +176,7 @@ enum zone_stat_item {
NR_ANON_TRANSPARENT_HUGEPAGES,
NR_FREE_CMA_PAGES,
NR_SWAPCACHE,
+ NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
NR_VM_ZONE_STAT_ITEMS };
/*
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa457febd..3529683f691e 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@ struct flchip {
unsigned int write_suspended:1;
unsigned int erase_suspended:1;
unsigned long in_progress_block_addr;
+ unsigned long in_progress_block_mask;
struct mutex mutex;
wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 8b8a46ce32d0..64d0797cc3a7 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -103,6 +103,9 @@ enum {
POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED = 10,
POWER_SUPPLY_DP_DM_ICL_DOWN = 11,
POWER_SUPPLY_DP_DM_ICL_UP = 12,
+ POWER_SUPPLY_DP_DM_FORCE_5V = 13,
+ POWER_SUPPLY_DP_DM_FORCE_9V = 14,
+ POWER_SUPPLY_DP_DM_FORCE_12V = 15,
};
enum {
diff --git a/include/linux/qdsp6v2/apr.h b/include/linux/qdsp6v2/apr.h
index adcdbcbc5907..98432952f278 100644
--- a/include/linux/qdsp6v2/apr.h
+++ b/include/linux/qdsp6v2/apr.h
@@ -69,7 +69,9 @@ struct apr_hdr {
#define APR_DOMAIN_MODEM 0x3
#define APR_DOMAIN_ADSP 0x4
#define APR_DOMAIN_APPS 0x5
-#define APR_DOMAIN_MAX 0x6
+#define APR_DOMAIN_SDSP 0x8
+#define APR_DOMAIN_MAX 0x9
+
/* ADSP service IDs */
#define APR_SVC_TEST_CLIENT 0x2
@@ -94,6 +96,9 @@ struct apr_hdr {
#define APR_SVC_CVP 0x6
#define APR_SVC_SRD 0x7
+/* Sensor DSP Micro Audio Service IDs */
+#define APR_SVC_MAS 0x3
+
/* APR Port IDs */
#define APR_MAX_PORTS 0x80
diff --git a/include/linux/qdsp6v2/apr_tal.h b/include/linux/qdsp6v2/apr_tal.h
index bf324064960b..9b35c9f9882d 100644
--- a/include/linux/qdsp6v2/apr_tal.h
+++ b/include/linux/qdsp6v2/apr_tal.h
@@ -27,7 +27,8 @@
#define APR_DEST_MODEM 0
#define APR_DEST_QDSP6 1
-#define APR_DEST_MAX 2
+#define APR_DEST_DSPS 3
+#define APR_DEST_MAX 4
#if defined(CONFIG_MSM_QDSP6_APRV2_GLINK) || \
defined(CONFIG_MSM_QDSP6_APRV3_GLINK)
diff --git a/include/linux/qdsp6v2/audio-anc-dev-mgr.h b/include/linux/qdsp6v2/audio-anc-dev-mgr.h
new file mode 100644
index 000000000000..dfa6752bc31b
--- /dev/null
+++ b/include/linux/qdsp6v2/audio-anc-dev-mgr.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _AUDIO_ANC_DEV_MGR_H_
+#define _AUDIO_ANC_DEV_MGR_H_
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/clk/msm-clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/msm-dai-q6-v2.h>
+#include <linux/msm_audio_anc.h>
+
+int msm_anc_dev_init(void);
+int msm_anc_dev_deinit(void);
+
+int msm_anc_dev_start(void);
+int msm_anc_dev_stop(void);
+
+int msm_anc_dev_set_info(void *info_p, int32_t anc_cmd);
+
+int msm_anc_dev_create(struct platform_device *pdev);
+
+int msm_anc_dev_destroy(struct platform_device *pdev);
+
+#endif
diff --git a/include/linux/qdsp6v2/sdsp_anc.h b/include/linux/qdsp6v2/sdsp_anc.h
new file mode 100644
index 000000000000..3b236e827e3d
--- /dev/null
+++ b/include/linux/qdsp6v2/sdsp_anc.h
@@ -0,0 +1,302 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __SDSP_ANC_H__
+#define __SDSP_ANC_H__
+
+#include <sound/q6afe-v2.h>
+#include <sound/apr_audio-v2.h>
+
+
+#define AUD_MSVC_MODULE_AUDIO_DEV_RESOURCE_SHARE 0x0001028A
+#define AUD_MSVC_PARAM_ID_PORT_SHARE_RESOURCE_CONFIG 0x00010297
+#define AUD_MSVC_API_VERSION_SHARE_RESOURCE_CONFIG 0x1
+#define AUD_MSVC_MODULE_AUDIO_DEV_ANC_REFS 0x00010254
+#define AUD_MSVC_PARAM_ID_DEV_ANC_REFS_CONFIG 0x00010286
+#define AUD_MSVC_API_VERSION_DEV_ANC_REFS_CONFIG 0x1
+#define AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO 0x00010234
+#define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM 0x00010235
+#define AUD_MSVC_API_VERSION_DEV_ANC_ALGO_RPM 0x1
+
+struct aud_msvc_port_param_data_v2 {
+ /* ID of the module to be configured.
+ * Supported values: Valid module ID
+ */
+ u32 module_id;
+
+ /* ID of the parameter corresponding to the supported parameters
+ * for the module ID.
+ * Supported values: Valid parameter ID
+ */
+ u32 param_id;
+
+ /* Actual size of the data for the
+ * module_id/param_id pair. The size is a
+ * multiple of four bytes.
+ * Supported values: > 0
+ */
+ u16 param_size;
+
+ /* This field must be set to zero.
+ */
+ u16 reserved;
+} __packed;
+
+
+/* Payload of the #AFE_PORT_CMD_SET_PARAM_V2 command's
+ * configuration/calibration settings for the AFE port.
+ */
+struct aud_msvc_port_cmd_set_param_v2 {
+ /* Port interface and direction (Rx or Tx) to start.
+ */
+ u16 port_id;
+
+ /* Actual size of the payload in bytes.
+ * This is used for parsing the parameter payload.
+ * Supported values: > 0
+ */
+ u16 payload_size;
+
+ /* LSW of 64 bit Payload address.
+ * Address should be 32-byte,
+ * 4kbyte aligned and must be contiguous memory.
+ */
+ u32 payload_address_lsw;
+
+ /* MSW of 64 bit Payload address.
+ * In case of 32-bit shared memory address,
+ * this field must be set to zero.
+ * In case of 36-bit shared memory address,
+ * bit-4 to bit-31 must be set to zero.
+ * Address should be 32-byte, 4kbyte aligned
+ * and must be contiguous memory.
+ */
+ u32 payload_address_msw;
+
+ /* Memory map handle returned by
+ * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS commands.
+ * Supported Values:
+ * - NULL -- Message. The parameter data is in-band.
+	 * - Non-NULL -- The parameter data is Out-band. Pointer to
+ * the physical address
+ * in shared memory of the payload data.
+ * An optional field is available if parameter
+ * data is in-band:
+ * aud_msvc_param_data_v2 param_data[...].
+ * For detailed payload content, see the
+ * aud_msvc_port_param_data_v2 structure.
+ */
+ u32 mem_map_handle;
+
+} __packed;
+
+/* Payload of the #AFE_PORT_CMD_GET_PARAM_V2 command,
+ * which queries for one post/preprocessing parameter of a
+ * stream.
+ */
+struct aud_msvc_port_cmd_get_param_v2 {
+ /* Port interface and direction (Rx or Tx) to start. */
+ u16 port_id;
+
+ /* Maximum data size of the parameter ID/module ID combination.
+ * This is a multiple of four bytes
+ * Supported values: > 0
+ */
+ u16 payload_size;
+
+ /* LSW of 64 bit Payload address. Address should be 32-byte,
+ * 4kbyte aligned and must be contig memory.
+ */
+ u32 payload_address_lsw;
+
+ /* MSW of 64 bit Payload address. In case of 32-bit shared
+ * memory address, this field must be set to zero. In case of 36-bit
+ * shared memory address, bit-4 to bit-31 must be set to zero.
+ * Address should be 32-byte, 4kbyte aligned and must be contiguous
+ * memory.
+ */
+ u32 payload_address_msw;
+
+ /* Memory map handle returned by
+ * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS commands.
+ * Supported Values: - NULL -- Message. The parameter data is
+	 * in-band. - Non-NULL -- The parameter data is Out-band. Pointer to
+ * - the physical address in shared memory of the payload data.
+ * For detailed payload content, see the aud_msvc_port_param_data_v2
+ * structure
+ */
+ u32 mem_map_handle;
+
+ /* ID of the module to be queried.
+ * Supported values: Valid module ID
+ */
+ u32 module_id;
+
+ /* ID of the parameter to be queried.
+ * Supported values: Valid parameter ID
+ */
+ u32 param_id;
+
+} __packed;
+
+struct aud_audioif_config_command {
+ struct apr_hdr hdr;
+ struct aud_msvc_port_cmd_set_param_v2 param;
+ struct aud_msvc_port_param_data_v2 pdata;
+ union afe_port_config port;
+} __packed;
+
+struct aud_msvc_param_id_dev_share_resource_cfg {
+ u32 minor_version;
+ u16 rddma_idx;
+ u16 wrdma_idx;
+ u32 lpm_start_addr;
+ u32 lpm_length;
+} __packed;
+
+
+struct aud_msvc_param_id_dev_anc_algo_rpm {
+ u32 minor_version;
+ u32 rpm;
+} __packed;
+
+
+struct aud_msvc_param_id_dev_anc_refs_cfg {
+ u32 minor_version;
+ u16 port_id;
+ u16 num_channel;
+ u32 sample_rate;
+ u32 bit_width;
+} __packed;
+
+
+struct anc_share_resource_command {
+ struct apr_hdr hdr;
+ struct aud_msvc_port_cmd_set_param_v2 param;
+ struct aud_msvc_port_param_data_v2 pdata;
+ struct aud_msvc_param_id_dev_share_resource_cfg resource;
+} __packed;
+
+
+struct anc_config_ref_command {
+ struct apr_hdr hdr;
+ struct aud_msvc_port_cmd_set_param_v2 param;
+ struct aud_msvc_port_param_data_v2 pdata;
+ struct aud_msvc_param_id_dev_anc_refs_cfg refs;
+} __packed;
+
+
+
+struct anc_set_rpm_command {
+ struct apr_hdr hdr;
+ struct aud_msvc_port_cmd_set_param_v2 param;
+ struct aud_msvc_port_param_data_v2 pdata;
+ struct aud_msvc_param_id_dev_anc_algo_rpm set_rpm;
+} __packed;
+
+struct anc_get_rpm_command {
+ struct apr_hdr hdr;
+ struct aud_msvc_port_cmd_get_param_v2 param;
+ struct aud_msvc_port_param_data_v2 pdata;
+ struct aud_msvc_param_id_dev_anc_algo_rpm get_rpm;
+} __packed;
+
+struct anc_get_rpm_resp {
+ uint32_t status;
+ struct aud_msvc_port_param_data_v2 pdata;
+ struct aud_msvc_param_id_dev_anc_algo_rpm res_rpm;
+} __packed;
+
+#define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_BYPASS_MODE 0x0001029B
+
+#define AUD_MSVC_API_VERSION_DEV_ANC_ALGO_BYPASS_MODE 0x1
+
+#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_NO 0x0
+#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_REFS_TO_ANC_SPKR 0x1
+#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_ANC_MIC_TO_ANC_SPKR 0x2
+#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_REFS_MIXED_ANC_MIC_TO_ANC_SPKR 0x3
+
+struct aud_msvc_param_id_dev_anc_algo_bypass_mode {
+ uint32_t minor_version;
+ uint32_t bypass_mode;
+} __packed;
+
+struct anc_set_bypass_mode_command {
+ struct apr_hdr hdr;
+ struct aud_msvc_port_cmd_set_param_v2 param;
+ struct aud_msvc_port_param_data_v2 pdata;
+ struct aud_msvc_param_id_dev_anc_algo_bypass_mode set_bypass_mode;
+} __packed;
+
+#define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_MODULE_ID 0x0001023A
+
+struct aud_msvc_param_id_dev_anc_algo_module_id {
+ uint32_t minor_version;
+ uint32_t module_id;
+} __packed;
+
+struct anc_set_algo_module_id_command {
+ struct apr_hdr hdr;
+ struct aud_msvc_port_cmd_set_param_v2 param;
+ struct aud_msvc_port_param_data_v2 pdata;
+ struct aud_msvc_param_id_dev_anc_algo_module_id set_algo_module_id;
+} __packed;
+
+
+#define AUD_MSVC_PARAM_ID_PORT_ANC_MIC_SPKR_LAYOUT_INFO 0x0001029C
+
+#define AUD_MSVC_API_VERSION_DEV_ANC_MIC_SPKR_LAYOUT_INFO 0x1
+
+#define AUD_MSVC_ANC_MAX_NUM_OF_MICS 16
+#define AUD_MSVC_ANC_MAX_NUM_OF_SPKRS 16
+
+struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info {
+ uint32_t minor_version;
+ uint16_t mic_layout_array[AUD_MSVC_ANC_MAX_NUM_OF_MICS];
+ uint16_t spkr_layout_array[AUD_MSVC_ANC_MAX_NUM_OF_SPKRS];
+ uint16_t num_anc_mic;
+ uint16_t num_anc_spkr;
+ uint16_t num_add_mic_signal;
+ uint16_t num_add_spkr_signal;
+} __packed;
+
+struct anc_set_mic_spkr_layout_info_command {
+ struct apr_hdr hdr;
+ struct aud_msvc_port_cmd_set_param_v2 param;
+ struct aud_msvc_port_param_data_v2 pdata;
+ struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info
+ set_mic_spkr_layout;
+} __packed;
+
+int anc_if_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port);
+
+int anc_if_tdm_port_stop(u16 port_id);
+
+int anc_if_share_resource(u16 port_id, u16 rddma_idx, u16 wrdma_idx,
+ u32 lpm_start_addr, u32 lpm_length);
+
+int anc_if_config_ref(u16 port_id, u32 sample_rate, u32 bit_width,
+ u16 num_channel);
+
+int anc_if_set_rpm(u16 port_id, u32 rpm);
+
+int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode);
+
+int anc_if_set_algo_module_id(u16 port_id, u32 module_id);
+
+int anc_if_set_anc_mic_spkr_layout(u16 port_id,
+struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p);
+
+int anc_if_shared_mem_map(void);
+
+int anc_if_shared_mem_unmap(void);
+
+#endif /* __SDSP_ANC_H__ */
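For orientation, a minimal sketch of how a codec or machine driver might sequence the anc_if_* calls declared above; this is illustrative only, and the port ID, DMA indices, LPM address, and RPM value are placeholders rather than values taken from this patch:

/* Illustrative sketch only -- assumes kernel context and the header above. */
static int example_anc_bringup(struct afe_tdm_port_config *tdm_cfg)
{
	u16 port_id = 0x9000;	/* placeholder TDM port ID */
	int ret;

	ret = anc_if_shared_mem_map();
	if (ret)
		return ret;

	/* Hand one read and one write LPAIF DMA to the ANC service. */
	ret = anc_if_share_resource(port_id, 0, 1,
				    0x0c000000 /* placeholder LPM start */,
				    0x1000 /* placeholder LPM length */);
	if (ret)
		goto unmap;

	/* Reference signal: stereo, 48 kHz, 24-bit. */
	ret = anc_if_config_ref(port_id, 48000, 24, 2);
	if (ret)
		goto unmap;

	ret = anc_if_tdm_port_start(port_id, tdm_cfg);
	if (ret)
		goto unmap;

	anc_if_set_rpm(port_id, 3000);	/* placeholder engine RPM */
	return 0;

unmap:
	anc_if_shared_mem_unmap();
	return ret;
}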
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index ca7c8041b894..52d5609bff8e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -85,6 +85,11 @@ extern unsigned int sysctl_sched_short_sleep;
#endif /* CONFIG_SCHED_HMP */
+#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_IRQSOFF_TRACER)
+extern unsigned int sysctl_preemptoff_tracing_threshold_ns;
+extern unsigned int sysctl_irqsoff_tracing_threshold_ns;
+#endif
+
enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
SCHED_TUNABLESCALING_LOG,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b5421f6f155a..a6da214d0584 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -879,10 +879,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
-int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
- int offset, int len);
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
- int len);
+int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len);
+int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) consume_skb(a)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index a1042afff99a..812cdd8cff22 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -342,6 +342,7 @@ struct tty_file_private {
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
#define TTY_HUPPED 18 /* Post driver->hangup() */
+#define TTY_HUPPING 19 /* Hangup in progress */
#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
@@ -585,7 +586,7 @@ extern int tty_unregister_ldisc(int disc);
extern int tty_set_ldisc(struct tty_struct *tty, int ldisc);
extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
extern void tty_ldisc_release(struct tty_struct *tty);
-extern void tty_ldisc_init(struct tty_struct *tty);
+extern int __must_check tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern void tty_ldisc_begin(void);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 1821d34c24a5..d88c31c8e6a4 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -753,9 +753,9 @@ extern int usb_sec_event_ring_cleanup(struct usb_device *dev,
extern dma_addr_t
usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
unsigned intr_num);
+extern dma_addr_t usb_get_dcba_dma_addr(struct usb_device *dev);
extern dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
struct usb_host_endpoint *ep);
-extern int usb_get_controller_id(struct usb_device *dev);
/* Sets up a group of bulk endpoints to support multiple stream IDs. */
extern int usb_alloc_streams(struct usb_interface *interface,
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 8cbf59e6406b..dff7adbc60bb 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -403,7 +403,8 @@ struct hc_driver {
unsigned intr_num);
dma_addr_t (*get_xfer_ring_dma_addr)(struct usb_hcd *hcd,
struct usb_device *udev, struct usb_host_endpoint *ep);
- int (*get_core_id)(struct usb_hcd *hcd);
+ dma_addr_t (*get_dcba_dma_addr)(struct usb_hcd *hcd,
+ struct usb_device *udev);
};
static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -449,10 +450,10 @@ extern int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
extern dma_addr_t
usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
unsigned intr_num);
+extern dma_addr_t usb_hcd_get_dcba_dma_addr(struct usb_device *udev);
extern dma_addr_t
usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
struct usb_host_endpoint *ep);
-extern int usb_hcd_get_controller_id(struct usb_device *udev);
extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name);
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 8f4d4bfa6d46..d7844d215381 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -124,6 +124,9 @@ int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
+#define virtio_device_for_each_vq(vdev, vq) \
+ list_for_each_entry(vq, &vdev->vqs, list)
+
/**
* virtio_driver - operations for a virtio I/O driver
* @driver: underlying device driver (populate name and owner).
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 07f27b15e6fe..705364a8e9c6 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -82,8 +82,8 @@ struct wiphy;
/* Indicate support for including KEK length in rekey data */
#define CFG80211_REKEY_DATA_KEK_LEN 1
-/* Indicate support for regulatory update sync event */
-#define CFG80211_REG_UPDATE_SYNC_EVENT 1
+/* Indicate backport support for processing user cell base hint */
+#define CFG80211_USER_HINT_CELL_BASE_SELF_MANAGED 1
/*
* wireless hardware capability structures
@@ -996,9 +996,9 @@ enum rate_info_flags {
* @RATE_INFO_BW_160: 160 MHz bandwidth
*/
enum rate_info_bw {
+ RATE_INFO_BW_20 = 0,
RATE_INFO_BW_5,
RATE_INFO_BW_10,
- RATE_INFO_BW_20,
RATE_INFO_BW_40,
RATE_INFO_BW_80,
RATE_INFO_BW_160,
@@ -5774,14 +5774,6 @@ void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
*/
void cfg80211_ap_stopped(struct net_device *netdev, gfp_t gfp);
-/**
- * cfg80211_send_reg_change_event - notify user-space of regulatory change
- * @request: regulatory request
- * @wiphy: the wiphy to use
- */
-void cfg80211_send_reg_change_event(struct regulatory_request *request,
- struct wiphy *wiphy);
-
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index fe994d2e5286..ea985aa7a6c5 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
+void llc_sk_stop_all_timers(struct sock *sk, bool sync);
void llc_sk_free(struct sock *sk);
void llc_sk_reset(struct sock *sk);
diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
index 8716d5942b65..8fcf8908a694 100644
--- a/include/net/slhc_vj.h
+++ b/include/net/slhc_vj.h
@@ -127,6 +127,7 @@ typedef __u32 int32;
*/
struct cstate {
byte_t cs_this; /* connection id number (xmit) */
+ bool initialized; /* true if initialized */
struct cstate *next; /* next in ring (xmit) */
struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
struct tcphdr cs_tcp;
diff --git a/include/net/x25.h b/include/net/x25.h
index c383aa4edbf0..6d30a01d281d 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *);
/* sysctl_net_x25.c */
#ifdef CONFIG_SYSCTL
-void x25_register_sysctl(void);
+int x25_register_sysctl(void);
void x25_unregister_sysctl(void);
#else
-static inline void x25_register_sysctl(void) {};
+static inline int x25_register_sysctl(void) { return 0; };
static inline void x25_unregister_sysctl(void) {};
#endif /* CONFIG_SYSCTL */
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 0393c8869b8f..ee65bdae9971 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -3512,6 +3512,263 @@ struct afe_param_id_set_topology_cfg {
u32 topology_id;
} __packed;
+/*
+ * This command is used by a client to request LPASS resources.
+ * Currently this command supports only LPAIF DMA resources.
+ * Allocated resources remain under the control of the remote client
+ * until they are released.
+ *
+ * If all of the requested resources are available, the response status
+ * in the AFE_CMDRSP_REQUEST_LPASS_RESOURCES payload is set to ADSP_EOK;
+ * otherwise it is set to ADSP_EFAILED.
+ *
+ * This is a variable-payload-size command; the size depends
+ * on the type of resource requested.
+ *
+ * For example, if the client requests AFE_LPAIF_DMA_RESOURCE_ID
+ * resources, the afe_cmd_request_lpass_resources structure is
+ * followed by the afe_cmd_request_lpass_dma_resources
+ * structure.
+ *
+ * AFE_CMDRSP_REQUEST_LPASS_RESOURCES is the response for
+ * this command, which returns the allocated resources.
+ *
+ * @apr_hdr_fields
+ * Opcode -- AFE_CMD_REQUEST_LPASS_RESOURCES
+ *
+ * @return
+ * #AFE_CMDRSP_REQUEST_LPASS_RESOURCES
+ */
+#define AFE_CMD_REQUEST_LPASS_RESOURCES 0x00010109
+
+/* Macro for requesting LPAIF DMA resources */
+#define AFE_LPAIF_DMA_RESOURCE_ID 0x00000001
+
+struct afe_cmd_request_lpass_resources {
+ /*
+ * LPASS Resource ID
+ * @values:
+ * - AFE_LPAIF_DMA_RESOURCE_ID
+ */
+ u32 resource_id;
+} __packed;
+
+/*
+ * AFE_CMD_REQUEST_LPASS_RESOURCES uses this structure when the
+ * client is requesting LPAIF DMA resources.
+ *
+ * The number of read and write DMA channels varies from chipset to
+ * chipset. The HLOS must make sure that the LPASS DMA resources it
+ * requests do not impact the concurrencies that are mandatory for a
+ * given chipset.
+ */
+
+/* Macro for AFE LPAIF default DMA data type */
+#define AFE_LPAIF_DEFAULT_DMA_TYPE 0x0
+
+struct afe_cmd_request_lpass_dma_resources {
+ /*
+ * LPASS DMA Type
+ * @values:
+ * - AFE_LPAIF_DEFAULT_DMA_TYPE
+ */
+ u8 dma_type;
+ /*
+ * Number of read DMA channels required
+ * @values: >=0
+ * - 0 indicates channels are not requested
+ */
+ u8 num_read_dma_channels;
+ /*
+ * Number of write DMA channels required
+ * @values: >=0
+ * - 0 indicates channels are not requested
+ */
+ u8 num_write_dma_channels;
+ /*
+ * Reserved field for 4 byte alignment
+ * @values: 0
+ */
+ u8 reserved;
+} __packed;
+
+struct afe_request_lpass_dma_resources_command {
+ struct apr_hdr hdr;
+ struct afe_cmd_request_lpass_resources resources;
+ struct afe_cmd_request_lpass_dma_resources dma_resources;
+} __packed;
+
+/*
+ * This is the response for the command AFE_CMD_REQUEST_LPASS_RESOURCES.
+ * The payload of this response is variable.
+ *
+ * Whether the resources were allocated successfully is indicated by the
+ * "status" field in the payload. If status is ADSP_EOK, the resources
+ * were allocated successfully and the allocated resource information
+ * follows.
+ *
+ * For example, if the response resource id is AFE_LPAIF_DMA_RESOURCE_ID,
+ * the afe_cmdrsp_request_lpass_dma_resources structure follows
+ * afe_cmdrsp_request_lpass_resources.
+ *
+ * If status is ADSP_EFAILED, the requested resources were not
+ * allocated; in this case the payload following this structure is
+ * invalid.
+ *
+ * @apr_hdr_fields
+ * Opcode -- AFE_CMDRSP_REQUEST_LPASS_RESOURCES
+ */
+#define AFE_CMDRSP_REQUEST_LPASS_RESOURCES 0x0001010A
+
+struct afe_cmdrsp_request_lpass_resources {
+ /*
+ * ADSP_EOK if all requested resources are allocated.
+ * ADSP_EFAILED if resource allocation is failed.
+ */
+ u32 status;
+ /*
+ * Returned LPASS DMA resource ID
+ * @values:
+ * - AFE_LPAIF_DMA_RESOURCE_ID
+ */
+ u32 resource_id;
+} __packed;
+
+/*
+ * This structure is sent as part of the payload of
+ * AFE_CMDRSP_REQUEST_LPASS_RESOURCES when LPAIF DMA resources
+ * were requested. The payload is variable and follows the
+ * afe_cmdrsp_request_lpass_dma_resources structure.
+ * Its size in bytes is the sum of
+ * num_read_dma_channels and num_write_dma_channels.
+ *
+ * If the resource allocation is successful, the payload contains
+ * the valid DMA channel indices.
+ *
+ * For example, if the number of requested DMA read channels is 2 and they
+ * are successfully allocated, the first two bytes of the variable payload
+ * contain valid DMA channel index values.
+ *
+ * In the failure case this payload can be ignored; all values are
+ * initialized to zero.
+ *
+ * An example payload of the command response is below:
+ * <struct afe_cmdrsp_request_lpass_resources>
+ * <struct afe_cmdrsp_request_lpass_dma_resources>
+ * one byte per read DMA index value.
+ * one byte per write DMA index value.
+ * zero padding, if the sum of num_read_dma_channels and
+ * num_write_dma_channels is not a multiple of 4.
+ */
+
+struct afe_cmdrsp_request_lpass_dma_resources {
+ /*
+ * LPASS DMA Type
+ * @values:
+ * - AFE_LPAIF_DEFAULT_DMA_TYPE
+ */
+ u8 dma_type;
+ /*
+ * Returned number of read DMA channels allocated
+ * @values: >=0
+ */
+ u8 num_read_dma_channels;
+ /*
+ * Returned number of write DMA channels allocated
+ * @values: >=0
+ */
+ u8 num_write_dma_channels;
+ /*
+ * Reserved field for 4 byte alignment
+ * @values: 0
+ */
+ u8 reserved;
+} __packed;
+
+/*
+ * This command releases resources that were allocated as
+ * part of AFE_CMD_REQUEST_LPASS_RESOURCES.
+ *
+ * The payload of this command is variable and follows
+ * the afe_cmd_release_lpass_resources structure.
+ *
+ * If the released resource is AFE_LPAIF_DMA_RESOURCE_ID, the
+ * afe_cmd_release_lpass_dma_resources structure follows
+ * afe_cmd_release_lpass_resources.
+ *
+ * @apr_hdr_fields
+ * Opcode -- AFE_CMD_RELEASE_LPASS_RESOURCES
+ *
+ * @return
+ * #APRv2 IBASIC RSP Result
+ */
+#define AFE_CMD_RELEASE_LPASS_RESOURCES 0x0001010B
+
+struct afe_cmd_release_lpass_resources {
+ /*
+ * LPASS DMA resource ID
+ * @values:
+ * - AFE_LPAIF_DMA_RESOURCE_ID
+ */
+ u32 resource_id;
+} __packed;
+
+/*
+ * This payload is appended to AFE_CMD_RELEASE_LPASS_RESOURCES
+ * when resource id AFE_LPAIF_DMA_RESOURCE_ID is used.
+ *
+ * The payload of this command is variable and follows the
+ * afe_cmd_release_lpass_dma_resources structure.
+ * The variable payload's size in bytes is the sum of
+ * num_read_dma_channels and num_write_dma_channels.
+ * The variable payload data contains the valid DMA channel indices that
+ * were allocated as part of AFE_CMD_REQUEST_LPASS_RESOURCES.
+ *
+ * For example, if the number of DMA read channels to be released is 2,
+ * the first two bytes of the variable payload contain valid DMA channel
+ * index values.
+ * The client must fill in the same DMA channel indices that were
+ * returned when the resources were requested; otherwise the
+ * ADSP returns an error.
+ *
+ * An example payload of the release command is below:
+ * <struct afe_cmd_release_lpass_resources>
+ * <struct afe_cmd_release_lpass_dma_resources>
+ * one byte per read DMA index value.
+ * one byte per write DMA index value.
+ */
+
+struct afe_cmd_release_lpass_dma_resources {
+ /*
+ * LPASS DMA Type
+ * @values:
+ * - AFE_LPAIF_DEFAULT_DMA_TYPE
+ */
+ u8 dma_type;
+ /*
+ * Number of read DMA channels to be released
+ * @values: >=0
+ * - 0 indicates channels are not released
+ */
+ u8 num_read_dma_channels;
+ /*
+ * Number of write DMA channels to be released
+ * @values: >=0
+ * - 0 indicates channels are not released
+ */
+ u8 num_write_dma_channels;
+ /*
+ * Reserved field for 4 byte alignment
+ * @values: 0
+ */
+ u8 reserved;
+} __packed;
+
+struct afe_release_lpass_dma_resources_command {
+ struct apr_hdr hdr;
+ struct afe_cmd_release_lpass_resources resources;
+ struct afe_cmd_release_lpass_dma_resources dma_resources;
+} __packed;
/*
* Generic encoder module ID.
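As a reading aid for the AFE_CMD_REQUEST_LPASS_RESOURCES / AFE_CMDRSP_REQUEST_LPASS_RESOURCES layout added above, here is a hedged sketch of how a client might build the request and walk the variable response payload; APR header setup, the transport path, and error codes other than ADSP_EOK (0) are assumed and omitted:

/* Illustrative only: builds the fixed part of the DMA resource request.
 * The apr_hdr in cmd->hdr is assumed to be filled in elsewhere.
 */
static void example_build_lpass_dma_request(
		struct afe_request_lpass_dma_resources_command *cmd,
		u8 num_rd, u8 num_wr)
{
	cmd->resources.resource_id = AFE_LPAIF_DMA_RESOURCE_ID;
	cmd->dma_resources.dma_type = AFE_LPAIF_DEFAULT_DMA_TYPE;
	cmd->dma_resources.num_read_dma_channels = num_rd;
	cmd->dma_resources.num_write_dma_channels = num_wr;
	cmd->dma_resources.reserved = 0;
}

/* Illustrative only: walks the variable payload that follows the fixed
 * response structures -- one byte per allocated read index, then one
 * byte per allocated write index (zero padded to a multiple of 4).
 */
static void example_parse_lpass_dma_response(const u8 *payload)
{
	const struct afe_cmdrsp_request_lpass_resources *rsp =
			(const void *)payload;
	const struct afe_cmdrsp_request_lpass_dma_resources *dma =
			(const void *)(rsp + 1);
	const u8 *idx = (const u8 *)(dma + 1);
	int i;

	if (rsp->status != 0)	/* 0 == ADSP_EOK */
		return;

	for (i = 0; i < dma->num_read_dma_channels; i++)
		pr_debug("read DMA idx %u\n", idx[i]);
	for (i = 0; i < dma->num_write_dma_channels; i++)
		pr_debug("write DMA idx %u\n",
			 idx[dma->num_read_dma_channels + i]);
}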
diff --git a/include/sound/control.h b/include/sound/control.h
index 21d047f229a1..4142757080f8 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -22,6 +22,7 @@
*
*/
+#include <linux/nospec.h>
#include <sound/asound.h>
#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
@@ -147,12 +148,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
- return id->numid - kctl->id.numid;
+ unsigned int ioff = id->numid - kctl->id.numid;
+ return array_index_nospec(ioff, kctl->count);
}
static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
- return id->index - kctl->id.index;
+ unsigned int ioff = id->index - kctl->id.index;
+ return array_index_nospec(ioff, kctl->count);
}
static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
index 760c969d885d..12bbf8c81112 100644
--- a/include/sound/pcm_oss.h
+++ b/include/sound/pcm_oss.h
@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
char *buffer; /* vmallocated period */
size_t buffer_used; /* used length from period buffer */
struct mutex params_lock;
+ atomic_t rw_ref; /* concurrent read/write accesses */
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
struct snd_pcm_plugin *plugin_first;
struct snd_pcm_plugin *plugin_last;
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
index 5031e62beb17..cdbf97023f66 100644
--- a/include/sound/q6afe-v2.h
+++ b/include/sound/q6afe-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,6 +42,9 @@
#define AFE_CLK_VERSION_V1 1
#define AFE_CLK_VERSION_V2 2
+#define AFE_MAX_RDDMA 10
+#define AFE_MAX_WRDMA 10
+
typedef int (*routing_cb)(int port);
enum {
@@ -208,6 +211,87 @@ enum {
AFE_MAX_PORTS
};
+
+enum {
+ IDX_PRIMARY_TDM_RX_0,
+ IDX_PRIMARY_TDM_RX_1,
+ IDX_PRIMARY_TDM_RX_2,
+ IDX_PRIMARY_TDM_RX_3,
+ IDX_PRIMARY_TDM_RX_4,
+ IDX_PRIMARY_TDM_RX_5,
+ IDX_PRIMARY_TDM_RX_6,
+ IDX_PRIMARY_TDM_RX_7,
+ IDX_PRIMARY_TDM_TX_0,
+ IDX_PRIMARY_TDM_TX_1,
+ IDX_PRIMARY_TDM_TX_2,
+ IDX_PRIMARY_TDM_TX_3,
+ IDX_PRIMARY_TDM_TX_4,
+ IDX_PRIMARY_TDM_TX_5,
+ IDX_PRIMARY_TDM_TX_6,
+ IDX_PRIMARY_TDM_TX_7,
+ IDX_SECONDARY_TDM_RX_0,
+ IDX_SECONDARY_TDM_RX_1,
+ IDX_SECONDARY_TDM_RX_2,
+ IDX_SECONDARY_TDM_RX_3,
+ IDX_SECONDARY_TDM_RX_4,
+ IDX_SECONDARY_TDM_RX_5,
+ IDX_SECONDARY_TDM_RX_6,
+ IDX_SECONDARY_TDM_RX_7,
+ IDX_SECONDARY_TDM_TX_0,
+ IDX_SECONDARY_TDM_TX_1,
+ IDX_SECONDARY_TDM_TX_2,
+ IDX_SECONDARY_TDM_TX_3,
+ IDX_SECONDARY_TDM_TX_4,
+ IDX_SECONDARY_TDM_TX_5,
+ IDX_SECONDARY_TDM_TX_6,
+ IDX_SECONDARY_TDM_TX_7,
+ IDX_TERTIARY_TDM_RX_0,
+ IDX_TERTIARY_TDM_RX_1,
+ IDX_TERTIARY_TDM_RX_2,
+ IDX_TERTIARY_TDM_RX_3,
+ IDX_TERTIARY_TDM_RX_4,
+ IDX_TERTIARY_TDM_RX_5,
+ IDX_TERTIARY_TDM_RX_6,
+ IDX_TERTIARY_TDM_RX_7,
+ IDX_TERTIARY_TDM_TX_0,
+ IDX_TERTIARY_TDM_TX_1,
+ IDX_TERTIARY_TDM_TX_2,
+ IDX_TERTIARY_TDM_TX_3,
+ IDX_TERTIARY_TDM_TX_4,
+ IDX_TERTIARY_TDM_TX_5,
+ IDX_TERTIARY_TDM_TX_6,
+ IDX_TERTIARY_TDM_TX_7,
+ IDX_QUATERNARY_TDM_RX_0,
+ IDX_QUATERNARY_TDM_RX_1,
+ IDX_QUATERNARY_TDM_RX_2,
+ IDX_QUATERNARY_TDM_RX_3,
+ IDX_QUATERNARY_TDM_RX_4,
+ IDX_QUATERNARY_TDM_RX_5,
+ IDX_QUATERNARY_TDM_RX_6,
+ IDX_QUATERNARY_TDM_RX_7,
+ IDX_QUATERNARY_TDM_TX_0,
+ IDX_QUATERNARY_TDM_TX_1,
+ IDX_QUATERNARY_TDM_TX_2,
+ IDX_QUATERNARY_TDM_TX_3,
+ IDX_QUATERNARY_TDM_TX_4,
+ IDX_QUATERNARY_TDM_TX_5,
+ IDX_QUATERNARY_TDM_TX_6,
+ IDX_QUATERNARY_TDM_TX_7,
+ IDX_TDM_MAX,
+};
+
+enum {
+ IDX_GROUP_PRIMARY_TDM_RX,
+ IDX_GROUP_PRIMARY_TDM_TX,
+ IDX_GROUP_SECONDARY_TDM_RX,
+ IDX_GROUP_SECONDARY_TDM_TX,
+ IDX_GROUP_TERTIARY_TDM_RX,
+ IDX_GROUP_TERTIARY_TDM_TX,
+ IDX_GROUP_QUATERNARY_TDM_RX,
+ IDX_GROUP_QUATERNARY_TDM_TX,
+ IDX_GROUP_TDM_MAX,
+};
+
enum afe_mad_type {
MAD_HW_NONE = 0x00,
MAD_HW_AUDIO = 0x01,
@@ -369,4 +453,9 @@ void afe_set_routing_callback(routing_cb);
int afe_get_av_dev_drift(struct afe_param_id_dev_timing_stats *timing_stats,
u16 port);
int afe_get_svc_version(uint32_t service_id);
+int afe_request_dma_resources(uint8_t dma_type, uint8_t num_read_dma_channels,
+ uint8_t num_write_dma_channels);
+int afe_get_dma_idx(bool **ret_rddma_idx,
+ bool **ret_wrdma_idx);
+int afe_release_all_dma_resources(void);
#endif /* __Q6AFE_V2_H__ */
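A rough sketch of how a sound driver might use the new afe_*_dma_resources helpers declared above. The interpretation of the arrays returned by afe_get_dma_idx() (one flag per DMA index, AFE_MAX_RDDMA / AFE_MAX_WRDMA entries long) is an assumption, not something this header documents:

/* Illustrative only; see the assumptions noted above. */
static int example_afe_dma_hold(void)
{
	bool *rd_held = NULL, *wr_held = NULL;
	int ret, i;

	ret = afe_request_dma_resources(0 /* default DMA type */, 2, 1);
	if (ret)
		return ret;

	ret = afe_get_dma_idx(&rd_held, &wr_held);
	if (ret) {
		afe_release_all_dma_resources();
		return ret;
	}

	for (i = 0; i < AFE_MAX_RDDMA; i++)
		if (rd_held && rd_held[i])
			pr_debug("holding read DMA %d\n", i);

	/* ... use the channels, then release them ... */
	return afe_release_all_dma_resources();
}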
diff --git a/include/sound/q6core.h b/include/sound/q6core.h
index e70e4ba9b968..148df25cbd51 100644
--- a/include/sound/q6core.h
+++ b/include/sound/q6core.h
@@ -19,8 +19,10 @@
#define AVCS_CMD_ADSP_EVENT_GET_STATE 0x0001290C
#define AVCS_CMDRSP_ADSP_EVENT_GET_STATE 0x0001290D
+#define AVCS_SERVICES_AND_STATIC_MODULES_READY 0x1
+#define AVCS_SERVICE_AND_ALL_MODULES_READY 0x5
-bool q6core_is_adsp_ready(void);
+int q6core_is_adsp_ready(void);
int q6core_add_remove_pool_pages(phys_addr_t buf_add, uint32_t bufsz,
uint32_t mempool_id, bool add_pages);
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 3b91ad5d5115..2dd096eea935 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -78,6 +78,7 @@ struct snd_rawmidi_runtime {
size_t xruns; /* over/underruns counter */
/* misc */
spinlock_t lock;
+ struct mutex realloc_mutex;
wait_queue_head_t sleep;
/* event handler (new bytes, input only) */
void (*event)(struct snd_rawmidi_substream *substream);
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index f5024c560d8f..3c5118011a2c 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -52,19 +52,50 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
TP_ARGS(ip, parent_ip));
#endif
+TRACE_EVENT(irqs_disable,
+
+ TP_PROTO(u64 delta, unsigned long caddr0, unsigned long caddr1,
+ unsigned long caddr2, unsigned long caddr3),
+
+ TP_ARGS(delta, caddr0, caddr1, caddr2, caddr3),
+
+ TP_STRUCT__entry(
+ __field(u64, delta)
+ __field(void*, caddr0)
+ __field(void*, caddr1)
+ __field(void*, caddr2)
+ __field(void*, caddr3)
+ ),
+
+ TP_fast_assign(
+ __entry->delta = delta;
+ __entry->caddr0 = (void *)caddr0;
+ __entry->caddr1 = (void *)caddr1;
+ __entry->caddr2 = (void *)caddr2;
+ __entry->caddr3 = (void *)caddr3;
+ ),
+
+ TP_printk("delta=%llu(ns) Callers:(%pf<-%pf<-%pf<-%pf)", __entry->delta,
+ __entry->caddr0, __entry->caddr1,
+ __entry->caddr2, __entry->caddr3)
+);
+
#endif /* _TRACE_PREEMPTIRQ_H */
#include <trace/define_trace.h>
-#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
#define trace_irq_enable(...)
#define trace_irq_disable(...)
-#define trace_preempt_enable(...)
-#define trace_preempt_disable(...)
#define trace_irq_enable_rcuidle(...)
#define trace_irq_disable_rcuidle(...)
+#endif
+
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
#define trace_preempt_enable_rcuidle(...)
#define trace_preempt_disable_rcuidle(...)
-
#endif
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index cc0ebe6867a5..8fd96aebfdee 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1436,6 +1436,38 @@ TRACE_EVENT(sched_isolate,
__entry->time, __entry->isolate)
);
+TRACE_EVENT(sched_preempt_disable,
+
+ TP_PROTO(u64 delta, bool irqs_disabled,
+ unsigned long caddr0, unsigned long caddr1,
+ unsigned long caddr2, unsigned long caddr3),
+
+ TP_ARGS(delta, irqs_disabled, caddr0, caddr1, caddr2, caddr3),
+
+ TP_STRUCT__entry(
+ __field(u64, delta)
+ __field(bool, irqs_disabled)
+ __field(void*, caddr0)
+ __field(void*, caddr1)
+ __field(void*, caddr2)
+ __field(void*, caddr3)
+ ),
+
+ TP_fast_assign(
+ __entry->delta = delta;
+ __entry->irqs_disabled = irqs_disabled;
+ __entry->caddr0 = (void *)caddr0;
+ __entry->caddr1 = (void *)caddr1;
+ __entry->caddr2 = (void *)caddr2;
+ __entry->caddr3 = (void *)caddr3;
+ ),
+
+ TP_printk("delta=%llu(ns) irqs_d=%d Callers:(%pf<-%pf<-%pf<-%pf)",
+ __entry->delta, __entry->irqs_disabled,
+ __entry->caddr0, __entry->caddr1,
+ __entry->caddr2, __entry->caddr3)
+);
+
TRACE_EVENT(sched_contrib_scale_f,
TP_PROTO(int cpu, unsigned long freq_scale_factor,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 2604d3f387ba..c06237170542 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -296,6 +296,7 @@ header-y += msm_audio_amrnb.h
header-y += msm_audio_amrwb.h
header-y += msm_audio_amrwbplus.h
header-y += msm_audio_calibration.h
+header-y += msm_audio_anc.h
header-y += msm_audio_mvs.h
header-y += msm_audio_qcp.h
header-y += msm_audio_sbc.h
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index beed138bd359..f85ed3a5ef4d 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -43,6 +43,27 @@
/* (1U << 31) is reserved for signed error codes */
/*
+ * Set/Get write life time hints. {GET,SET}_RW_HINT operate on the
+ * underlying inode, while {GET,SET}_FILE_RW_HINT operate only on
+ * the specific file.
+ */
+#define F_GET_RW_HINT (F_LINUX_SPECIFIC_BASE + 11)
+#define F_SET_RW_HINT (F_LINUX_SPECIFIC_BASE + 12)
+#define F_GET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 13)
+#define F_SET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 14)
+
+/*
+ * Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
+ * used to clear any hints previously set.
+ */
+#define RWF_WRITE_LIFE_NOT_SET 0
+#define RWH_WRITE_LIFE_NONE 1
+#define RWH_WRITE_LIFE_SHORT 2
+#define RWH_WRITE_LIFE_MEDIUM 3
+#define RWH_WRITE_LIFE_LONG 4
+#define RWH_WRITE_LIFE_EXTREME 5
+
+/*
* Types of directory notifications that may be requested.
*/
#define DN_ACCESS 0x00000001 /* File accessed */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 03f3618612aa..376d0ab5b9f2 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -831,6 +831,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_GUEST_DEBUG_HW_WPS 120
#define KVM_CAP_SPLIT_IRQCHIP 121
#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
+#define KVM_CAP_S390_BPB 152
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/msm_audio_anc.h b/include/uapi/linux/msm_audio_anc.h
new file mode 100644
index 000000000000..028d381bc1a6
--- /dev/null
+++ b/include/uapi/linux/msm_audio_anc.h
@@ -0,0 +1,53 @@
+#ifndef _UAPI_MSM_AUDIO_ANC_H
+#define _UAPI_MSM_AUDIO_ANC_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ANC_IOCTL_MAGIC 'a'
+
+#define AUDIO_ANC_SET_PARAM _IOWR(ANC_IOCTL_MAGIC, \
+ 300, struct audio_anc_packet *)
+#define AUDIO_ANC_GET_PARAM _IOWR(ANC_IOCTL_MAGIC, \
+ 301, struct audio_anc_packet *)
+
+#define ANC_CMD_START 0
+#define ANC_CMD_STOP 1
+#define ANC_CMD_RPM 2
+#define ANC_CMD_BYPASS_MODE 3
+#define ANC_CMD_ALGO_MODULE 4
+
+/* room to extend the ANC_CMD defines */
+#define ANC_CMD_MAX 0xFF
+
+struct audio_anc_header {
+ int32_t data_size;
+ int32_t version;
+ int32_t anc_cmd;
+ int32_t anc_cmd_size;
+};
+
+struct audio_anc_rpm_info {
+ int32_t rpm;
+};
+
+struct audio_anc_bypass_mode {
+ int32_t mode;
+};
+
+struct audio_anc_algo_module_info {
+ int32_t module_id;
+};
+
+union audio_anc_data {
+ struct audio_anc_rpm_info rpm_info;
+ struct audio_anc_bypass_mode bypass_mode_info;
+ struct audio_anc_algo_module_info algo_info;
+};
+
+struct audio_anc_packet {
+ struct audio_anc_header hdr;
+ union audio_anc_data anc_data;
+};
+
+#endif /* _UAPI_MSM_AUDIO_ANC_H */
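A minimal user-space sketch of the new ioctl interface; the device node path and the version value are placeholders (this header does not define them), and ANC_CMD_RPM is used purely as an example command:

/* Illustrative user-space usage; paths and values below are placeholders. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_audio_anc.h>

int main(void)
{
	struct audio_anc_packet pkt = {
		.hdr = {
			.data_size = sizeof(pkt.anc_data),
			.version = 1,			/* placeholder */
			.anc_cmd = ANC_CMD_RPM,
			.anc_cmd_size = sizeof(struct audio_anc_rpm_info),
		},
		.anc_data.rpm_info.rpm = 3000,		/* placeholder RPM */
	};
	int fd = open("/dev/msm_audio_anc", O_RDWR);	/* placeholder node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, AUDIO_ANC_SET_PARAM, &pkt) < 0)
		perror("AUDIO_ANC_SET_PARAM");
	close(fd);
	return 0;
}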
diff --git a/include/uapi/media/msm_camera.h b/include/uapi/media/msm_camera.h
index 8fb69aeef9b4..fd0937ffb1e5 100644
--- a/include/uapi/media/msm_camera.h
+++ b/include/uapi/media/msm_camera.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2009-2012, 2014-2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2012, 2014-2016, 2018 The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1386,7 +1387,7 @@ struct msm_camera_csiphy_params {
uint16_t lane_mask;
uint8_t combo_mode;
uint8_t csid_core;
- unsigned long data_rate;
+ uint64_t data_rate;
};
struct msm_camera_csi2_params {
diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h
index 4d348c7bc373..ac454ca9a7fc 100644
--- a/include/uapi/media/msm_camsensor_sdk.h
+++ b/include/uapi/media/msm_camsensor_sdk.h
@@ -367,7 +367,7 @@ struct msm_camera_csiphy_params {
unsigned char csid_core;
unsigned int csiphy_clk;
unsigned char csi_3phase;
- unsigned long data_rate;
+ uint64_t data_rate;
};
struct msm_camera_i2c_seq_reg_array {
diff --git a/ipc/shm.c b/ipc/shm.c
index 09267be8d27b..9fa852a6473b 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -198,6 +198,12 @@ static int __shm_open(struct vm_area_struct *vma)
if (IS_ERR(shp))
return PTR_ERR(shp);
+ if (shp->shm_file != sfd->file) {
+ /* ID was reused */
+ shm_unlock(shp);
+ return -EINVAL;
+ }
+
shp->shm_atim = get_seconds();
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_nattch++;
@@ -414,8 +420,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
int ret;
/*
- * In case of remap_file_pages() emulation, the file can represent
- * removed IPC ID: propogate shm_lock() error to caller.
+ * In case of remap_file_pages() emulation, the file can represent an
+ * IPC ID that was removed, and possibly even reused by another shm
+ * segment already. Propagate this case as an error to caller.
*/
ret =__shm_open(vma);
if (ret)
@@ -439,6 +446,7 @@ static int shm_release(struct inode *ino, struct file *file)
struct shm_file_data *sfd = shm_file_data(file);
put_ipc_ns(sfd->ns);
+ fput(sfd->file);
shm_file_data(file) = NULL;
kfree(sfd);
return 0;
@@ -1198,7 +1206,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
file->f_mapping = shp->shm_file->f_mapping;
sfd->id = shp->shm_perm.id;
sfd->ns = get_ipc_ns(ns);
- sfd->file = shp->shm_file;
+ /*
+ * We need to take a reference to the real shm file to prevent the
+ * pointer from becoming stale in cases where the lifetime of the outer
+ * file extends beyond that of the shm segment. It's not usually
+ * possible, but it can happen during remap_file_pages() emulation as
+ * that unmaps the memory, then does ->mmap() via file reference only.
+ * We'll deny the ->mmap() if the shm segment was since removed, but to
+ * detect shm ID reuse we need to compare the file pointers.
+ */
+ sfd->file = get_file(shp->shm_file);
sfd->vm_ops = NULL;
err = security_mmap_file(file, prot, flags);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 322f63370038..5879a599e115 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5387,9 +5387,6 @@ static void perf_output_read_one(struct perf_output_handle *handle,
__output_copy(handle, values, n * sizeof(u64));
}
-/*
- * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
- */
static void perf_output_read_group(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
@@ -5434,6 +5431,13 @@ static void perf_output_read_group(struct perf_output_handle *handle,
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
PERF_FORMAT_TOTAL_TIME_RUNNING)
+/*
+ * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
+ *
+ * The problem is that its both hard and excessively expensive to iterate the
+ * child list, not to mention that its impossible to IPI the children running
+ * on another CPU, from interrupt/NMI context.
+ */
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{
@@ -8167,9 +8171,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
local64_set(&hwc->period_left, hwc->sample_period);
/*
- * we currently do not support PERF_FORMAT_GROUP on inherited events
+ * We currently do not support PERF_SAMPLE_READ on inherited events.
+ * See perf_output_read().
*/
- if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
+ if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
goto err_ns;
if (!has_branch_stack(event))
@@ -8337,9 +8342,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
* __u16 sample size limit.
*/
if (attr->sample_stack_user >= USHRT_MAX)
- ret = -EINVAL;
+ return -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
- ret = -EINVAL;
+ return -EINVAL;
}
if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
diff --git a/kernel/futex.c b/kernel/futex.c
index a09c1dd1f659..760a97da1050 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -470,6 +470,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct page *page, *page_head;
+ struct address_space *mapping;
int err, ro = 0;
/*
@@ -555,7 +556,19 @@ again:
}
#endif
- lock_page(page_head);
+ /*
+ * The treatment of mapping from this point on is critical. The page
+ * lock protects many things but in this context the page lock
+ * stabilizes mapping, prevents inode freeing in the shared
+ * file-backed region case and guards against movement to swap cache.
+ *
+ * Strictly speaking the page lock is not needed in all cases being
+ * considered here and the page lock forces unnecessary serialization.
+ * From this point on, mapping will be re-verified if necessary and
+ * the page lock will be acquired only if it is unavoidable.
+ */
+
+ mapping = READ_ONCE(page_head->mapping);
/*
* If page_head->mapping is NULL, then it cannot be a PageAnon
@@ -572,18 +585,31 @@ again:
* shmem_writepage move it from filecache to swapcache beneath us:
* an unlikely race, but we do need to retry for page_head->mapping.
*/
- if (!page_head->mapping) {
- int shmem_swizzled = PageSwapCache(page_head);
+ if (unlikely(!mapping)) {
+ int shmem_swizzled;
+
+ /*
+ * Page lock is required to identify which special case above
+ * applies. If this is really a shmem page then the page lock
+ * will prevent unexpected transitions.
+ */
+ lock_page(page);
+ shmem_swizzled = PageSwapCache(page) || page->mapping;
unlock_page(page_head);
put_page(page_head);
+
if (shmem_swizzled)
goto again;
+
return -EFAULT;
}
/*
* Private mappings are handled in a simple way.
*
+ * If the futex key is stored on an anonymous page, then the associated
+ * object is the mm which is implicitly pinned by the calling process.
+ *
* NOTE: When userspace waits on a MAP_SHARED mapping, even if
* it's a read-only handle, it's expected that futexes attach to
* the object not the particular process.
@@ -601,16 +627,74 @@ again:
key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
+
+ get_futex_key_refs(key); /* implies smp_mb(); (B) */
+
} else {
+ struct inode *inode;
+
+ /*
+ * The associated futex object in this case is the inode and
+ * the page->mapping must be traversed. Ordinarily this should
+ * be stabilised under page lock but it's not strictly
+ * necessary in this case as we just want to pin the inode, not
+ * update the radix tree or anything like that.
+ *
+ * The RCU read lock is taken as the inode is finally freed
+ * under RCU. If the mapping still matches expectations then the
+ * mapping->host can be safely accessed as being a valid inode.
+ */
+ rcu_read_lock();
+
+ if (READ_ONCE(page_head->mapping) != mapping) {
+ rcu_read_unlock();
+ put_page(page_head);
+
+ goto again;
+ }
+
+ inode = READ_ONCE(mapping->host);
+ if (!inode) {
+ rcu_read_unlock();
+ put_page(page_head);
+
+ goto again;
+ }
+
+ /*
+ * Take a reference unless it is about to be freed. Previously
+ * this reference was taken by ihold under the page lock
+ * pinning the inode in place so i_lock was unnecessary. The
+ * only way for this check to fail is if the inode was
+ * truncated in parallel so warn for now if this happens.
+ *
+ * We are not calling into get_futex_key_refs() in file-backed
+ * cases, therefore a successful atomic_inc return below will
+ * guarantee that get_futex_key() will still imply smp_mb(); (B).
+ */
+ if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
+ rcu_read_unlock();
+ put_page(page_head);
+
+ goto again;
+ }
+
+ /* Should be impossible but let's be paranoid for now */
+ if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
+ err = -EFAULT;
+ rcu_read_unlock();
+ iput(inode);
+
+ goto out;
+ }
+
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
- key->shared.inode = page_head->mapping->host;
+ key->shared.inode = inode;
key->shared.pgoff = basepage_index(page);
+ rcu_read_unlock();
}
- get_futex_key_refs(key); /* implies MB (B) */
-
out:
- unlock_page(page_head);
put_page(page_head);
return err;
}
diff --git a/kernel/pid.c b/kernel/pid.c
index b17263be9082..5fe7cdb6d05f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -322,8 +322,10 @@ struct pid *alloc_pid(struct pid_namespace *ns)
}
if (unlikely(is_child_reaper(pid))) {
- if (pid_ns_prepare_proc(ns))
+ if (pid_ns_prepare_proc(ns)) {
+ disable_pid_allocation(ns);
goto out_free;
+ }
}
get_pid_ns(ns);
diff --git a/kernel/resource.c b/kernel/resource.c
index c09d484f7b5f..73348f574163 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -611,7 +611,8 @@ static int __find_resource(struct resource *root, struct resource *old,
alloc.start = constraint->alignf(constraint->alignf_data, &avail,
size, constraint->align);
alloc.end = alloc.start + size - 1;
- if (resource_contains(&avail, &alloc)) {
+ if (alloc.start <= alloc.end &&
+ resource_contains(&avail, &alloc)) {
new->start = alloc.start;
new->end = alloc.end;
return 0;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fffc50b0191f..c1ecb07de762 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3268,9 +3268,24 @@ notrace unsigned long get_parent_ip(unsigned long addr)
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_PREEMPT_TRACER))
+/*
+ * preemptoff stack tracing threshold in ns.
+ * default: 1ms
+ */
+unsigned int sysctl_preemptoff_tracing_threshold_ns = 1000000UL;
+
+struct preempt_store {
+ u64 ts;
+ unsigned long caddr[4];
+ bool irqs_disabled;
+};
+
+static DEFINE_PER_CPU(struct preempt_store, the_ps);
void preempt_count_add(int val)
{
+ struct preempt_store *ps = &per_cpu(the_ps, raw_smp_processor_id());
+
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
@@ -3291,6 +3306,13 @@ void preempt_count_add(int val)
#ifdef CONFIG_DEBUG_PREEMPT
current->preempt_disable_ip = ip;
#endif
+ ps->ts = sched_clock();
+ ps->caddr[0] = CALLER_ADDR0;
+ ps->caddr[1] = CALLER_ADDR1;
+ ps->caddr[2] = CALLER_ADDR2;
+ ps->caddr[3] = CALLER_ADDR3;
+ ps->irqs_disabled = irqs_disabled();
+
trace_preempt_off(CALLER_ADDR0, ip);
}
}
@@ -3313,8 +3335,22 @@ void preempt_count_sub(int val)
return;
#endif
- if (preempt_count() == val)
+ if (preempt_count() == val) {
+ struct preempt_store *ps = &per_cpu(the_ps,
+ raw_smp_processor_id());
+ u64 delta = sched_clock() - ps->ts;
+
+ /*
+ * Trace preempt disable stack if preemption
+ * is disabled for more than the threshold.
+ */
+ if (delta > sysctl_preemptoff_tracing_threshold_ns)
+ trace_sched_preempt_disable(delta, ps->irqs_disabled,
+ ps->caddr[0], ps->caddr[1],
+ ps->caddr[2], ps->caddr[3]);
+
trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+ }
__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 23e37b0674df..f962ab1eb046 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2402,7 +2402,8 @@ void task_numa_work(struct callback_head *work)
return;
- down_read(&mm->mmap_sem);
+ if (!down_read_trylock(&mm->mmap_sem))
+ return;
vma = find_vma(mm, start);
if (!vma) {
reset_ptenuma_scan(p);
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index ea066ab8376b..d9f0669ff683 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2881,11 +2881,15 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
update_task_burst(p, rq, event, runtime);
update_cpu_busy_time(p, rq, event, wallclock, irqtime);
update_task_pred_demand(rq, p, event);
-done:
+
+ if (exiting_task(p))
+ goto done;
+
trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
rq->cc.cycles, rq->cc.time,
p->grp ? &rq->grp_time : NULL);
+done:
p->ravg.mark_start = wallclock;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index bc4ca30ddc21..14f19af9d79a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -291,6 +291,22 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_IRQSOFF_TRACER)
+ {
+ .procname = "preemptoff_tracing_threshold_ns",
+ .data = &sysctl_preemptoff_tracing_threshold_ns,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "irqsoff_tracing_threshold_ns",
+ .data = &sysctl_irqsoff_tracing_threshold_ns,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
#ifdef CONFIG_SCHED_HMP
{
.procname = "sched_freq_reporting_policy",
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 21f82c29c914..11cc757795cd 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -54,7 +54,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1
[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
-static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
+/*
+ * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family.
+ * Make sure they are always aligned.
+ */
+static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index beafdf94b3b5..79fadcad21ff 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -435,6 +435,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
debug_object_free(timer, &hrtimer_debug_descr);
}
+EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 21b162c07e83..c00137ea939e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -13,6 +13,7 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
+#include <linux/sched/sysctl.h>
#include "trace.h"
@@ -39,6 +40,12 @@ static int save_flags;
static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
+/*
+ * irqsoff stack tracing threshold in ns.
+ * default: 1ms
+ */
+unsigned int sysctl_irqsoff_tracing_threshold_ns = 1000000UL;
+
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
@@ -454,17 +461,52 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
#else /* !CONFIG_PROVE_LOCKING */
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+struct irqsoff_store {
+ u64 ts;
+ unsigned long caddr[4];
+};
+
+static DEFINE_PER_CPU(struct irqsoff_store, the_irqsoff);
+#endif /* CONFIG_PREEMPTIRQ_EVENTS */
+
/*
* We are only interested in hardirq on/off events:
*/
static inline void tracer_hardirqs_on(void)
{
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+ struct irqsoff_store *is = &per_cpu(the_irqsoff,
+ raw_smp_processor_id());
+
+ if (!is->ts) {
+ is->ts = sched_clock();
+ is->caddr[0] = CALLER_ADDR0;
+ is->caddr[1] = CALLER_ADDR1;
+ is->caddr[2] = CALLER_ADDR2;
+ is->caddr[3] = CALLER_ADDR3;
+ }
+#endif /* CONFIG_PREEMPTIRQ_EVENTS */
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
static inline void tracer_hardirqs_off(void)
{
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+ struct irqsoff_store *is = &per_cpu(the_irqsoff,
+ raw_smp_processor_id());
+ u64 delta = 0;
+
+ if (is->ts) {
+ delta = sched_clock() - is->ts;
+ is->ts = 0;
+ }
+ if (delta > sysctl_irqsoff_tracing_threshold_ns)
+ trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
+ is->caddr[2], is->caddr[3]);
+#endif /* CONFIG_PREEMPTIRQ_EVENTS */
+
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
diff --git a/lib/kobject.c b/lib/kobject.c
index 7cbccd2b4c72..895edb63fba4 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -234,14 +234,12 @@ static int kobject_add_internal(struct kobject *kobj)
/* be noisy on error issues */
if (error == -EEXIST)
- WARN(1, "%s failed for %s with "
- "-EEXIST, don't try to register things with "
- "the same name in the same directory.\n",
- __func__, kobject_name(kobj));
+ pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+ __func__, kobject_name(kobj));
else
- WARN(1, "%s failed for %s (error: %d parent: %s)\n",
- __func__, kobject_name(kobj), error,
- parent ? kobject_name(parent) : "'none'");
+ pr_err("%s failed for %s (error: %d parent: %s)\n",
+ __func__, kobject_name(kobj), error,
+ parent ? kobject_name(parent) : "'none'");
} else
kobj->state_in_sysfs = 1;
diff --git a/mm/filemap.c b/mm/filemap.c
index 8b2cf0f6a529..750af2219081 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -573,7 +573,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
VM_BUG_ON_PAGE(!PageLocked(new), new);
VM_BUG_ON_PAGE(new->mapping, new);
- error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+ error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
if (!error) {
struct address_space *mapping = old->mapping;
void (*freepage)(struct page *);
@@ -632,7 +632,7 @@ static int __add_to_page_cache_locked(struct page *page,
return error;
}
- error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
+ error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
if (error) {
if (!huge)
mem_cgroup_cancel_charge(page, memcg);
@@ -1194,8 +1194,7 @@ no_page:
if (fgp_flags & FGP_ACCESSED)
__SetPageReferenced(page);
- err = add_to_page_cache_lru(page, mapping, offset,
- gfp_mask & GFP_RECLAIM_MASK);
+ err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
if (unlikely(err)) {
page_cache_release(page);
page = NULL;
@@ -1829,19 +1828,18 @@ EXPORT_SYMBOL(generic_file_read_iter);
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
*/
-static int page_cache_read(struct file *file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
struct address_space *mapping = file->f_mapping;
struct page *page;
int ret;
do {
- page = page_cache_alloc_cold(mapping);
+ page = __page_cache_alloc(gfp_mask|__GFP_COLD);
if (!page)
return -ENOMEM;
- ret = add_to_page_cache_lru(page, mapping, offset,
- mapping_gfp_constraint(mapping, GFP_KERNEL));
+ ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
if (ret == 0)
ret = mapping->a_ops->readpage(file, page);
else if (ret == -EEXIST)
@@ -2022,7 +2020,7 @@ no_cached_page:
* We're only likely to ever get here if MADV_RANDOM is in
* effect.
*/
- error = page_cache_read(file, offset);
+ error = page_cache_read(file, offset, vmf->gfp_mask);
/*
* The page we want has now been added to the page cache.
diff --git a/mm/memory.c b/mm/memory.c
index 291cbf54b347..78ab57141731 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1990,6 +1990,20 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
copy_user_highpage(dst, src, va, vma);
}
+static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
+{
+ struct file *vm_file = vma->vm_file;
+
+ if (vm_file)
+ return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
+
+ /*
+ * Special mappings (e.g. VDSO) do not have any file so fake
+ * a default GFP_KERNEL for them.
+ */
+ return GFP_KERNEL;
+}
+
/*
* Notify the address space that the page is about to become writable so that
* it can prohibit this or wait for the page to get into an appropriate state.
@@ -2005,6 +2019,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
vmf.virtual_address = (void __user *)(address & PAGE_MASK);
vmf.pgoff = page->index;
vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+ vmf.gfp_mask = __get_fault_gfp_mask(vma);
vmf.page = page;
vmf.cow_page = NULL;
@@ -2771,6 +2786,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
vmf.pgoff = pgoff;
vmf.flags = flags;
vmf.page = NULL;
+ vmf.gfp_mask = __get_fault_gfp_mask(vma);
vmf.cow_page = cow_page;
ret = vma->vm_ops->fault(vma, &vmf);
@@ -2937,6 +2953,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
vmf.pgoff = pgoff;
vmf.max_pgoff = max_pgoff;
vmf.flags = flags;
+ vmf.gfp_mask = __get_fault_gfp_mask(vma);
vma->vm_ops->map_pages(vma, &vmf);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 2339b533f4b2..5457c5f4935b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -206,6 +206,13 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
free += global_page_state(NR_SLAB_RECLAIMABLE);
/*
+ * Part of the kernel memory, which can be released
+ * under memory pressure.
+ */
+ free += global_page_state(
+ NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
+
+ /*
* Leave reserved pages. The pages are not for anonymous pages.
*/
if (free <= totalreserve_pages)
diff --git a/mm/nommu.c b/mm/nommu.c
index 92be862c859b..8d75e425c21c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1880,6 +1880,13 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
free += global_page_state(NR_SLAB_RECLAIMABLE);
/*
+ * Part of the kernel memory, which can be released
+ * under memory pressure.
+ */
+ free += global_page_state(
+ NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
+
+ /*
* Leave reserved pages. The pages are not for anonymous pages.
*/
if (free <= totalreserve_pages)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c5bf17598afa..a98dae1bdcff 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2526,13 +2526,13 @@ void account_page_redirty(struct page *page)
if (mapping && mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
- bool locked;
+ struct wb_lock_cookie cookie = {};
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
current->nr_dirtied--;
dec_zone_page_state(page, NR_DIRTIED);
dec_wb_stat(wb, WB_DIRTIED);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
}
}
EXPORT_SYMBOL(account_page_redirty);
@@ -2638,15 +2638,15 @@ void cancel_dirty_page(struct page *page)
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
struct mem_cgroup *memcg;
- bool locked;
+ struct wb_lock_cookie cookie = {};
memcg = mem_cgroup_begin_page_stat(page);
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
if (TestClearPageDirty(page))
account_page_cleaned(page, mapping, memcg, wb);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
mem_cgroup_end_page_stat(memcg);
} else {
ClearPageDirty(page);
@@ -2679,7 +2679,7 @@ int clear_page_dirty_for_io(struct page *page)
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
struct mem_cgroup *memcg;
- bool locked;
+ struct wb_lock_cookie cookie = {};
/*
* Yes, Virginia, this is indeed insane.
@@ -2717,14 +2717,14 @@ int clear_page_dirty_for_io(struct page *page)
* exclusion.
*/
memcg = mem_cgroup_begin_page_stat(page);
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
if (TestClearPageDirty(page)) {
mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
dec_zone_page_state(page, NR_FILE_DIRTY);
dec_wb_stat(wb, WB_RECLAIMABLE);
ret = 1;
}
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
mem_cgroup_end_page_stat(memcg);
return ret;
}
diff --git a/mm/slab.c b/mm/slab.c
index 8fc762c178bd..80ca19a122f3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3942,7 +3942,8 @@ next:
next_reap_node();
out:
/* Set up the next iteration */
- schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
+ schedule_delayed_work_on(smp_processor_id(), work,
+ round_jiffies_relative(REAPTIMEOUT_AC));
}
#ifdef CONFIG_SLABINFO
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6c841595b963..9ab13e3be5df 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -765,6 +765,7 @@ const char * const vmstat_text[] = {
"nr_anon_transparent_hugepages",
"nr_free_cma",
"nr_swapcache",
+ "nr_indirectly_reclaimable",
/* enum writeback_stat_item counters */
"nr_dirty_threshold",
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89da689b6433..ac9791dd4768 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -29,6 +29,7 @@
#include <linux/net_tstamp.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/phy.h>
#include <net/arp.h>
#include "vlan.h"
@@ -654,8 +655,11 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev,
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
+ struct phy_device *phydev = vlan->real_dev->phydev;
- if (ops->get_ts_info) {
+ if (phydev && phydev->drv && phydev->drv->ts_info) {
+ return phydev->drv->ts_info(phydev, info);
+ } else if (ops->get_ts_info) {
return ops->get_ts_info(vlan->real_dev, info);
} else {
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index da4078651c22..5b95477c3453 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -716,6 +716,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ bool changed = false;
/* If Connectionless Slave Broadcast master role is supported
* enable all necessary events for it.
@@ -725,6 +726,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
events[1] |= 0x80; /* Synchronization Train Complete */
events[2] |= 0x10; /* Slave Page Response Timeout */
events[2] |= 0x20; /* CSB Channel Map Change */
+ changed = true;
}
/* If Connectionless Slave Broadcast slave role is supported
@@ -735,13 +737,24 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
events[2] |= 0x02; /* CSB Receive */
events[2] |= 0x04; /* CSB Timeout */
events[2] |= 0x08; /* Truncated Page Complete */
+ changed = true;
}
/* Enable Authenticated Payload Timeout Expired event if supported */
- if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
+ if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
events[2] |= 0x80;
+ changed = true;
+ }
- hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
+ /* Some Broadcom based controllers indicate support for Set Event
+ * Mask Page 2 command, but then actually do not support it. Since
+ * the default value is all bits set to zero, the command is only
+ * required if the event mask has to be changed. In case no change
+ * to the event mask is needed, skip this command.
+ */
+ if (changed)
+ hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
+ sizeof(events), events);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index a6b2f2138c9d..ad3c9e96a275 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2531,6 +2531,11 @@ static int try_write(struct ceph_connection *con)
int ret = 1;
dout("try_write start %p state %lu\n", con, con->state);
+ if (con->state != CON_STATE_PREOPEN &&
+ con->state != CON_STATE_CONNECTING &&
+ con->state != CON_STATE_NEGOTIATING &&
+ con->state != CON_STATE_OPEN)
+ return 0;
more:
dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2556,6 +2561,8 @@ more:
}
more_kvec:
+ BUG_ON(!con->sock);
+
/* kvec data queued? */
if (con->out_kvec_left) {
ret = write_partial_kvec(con);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index bc95e48d5cfb..378c9ed00d40 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -295,6 +295,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
u32 yes;
struct crush_rule *r;
+ err = -EINVAL;
ceph_decode_32_safe(p, end, yes, bad);
if (!yes) {
dout("crush_decode NO rule %d off %x %p to %p\n",
diff --git a/net/core/dev.c b/net/core/dev.c
index 129e188d7722..8c884cf5fbc9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -991,7 +991,7 @@ bool dev_valid_name(const char *name)
{
if (*name == '\0')
return false;
- if (strlen(name) >= IFNAMSIZ)
+ if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
return false;
if (!strcmp(name, ".") || !strcmp(name, ".."))
return false;
@@ -2517,7 +2517,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
return 0;
- eth = (struct ethhdr *)skb_mac_header(skb);
+ eth = (struct ethhdr *)skb->data;
type = eth->h_proto;
}
@@ -2708,7 +2708,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
}
EXPORT_SYMBOL(passthru_features_check);
-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
+static netdev_features_t dflt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index eff329e9c23b..642b13ddd69c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -54,7 +54,8 @@ do { \
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+ struct net_device *dev);
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
@@ -254,8 +255,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev);
- pneigh_ifdown(tbl, dev);
- write_unlock_bh(&tbl->lock);
+ pneigh_ifdown_and_unlock(tbl, dev);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
@@ -645,9 +645,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
return -ENOENT;
}
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+ struct net_device *dev)
{
- struct pneigh_entry *n, **np;
+ struct pneigh_entry *n, **np, *freelist = NULL;
u32 h;
for (h = 0; h <= PNEIGH_HASHMASK; h++) {
@@ -655,16 +656,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
while ((n = *np) != NULL) {
if (!dev || n->dev == dev) {
*np = n->next;
- if (tbl->pdestructor)
- tbl->pdestructor(n);
- if (n->dev)
- dev_put(n->dev);
- kfree(n);
+ n->next = freelist;
+ freelist = n;
continue;
}
np = &n->next;
}
}
+ write_unlock_bh(&tbl->lock);
+ while ((n = freelist)) {
+ freelist = n->next;
+ n->next = NULL;
+ if (tbl->pdestructor)
+ tbl->pdestructor(n);
+ if (n->dev)
+ dev_put(n->dev);
+ kfree(n);
+ }
return -ENOENT;
}
@@ -1132,10 +1140,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
lladdr = neigh->ha;
}
- if (new & NUD_CONNECTED)
- neigh->confirmed = jiffies;
- neigh->updated = jiffies;
-
/* If entry was valid and address is not changed,
do not change entry state, if new one is STALE.
*/
@@ -1159,6 +1163,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
}
}
+ /* Update timestamps only once we know we will make a change to the
+ * neighbour entry. Otherwise we risk to move the locktime window with
+ * noop updates and ignore relevant ARP updates.
+ */
+ if (new != old || lladdr != neigh->ha) {
+ if (new & NUD_CONNECTED)
+ neigh->confirmed = jiffies;
+ neigh->updated = jiffies;
+ }
+
if (new != old) {
neigh_del_timer(neigh);
if (new & NUD_PROBE)
@@ -2274,12 +2288,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
if (!err) {
- if (tb[NDA_IFINDEX])
+ if (tb[NDA_IFINDEX]) {
+ if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
+ return -EINVAL;
filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
-
- if (tb[NDA_MASTER])
+ }
+ if (tb[NDA_MASTER]) {
+ if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
+ return -EINVAL;
filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
-
+ }
if (filter_idx || filter_master_idx)
flags |= NLM_F_DUMP_FILTERED;
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b5c351d2830b..ccd20669ac00 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -310,6 +310,25 @@ out_undo:
goto out;
}
+static int __net_init net_defaults_init_net(struct net *net)
+{
+ net->core.sysctl_somaxconn = SOMAXCONN;
+ return 0;
+}
+
+static struct pernet_operations net_defaults_ops = {
+ .init = net_defaults_init_net,
+};
+
+static __init int net_defaults_init(void)
+{
+ if (register_pernet_subsys(&net_defaults_ops))
+ panic("Cannot initialize net default settings");
+
+ return 0;
+}
+
+core_initcall(net_defaults_init);
#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2549fa05b564..3c5e3c022232 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2577,7 +2577,8 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
int pos = skb_headlen(skb);
- skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+ skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
+ SKBTX_SHARED_FRAG;
if (len < pos) /* Split line is inside header. */
skb_split_inside_header(skb, skb1, len, pos);
else /* Second chunk has no header, nothing to copy. */
@@ -3141,8 +3142,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
skb_copy_from_linear_data_offset(head_skb, offset,
skb_put(nskb, hsize), hsize);
- skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
- SKBTX_SHARED_FRAG;
+ skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
+ SKBTX_SHARED_FRAG;
while (pos < offset + len) {
if (i >= nfrags) {
@@ -3355,24 +3356,18 @@ void __init skb_init(void)
NULL);
}
-/**
- * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
- * @skb: Socket buffer containing the buffers to be mapped
- * @sg: The scatter-gather list to map into
- * @offset: The offset into the buffer's contents to start mapping
- * @len: Length of buffer space to be mapped
- *
- * Fill the specified scatter-gather list with mappings/pointers into a
- * region of the buffer space attached to a socket buffer.
- */
static int
-__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
+ unsigned int recursion_level)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int elt = 0;
+ if (unlikely(recursion_level >= 24))
+ return -EMSGSIZE;
+
if (copy > 0) {
if (copy > len)
copy = len;
@@ -3391,6 +3386,8 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+ return -EMSGSIZE;
if (copy > len)
copy = len;
@@ -3405,16 +3402,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
}
skb_walk_frags(skb, frag_iter) {
- int end;
+ int end, ret;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
+ if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+ return -EMSGSIZE;
+
if (copy > len)
copy = len;
- elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
- copy);
+ ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+ copy, recursion_level + 1);
+ if (unlikely(ret < 0))
+ return ret;
+ elt += ret;
if ((len -= copy) == 0)
return elt;
offset += copy;
@@ -3425,6 +3428,31 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
return elt;
}
+/**
+ * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+ * @skb: Socket buffer containing the buffers to be mapped
+ * @sg: The scatter-gather list to map into
+ * @offset: The offset into the buffer's contents to start mapping
+ * @len: Length of buffer space to be mapped
+ *
+ * Fill the specified scatter-gather list with mappings/pointers into a
+ * region of the buffer space attached to a socket buffer. Returns either
+ * the number of scatterlist items used, or -EMSGSIZE if the contents
+ * could not fit.
+ */
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+ int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
+
+ if (nsg <= 0)
+ return nsg;
+
+ sg_mark_end(&sg[nsg - 1]);
+
+ return nsg;
+}
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
+
/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
* sglist without mark the sg which contain last skb data as the end.
* So the caller can mannipulate sg list as will when padding new data after
@@ -3447,19 +3475,11 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len)
{
- return __skb_to_sgvec(skb, sg, offset, len);
+ return __skb_to_sgvec(skb, sg, offset, len, 0);
}
EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
-{
- int nsg = __skb_to_sgvec(skb, sg, offset, len);
- sg_mark_end(&sg[nsg - 1]);
-
- return nsg;
-}
-EXPORT_SYMBOL_GPL(skb_to_sgvec);
/**
* skb_cow_data - Check that a socket buffer's data buffers are writable
@@ -3741,7 +3761,8 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
return;
if (tsonly) {
- skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
+ skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
+ SKBTX_ANY_TSTAMP;
skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 6578a0a2f708..32898247d8bf 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -429,8 +429,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
{
struct ctl_table *tbl;
- net->core.sysctl_somaxconn = SOMAXCONN;
-
tbl = netns_core_table;
if (!net_eq(net, &init_net)) {
tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 6abc5012200b..e26df2764e83 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -25,6 +25,7 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/ratelimit.h>
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
@@ -91,9 +92,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
next_opt = memchr(opt, '#', end - opt) ?: end;
opt_len = next_opt - opt;
- if (!opt_len) {
- printk(KERN_WARNING
- "Empty option to dns_resolver key\n");
+ if (opt_len <= 0 || opt_len > 128) {
+ pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
+ opt_len);
return -EINVAL;
}
@@ -127,10 +128,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
}
bad_option_value:
- printk(KERN_WARNING
- "Option '%*.*s' to dns_resolver key:"
- " bad/missing value\n",
- opt_nlen, opt_nlen, opt);
+ pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
+ opt_nlen, opt_nlen, opt);
return -EINVAL;
} while (opt = next_opt + 1, opt < end);
}
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index a548be247e15..47b397264f24 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -302,12 +302,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);
- dev_put(dev);
-
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
+ dev_put(dev);
+
return err ?: size;
out_skb:
@@ -689,12 +689,12 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);
- dev_put(dev);
-
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
+ dev_put(dev);
+
return err ?: size;
out_skb:
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
index 5f60666d4c79..bfb76a84be73 100644
--- a/net/ipc_router/ipc_router_core.c
+++ b/net/ipc_router/ipc_router_core.c
@@ -224,6 +224,25 @@ void msm_ipc_router_set_ws_allowed(bool flag)
is_wakeup_source_allowed = flag;
}
+/**
+ * is_sensor_port() - Check if the remote port is sensor service or not
+ * @rport: Pointer to the remote port.
+ *
+ * Return: true if the remote port is sensor service else false.
+ */
+static int is_sensor_port(struct msm_ipc_router_remote_port *rport)
+{
+ u32 svcid = 0;
+
+ if (rport && rport->server) {
+ svcid = rport->server->name.service;
+ if (svcid == 400 || (svcid >= 256 && svcid <= 320))
+ return true;
+ }
+
+ return false;
+}
+
static void init_routing_table(void)
{
int i;
@@ -2731,7 +2750,6 @@ static void do_read_data(struct work_struct *work)
struct rr_packet *pkt = NULL;
struct msm_ipc_port *port_ptr;
struct msm_ipc_router_remote_port *rport_ptr;
- int ret;
struct msm_ipc_router_xprt_info *xprt_info =
container_of(work,
@@ -2739,16 +2757,7 @@ static void do_read_data(struct work_struct *work)
read_data);
while ((pkt = rr_read(xprt_info)) != NULL) {
- if (pkt->length < calc_rx_header_size(xprt_info) ||
- pkt->length > MAX_IPC_PKT_SIZE) {
- IPC_RTR_ERR("%s: Invalid pkt length %d\n",
- __func__, pkt->length);
- goto read_next_pkt1;
- }
- ret = extract_header(pkt);
- if (ret < 0)
- goto read_next_pkt1;
hdr = &(pkt->hdr);
if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
@@ -4195,6 +4204,7 @@ void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
{
struct msm_ipc_router_xprt_info *xprt_info = xprt->priv;
struct msm_ipc_router_xprt_work *xprt_work;
+ struct msm_ipc_router_remote_port *rport_ptr = NULL;
struct rr_packet *pkt;
int ret;
@@ -4247,16 +4257,40 @@ void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
if (!pkt)
return;
+ if (pkt->length < calc_rx_header_size(xprt_info) ||
+ pkt->length > MAX_IPC_PKT_SIZE) {
+ IPC_RTR_ERR("%s: Invalid pkt length %d\n",
+ __func__, pkt->length);
+ release_pkt(pkt);
+ return;
+ }
+
+ ret = extract_header(pkt);
+ if (ret < 0) {
+ release_pkt(pkt);
+ return;
+ }
+
pkt->ws_need = false;
+
+ if (pkt->hdr.type == IPC_ROUTER_CTRL_CMD_DATA)
+ rport_ptr = ipc_router_get_rport_ref(pkt->hdr.src_node_id,
+ pkt->hdr.src_port_id);
+
mutex_lock(&xprt_info->rx_lock_lhb2);
list_add_tail(&pkt->list, &xprt_info->pkt_list);
- if (!xprt_info->dynamic_ws) {
- __pm_stay_awake(&xprt_info->ws);
- pkt->ws_need = true;
- } else {
- if (is_wakeup_source_allowed) {
+ /* check every pkt is from SENSOR services or not and
+ * avoid holding both edge and port specific wake-up sources
+ */
+ if (!is_sensor_port(rport_ptr)) {
+ if (!xprt_info->dynamic_ws) {
__pm_stay_awake(&xprt_info->ws);
pkt->ws_need = true;
+ } else {
+ if (is_wakeup_source_allowed) {
+ __pm_stay_awake(&xprt_info->ws);
+ pkt->ws_need = true;
+ }
}
}
mutex_unlock(&xprt_info->rx_lock_lhb2);
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 22377c8ff14b..e8f862358518 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -220,7 +220,9 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
@@ -393,7 +395,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
skb_push(skb, ihl);
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index cb5eb649ad5f..bfa79831873f 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -437,7 +437,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
/*unsigned long now; */
struct net *net = dev_net(dev);
- rt = ip_route_output(net, sip, tip, 0, 0);
+ rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
if (IS_ERR(rt))
return 1;
if (rt->dst.dev != dev) {
@@ -658,6 +658,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
unsigned char *arp_ptr;
struct rtable *rt;
unsigned char *sha;
+ unsigned char *tha = NULL;
__be32 sip, tip;
u16 dev_type = dev->type;
int addr_type;
@@ -729,6 +730,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
break;
#endif
default:
+ tha = arp_ptr;
arp_ptr += dev->addr_len;
}
memcpy(&tip, arp_ptr, 4);
@@ -839,8 +841,18 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
It is possible, that this option should be enabled for some
devices (strip is candidate)
*/
- is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
- addr_type == RTN_UNICAST;
+ is_garp = tip == sip && addr_type == RTN_UNICAST;
+
+ /* Unsolicited ARP _replies_ also require target hwaddr to be
+ * the same as source.
+ */
+ if (is_garp && arp->ar_op == htons(ARPOP_REPLY))
+ is_garp =
+ /* IPv4 over IEEE 1394 doesn't provide target
+ * hardware address field in its ARP payload.
+ */
+ tha &&
+ !memcmp(tha, sha, dev->addr_len);
if (!n &&
((arp->ar_op == htons(ARPOP_REPLY) &&
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 20fb25e3027b..3d8021d55336 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -268,10 +268,11 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
esph->spi = x->id.spi;
sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg,
- (unsigned char *)esph - skb->data,
- assoclen + ivlen + clen + alen);
-
+ err = skb_to_sgvec(skb, sg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + clen + alen);
+ if (unlikely(err < 0))
+ goto error;
aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
aead_request_set_ad(req, assoclen);
@@ -481,7 +482,9 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
}
sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ err = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out;
aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
aead_request_set_ad(req, assoclen);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 80e2d1b0c08c..3d62feb65932 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -253,13 +253,14 @@ static struct net_device *__ip_tunnel_create(struct net *net,
struct net_device *dev;
char name[IFNAMSIZ];
- if (parms->name[0])
+ err = -E2BIG;
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ goto failed;
strlcpy(name, parms->name, IFNAMSIZ);
- else {
- if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
- err = -E2BIG;
+ } else {
+ if (strlen(ops->kind) > (IFNAMSIZ - 3))
goto failed;
- }
strlcpy(name, ops->kind, IFNAMSIZ);
strncat(name, "%d", 2);
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1c04dad774a7..c381ef51aa69 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2598,8 +2598,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
- /* Read the IP->Key mappings from userspace */
- err = tp->af_specific->md5_parse(sk, optval, optlen);
+ if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
+ err = tp->af_specific->md5_parse(sk, optval, optlen);
+ else
+ err = -EINVAL;
break;
#endif
case TCP_USER_TIMEOUT:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1a8e95d2dd39..b1cfce7f8e85 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -118,6 +118,7 @@ int sysctl_tcp_default_init_rwnd __read_mostly = TCP_INIT_CWND * 2;
#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
+#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -3544,7 +3545,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (before(ack, prior_snd_una)) {
/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
if (before(ack, prior_snd_una - tp->max_window)) {
- tcp_send_challenge_ack(sk, skb);
+ if (!(flag & FLAG_NO_CHALLENGE_ACK))
+ tcp_send_challenge_ack(sk, skb);
return -1;
}
goto old_ack;
@@ -3868,11 +3870,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
int length = (th->doff << 2) - sizeof(*th);
const u8 *ptr = (const u8 *)(th + 1);
- /* If the TCP option is too short, we can short cut */
- if (length < TCPOLEN_MD5SIG)
- return NULL;
-
- while (length > 0) {
+ /* If not enough data remaining, we can short cut */
+ while (length >= TCPOLEN_MD5SIG) {
int opcode = *ptr++;
int opsize;
@@ -5834,13 +5833,17 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
/* step 5: check the ACK field */
acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
- FLAG_UPDATE_TS_RECENT) > 0;
+ FLAG_UPDATE_TS_RECENT |
+ FLAG_NO_CHALLENGE_ACK) > 0;
+ if (!acceptable) {
+ if (sk->sk_state == TCP_SYN_RECV)
+ return 1; /* send one RST */
+ tcp_send_challenge_ack(sk, skb);
+ goto discard;
+ }
switch (sk->sk_state) {
case TCP_SYN_RECV:
- if (!acceptable)
- return 1;
-
if (!tp->srtt_us)
tcp_synack_rtt_meas(sk, req);
@@ -5909,14 +5912,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
* our SYNACK so stop the SYNACK timer.
*/
if (req) {
- /* Return RST if ack_seq is invalid.
- * Note that RFC793 only says to generate a
- * DUPACK for it but for TCP Fast Open it seems
- * better to treat this case like TCP_SYN_RECV
- * above.
- */
- if (!acceptable)
- return 1;
/* We no longer need the request sock. */
reqsk_fastopen_remove(sk, req, false);
tcp_rearm_rto(sk);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 40c29712f32a..199658afa68b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -986,7 +986,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
INIT_HLIST_NODE(&ifa->addr_lst);
ifa->scope = scope;
ifa->prefix_len = pfxlen;
- ifa->flags = flags | IFA_F_TENTATIVE;
+ ifa->flags = flags;
+ /* No need to add the TENTATIVE flag for addresses with NODAD */
+ if (!(flags & IFA_F_NODAD))
+ ifa->flags |= IFA_F_TENTATIVE;
ifa->valid_lft = valid_lft;
ifa->prefered_lft = prefered_lft;
ifa->cstamp = ifa->tstamp = jiffies;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 189eb10b742d..e742c4deb13d 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -423,7 +423,9 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
@@ -603,7 +605,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
ip6h->hop_limit = 0;
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index cbcdd5db31f4..44a2010e2076 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -248,9 +248,11 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
esph->spi = x->id.spi;
sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg,
- (unsigned char *)esph - skb->data,
- assoclen + ivlen + clen + alen);
+ err = skb_to_sgvec(skb, sg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + clen + alen);
+ if (unlikely(err < 0))
+ goto error;
aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
aead_request_set_ad(req, assoclen);
@@ -423,7 +425,9 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
}
sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(ret < 0))
+ goto out;
aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
aead_request_set_ad(req, assoclen);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 85afef175cf9..6e496c3dd8ef 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -320,11 +320,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
if (t || !create)
return t;
- if (parms->name[0])
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ return NULL;
strlcpy(name, parms->name, IFNAMSIZ);
- else
+ } else {
strcpy(name, "ip6gre%d");
-
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ip6gre_tunnel_setup);
if (!dev)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 3ef81c387923..bfa710e8b615 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -340,6 +340,10 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ struct dst_entry *dst = skb_dst(skb);
+
+ IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
skb_sender_cpu_clear(skb);
return dst_output(net, sk, skb);
}
@@ -534,8 +538,6 @@ int ip6_forward(struct sk_buff *skb)
hdr->hop_limit--;
- IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
- IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
net, NULL, skb, skb->dev, dst->dev,
ip6_forward_finish);
@@ -1276,7 +1278,7 @@ static int __ip6_append_data(struct sock *sk,
unsigned int flags, int dontfrag)
{
struct sk_buff *skb, *skb_prev = NULL;
- unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
+ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
int exthdrlen = 0;
int dst_exthdrlen = 0;
int hh_len;
@@ -1312,6 +1314,12 @@ static int __ip6_append_data(struct sock *sk,
sizeof(struct frag_hdr) : 0) +
rt->rt6i_nfheader_len;
+ /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
+ * the first fragment
+ */
+ if (headersize + transhdrlen > mtu)
+ goto emsgsize;
+
if (cork->length + length > mtu - headersize && dontfrag &&
(sk->sk_protocol == IPPROTO_UDP ||
sk->sk_protocol == IPPROTO_RAW)) {
@@ -1327,9 +1335,8 @@ static int __ip6_append_data(struct sock *sk,
if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
- ipv6_local_error(sk, EMSGSIZE, fl6,
- mtu - headersize +
- sizeof(struct ipv6hdr));
+ pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
+ ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
return -EMSGSIZE;
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 27b00c4e642d..3c2468bd0b7c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -286,13 +286,16 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
struct net_device *dev;
struct ip6_tnl *t;
char name[IFNAMSIZ];
- int err = -ENOMEM;
+ int err = -E2BIG;
- if (p->name[0])
+ if (p->name[0]) {
+ if (!dev_valid_name(p->name))
+ goto failed;
strlcpy(name, p->name, IFNAMSIZ);
- else
+ } else {
sprintf(name, "ip6tnl%%d");
-
+ }
+ err = -ENOMEM;
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ip6_tnl_dev_setup);
if (!dev)
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 00111ac94251..b8bf123f7f79 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -212,10 +212,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
char name[IFNAMSIZ];
int err;
- if (p->name[0])
+ if (p->name[0]) {
+ if (!dev_valid_name(p->name))
+ goto failed;
strlcpy(name, p->name, IFNAMSIZ);
- else
+ } else {
sprintf(name, "ip6_vti%%d");
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
if (!dev)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5842430a122a..f55c7aa1db34 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -844,6 +844,9 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
struct fib6_node *fn;
struct rt6_info *rt;
+ if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
+ flags &= ~RT6_LOOKUP_F_IFACE;
+
read_lock_bh(&table->tb6_lock);
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
@@ -2696,6 +2699,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
+ [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
[RTA_OIF] = { .type = NLA_U32 },
[RTA_IIF] = { .type = NLA_U32 },
[RTA_PRIORITY] = { .type = NLA_U32 },
@@ -2705,6 +2709,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_ENCAP_TYPE] = { .type = NLA_U16 },
[RTA_ENCAP] = { .type = NLA_NESTED },
[RTA_UID] = { .type = NLA_U32 },
+ [RTA_TABLE] = { .type = NLA_U32 },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index bbba435d0639..51f7c32f04d7 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -244,11 +244,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
if (!create)
goto failed;
- if (parms->name[0])
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ goto failed;
strlcpy(name, parms->name, IFNAMSIZ);
- else
+ } else {
strcpy(name, "sit%d");
-
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ipip6_tunnel_setup);
if (!dev)
@@ -690,6 +692,7 @@ static int ipip6_rcv(struct sk_buff *skb)
if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6)))
goto out;
+ iph = ip_hdr(skb);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 6482b001f19a..15150b412930 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3305,7 +3305,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
p += pol->sadb_x_policy_len*8;
sec_ctx = (struct sadb_x_sec_ctx *)p;
if (len < pol->sadb_x_policy_len*8 +
- sec_ctx->sadb_x_sec_len) {
+ sec_ctx->sadb_x_sec_len*8) {
*dir = -EINVAL;
goto out;
}
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index fb3248ff8b48..ae3438685caa 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -732,6 +732,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
if ((session->ifname[0] &&
nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
+ (session->offset &&
+ nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
(session->cookie_len &&
nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
&session->cookie[0])) ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 67f2e72723b2..2764c4bd072c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -606,6 +606,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OL2TP)
goto end;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index bb8edb9ef506..09f2f3471ad6 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -197,9 +197,19 @@ static int llc_ui_release(struct socket *sock)
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
- if (!sock_flag(sk, SOCK_ZAPPED))
+ if (!sock_flag(sk, SOCK_ZAPPED)) {
+ struct llc_sap *sap = llc->sap;
+
+ /* Hold this for release_sock(), so that llc_backlog_rcv()
+ * could still use it.
+ */
+ llc_sap_hold(sap);
llc_sap_remove_socket(llc->sap, sk);
- release_sock(sk);
+ release_sock(sk);
+ llc_sap_put(sap);
+ } else {
+ release_sock(sk);
+ }
if (llc->dev)
dev_put(llc->dev);
sock_put(sk);
@@ -309,6 +319,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
int rc = -EINVAL;
dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
+
+ lock_sock(sk);
if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
goto out;
rc = -EAFNOSUPPORT;
@@ -380,6 +392,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
out_put:
llc_sap_put(sap);
out:
+ release_sock(sk);
return rc;
}
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index ea225bd2672c..f8d4ab8ca1a5 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1096,14 +1096,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
{
- struct llc_sock *llc = llc_sk(sk);
-
- del_timer(&llc->pf_cycle_timer.timer);
- del_timer(&llc->ack_timer.timer);
- del_timer(&llc->rej_sent_timer.timer);
- del_timer(&llc->busy_state_timer.timer);
- llc->ack_must_be_send = 0;
- llc->ack_pf = 0;
+ llc_sk_stop_all_timers(sk, false);
return 0;
}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8bc5a1bd2d45..d861b74ad068 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -951,6 +951,26 @@ out:
return sk;
}
+void llc_sk_stop_all_timers(struct sock *sk, bool sync)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ if (sync) {
+ del_timer_sync(&llc->pf_cycle_timer.timer);
+ del_timer_sync(&llc->ack_timer.timer);
+ del_timer_sync(&llc->rej_sent_timer.timer);
+ del_timer_sync(&llc->busy_state_timer.timer);
+ } else {
+ del_timer(&llc->pf_cycle_timer.timer);
+ del_timer(&llc->ack_timer.timer);
+ del_timer(&llc->rej_sent_timer.timer);
+ del_timer(&llc->busy_state_timer.timer);
+ }
+
+ llc->ack_must_be_send = 0;
+ llc->ack_pf = 0;
+}
+
/**
* llc_sk_free - Frees a LLC socket
* @sk - socket to free
@@ -963,7 +983,7 @@ void llc_sk_free(struct sock *sk)
llc->state = LLC_CONN_OUT_OF_SVC;
/* Stop all (possibly) running timers */
- llc_conn_ac_stop_all_timers(sk, NULL);
+ llc_sk_stop_all_timers(sk, true);
#ifdef DEBUG_LLC_CONN_ALLOC
printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
skb_queue_len(&llc->pdu_unack_q),
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f3deee7da389..c16b4da20db2 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4326,6 +4326,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
return -EINVAL;
+ /* If a reconfig is happening, bail out */
+ if (local->in_reconfig)
+ return -EBUSY;
+
if (assoc) {
rcu_read_lock();
have_sta = sta_info_get(sdata, cbss->bssid);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 2f0e4f61c40f..9979f4a1053b 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2808,7 +2808,7 @@ static struct genl_family ip_vs_genl_family = {
.hdrsize = 0,
.name = IPVS_GENL_NAME,
.version = IPVS_GENL_VERSION,
- .maxattr = IPVS_CMD_MAX,
+ .maxattr = IPVS_CMD_ATTR_MAX,
.netnsok = true, /* Make ipvsadm to work on netns */
};
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 4ece07c68b3f..c68e020427ab 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -887,8 +887,13 @@ restart:
}
out:
local_bh_enable();
- if (last)
+ if (last) {
+ /* nf ct hash resize happened, now clear the leftover. */
+ if ((struct nf_conn *)cb->args[1] == last)
+ cb->args[1] = 0;
+
nf_ct_put(last);
+ }
return skb->len;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f59d82f0aa97..83c0f56d05cb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1034,6 +1034,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
if (addr->sa_family != AF_NETLINK)
return -EINVAL;
+ if (alen < sizeof(struct sockaddr_nl))
+ return -EINVAL;
+
if ((nladdr->nl_groups || nladdr->nl_pid) &&
!netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
return -EPERM;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 92ca3e106c2b..f165514a4db5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -332,11 +332,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
skb_set_queue_mapping(skb, queue_index);
}
-/* register_prot_hook must be invoked with the po->bind_lock held,
+/* __register_prot_hook must be invoked through register_prot_hook
* or from a context in which asynchronous accesses to the packet
* socket is not possible (packet_create()).
*/
-static void register_prot_hook(struct sock *sk)
+static void __register_prot_hook(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
@@ -351,8 +351,13 @@ static void register_prot_hook(struct sock *sk)
}
}
-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
- * held. If the sync parameter is true, we will temporarily drop
+static void register_prot_hook(struct sock *sk)
+{
+ lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
+ __register_prot_hook(sk);
+}
+
+/* If the sync parameter is true, we will temporarily drop
* the po->bind_lock and do a synchronize_net to make sure no
* asynchronous packet processing paths still refer to the elements
* of po->prot_hook. If the sync parameter is false, it is the
@@ -362,6 +367,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
{
struct packet_sock *po = pkt_sk(sk);
+ lockdep_assert_held_once(&po->bind_lock);
+
po->running = 0;
if (po->fanout)
@@ -2892,6 +2899,7 @@ static int packet_release(struct socket *sock)
packet_flush_mclist(sk);
+ lock_sock(sk);
if (po->rx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 0);
@@ -2901,6 +2909,7 @@ static int packet_release(struct socket *sock)
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 1);
}
+ release_sock(sk);
f = fanout_release(sk);
@@ -3134,7 +3143,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
if (proto) {
po->prot_hook.type = proto;
- register_prot_hook(sk);
+ __register_prot_hook(sk);
}
mutex_lock(&net->packet.sklist_lock);
@@ -3570,6 +3579,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
union tpacket_req_u req_u;
int len;
+ lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
@@ -3580,14 +3590,21 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
len = sizeof(req_u.req3);
break;
}
- if (optlen < len)
- return -EINVAL;
- if (pkt_sk(sk)->has_vnet_hdr)
- return -EINVAL;
- if (copy_from_user(&req_u.req, optval, len))
- return -EFAULT;
- return packet_set_ring(sk, &req_u, 0,
- optname == PACKET_TX_RING);
+ if (optlen < len) {
+ ret = -EINVAL;
+ } else {
+ if (pkt_sk(sk)->has_vnet_hdr) {
+ ret = -EINVAL;
+ } else {
+ if (copy_from_user(&req_u.req, optval, len))
+ ret = -EFAULT;
+ else
+ ret = packet_set_ring(sk, &req_u, 0,
+ optname == PACKET_TX_RING);
+ }
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_COPY_THRESH:
{
@@ -3653,12 +3670,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->tp_loss = !!val;
- return 0;
+
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_loss = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_AUXDATA:
{
@@ -3669,7 +3692,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ lock_sock(sk);
po->auxdata = !!val;
+ release_sock(sk);
return 0;
}
case PACKET_ORIGDEV:
@@ -3681,7 +3706,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ lock_sock(sk);
po->origdev = !!val;
+ release_sock(sk);
return 0;
}
case PACKET_VNET_HDR:
@@ -3690,15 +3717,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (sock->type != SOCK_RAW)
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->has_vnet_hdr = !!val;
- return 0;
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->has_vnet_hdr = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_TIMESTAMP:
{
@@ -3736,11 +3768,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->tp_tx_has_off = !!val;
+
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_tx_has_off = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
return 0;
}
case PACKET_QDISC_BYPASS:
@@ -4116,7 +4154,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;
- lock_sock(sk);
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
WARN(1, "Tx-ring is not supported.\n");
@@ -4252,7 +4289,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
- release_sock(sk);
return err;
}
diff --git a/net/packet/internal.h b/net/packet/internal.h
index d55bfc34d6b3..1309e2a7baad 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -109,10 +109,12 @@ struct packet_sock {
int copy_thresh;
spinlock_t bind_lock;
struct mutex pg_vec_lock;
- unsigned int running:1, /* prot_hook is attached*/
- auxdata:1,
+ unsigned int running; /* bind_lock must be held */
+ unsigned int auxdata:1, /* writer must hold sock lock */
origdev:1,
- has_vnet_hdr:1;
+ has_vnet_hdr:1,
+ tp_loss:1,
+ tp_tx_has_off:1;
int pressure;
int ifindex; /* bound device */
__be16 num;
@@ -122,8 +124,6 @@ struct packet_sock {
enum tpacket_versions tp_version;
unsigned int tp_hdrlen;
unsigned int tp_reserve;
- unsigned int tp_loss:1;
- unsigned int tp_tx_has_off:1;
unsigned int tp_tstamp;
struct net_device __rcu *cached_dev;
int (*xmit)(struct sk_buff *skb);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index b22ea956522b..e29b47193645 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -108,6 +108,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
rs, &addr, (int)ntohs(*port));
break;
} else {
+ rs->rs_bound_addr = 0;
rds_sock_put(rs);
ret = -ENOMEM;
break;
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index d7a9ab5a9d9c..6c65fb229e50 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -209,7 +209,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
struct sk_buff *trailer;
unsigned int len;
u16 check;
- int nsg;
+ int nsg, err;
sp = rxrpc_skb(skb);
@@ -240,7 +240,9 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
len &= ~(call->conn->size_align - 1);
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, len);
+ err = skb_to_sgvec(skb, sg, 0, len);
+ if (unlikely(err < 0))
+ return err;
crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
_leave(" = 0");
@@ -336,7 +338,7 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
struct sk_buff *trailer;
u32 data_size, buf;
u16 check;
- int nsg;
+ int nsg, ret;
_enter("");
@@ -348,7 +350,9 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
goto nomem;
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, 8);
+ ret = skb_to_sgvec(skb, sg, 0, 8);
+ if (unlikely(ret < 0))
+ return ret;
/* start the decryption afresh */
memset(&iv, 0, sizeof(iv));
@@ -411,7 +415,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
struct sk_buff *trailer;
u32 data_size, buf;
u16 check;
- int nsg;
+ int nsg, ret;
_enter(",{%d}", skb->len);
@@ -430,7 +434,12 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
}
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(ret < 0)) {
+ if (sg != _sg)
+ kfree(sg);
+ return ret;
+ }
/* decrypt from the session key */
token = call->conn->key->payload.data[0];
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 694a06f1e0d5..f44fea22d69c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -101,8 +101,10 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
a->order = n_i;
nest = nla_nest_start(skb, a->order);
- if (nest == NULL)
+ if (nest == NULL) {
+ index--;
goto nla_put_failure;
+ }
err = tcf_action_dump_1(skb, a, 0, 0);
if (err < 0) {
index--;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 0bc6f912f870..bd155e59be1c 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -249,10 +249,14 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
- if (cfg->is_ebpf)
- bpf_prog_put(cfg->filter);
- else
- bpf_prog_destroy(cfg->filter);
+ struct bpf_prog *filter = cfg->filter;
+
+ if (filter) {
+ if (cfg->is_ebpf)
+ bpf_prog_put(filter);
+ else
+ bpf_prog_destroy(filter);
+ }
kfree(cfg->bpf_ops);
kfree(cfg->bpf_name);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 481806b43de8..1cd7b7e33fa3 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -519,46 +519,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
addr->v6.sin6_scope_id = 0;
}
-/* Compare addresses exactly.
- * v4-mapped-v6 is also in consideration.
- */
-static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
- const union sctp_addr *addr2)
+static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2)
{
if (addr1->sa.sa_family != addr2->sa.sa_family) {
if (addr1->sa.sa_family == AF_INET &&
addr2->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
- if (addr2->v6.sin6_port == addr1->v4.sin_port &&
- addr2->v6.sin6_addr.s6_addr32[3] ==
- addr1->v4.sin_addr.s_addr)
- return 1;
- }
+ ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
+ addr2->v6.sin6_addr.s6_addr32[3] ==
+ addr1->v4.sin_addr.s_addr)
+ return 1;
+
if (addr2->sa.sa_family == AF_INET &&
addr1->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
- if (addr1->v6.sin6_port == addr2->v4.sin_port &&
- addr1->v6.sin6_addr.s6_addr32[3] ==
- addr2->v4.sin_addr.s_addr)
- return 1;
- }
+ ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
+ addr1->v6.sin6_addr.s6_addr32[3] ==
+ addr2->v4.sin_addr.s_addr)
+ return 1;
+
return 0;
}
- if (addr1->v6.sin6_port != addr2->v6.sin6_port)
- return 0;
+
if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
return 0;
+
/* If this is a linklocal address, compare the scope_id. */
- if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
- if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
- (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
- return 0;
- }
- }
+ if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+ addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+ addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
+ return 0;
return 1;
}
+/* Compare addresses exactly.
+ * v4-mapped-v6 is also in consideration.
+ */
+static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2)
+{
+ return __sctp_v6_cmp_addr(addr1, addr2) &&
+ addr1->v6.sin6_port == addr2->v6.sin6_port;
+}
+
/* Initialize addr struct to INADDR_ANY. */
static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
{
@@ -723,8 +726,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
sctp_v6_map_v4(addr);
}
- if (addr->sa.sa_family == AF_INET)
+ if (addr->sa.sa_family == AF_INET) {
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
return sizeof(struct sockaddr_in);
+ }
return sizeof(struct sockaddr_in6);
}
@@ -841,8 +846,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
const union sctp_addr *addr2,
struct sctp_sock *opt)
{
- struct sctp_af *af1, *af2;
struct sock *sk = sctp_opt2sk(opt);
+ struct sctp_af *af1, *af2;
af1 = sctp_get_af_specific(addr1->sa.sa_family);
af2 = sctp_get_af_specific(addr2->sa.sa_family);
@@ -858,10 +863,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
return 1;
- if (addr1->sa.sa_family != addr2->sa.sa_family)
- return 0;
-
- return af1->cmp_addr(addr1, addr2);
+ return __sctp_v6_cmp_addr(addr1, addr2);
}
/* Verify that the provided sockaddr looks bindable. Common verification,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index df6a4b2d0728..13c7f42b7040 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -335,11 +335,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
if (!opt->pf->af_supported(addr->sa.sa_family, opt))
return NULL;
- /* V4 mapped address are really of AF_INET family */
- if (addr->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
- !opt->pf->af_supported(AF_INET, opt))
- return NULL;
+ if (addr->sa.sa_family == AF_INET6) {
+ if (len < SIN6_LEN_RFC2133)
+ return NULL;
+ /* V4 mapped address are really of AF_INET family */
+ if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+ !opt->pf->af_supported(AF_INET, opt))
+ return NULL;
+ }
/* If we get this far, af is valid. */
af = sctp_get_af_specific(addr->sa.sa_family);
@@ -1518,7 +1521,7 @@ static void sctp_close(struct sock *sk, long timeout)
pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_state = SCTP_SS_CLOSING;
@@ -1569,7 +1572,7 @@ static void sctp_close(struct sock *sk, long timeout)
* held and that should be grabbed before socket lock.
*/
spin_lock_bh(&net->sctp.addr_wq_lock);
- bh_lock_sock(sk);
+ bh_lock_sock_nested(sk);
/* Hold the sock, since sk_common_release() will put sock_put()
* and we have just a little more cleanup.
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index d81186d34558..9103dd15511c 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
struct dentry *clnt_dir = pipe_dentry->d_parent;
struct dentry *gssd_dir = clnt_dir->d_parent;
+ dget(pipe_dentry);
__rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
__rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
__rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 728d65fbab0c..c9c0976d3bbb 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2363,7 +2363,12 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case -EHOSTUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
- /* retry with existing socket, after a delay */
+ /*
+ * xs_tcp_force_close() wakes tasks with -EIO.
+ * We need to wake them first to ensure the
+ * correct error code.
+ */
+ xprt_wake_pending_tasks(xprt, status);
xs_tcp_force_close(xprt);
goto out;
}
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 77bf9113c7a7..2763bd369b79 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -44,7 +44,8 @@
static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
[TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
+ [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
+ [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
};
/*
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 970880924eca..d68c7318805a 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1760,13 +1760,11 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
if (ignore_reg_update(wiphy, initiator)) {
/*
* Regulatory updates set by CORE are ignored for custom
- * regulatory cards and for self managed regulatory.
- * Let us notify the changes to the driver,
+ * regulatory cards. Let us notify the changes to the driver,
* as some drivers used this to restore its orig_* reg domain.
*/
- if ((initiator == NL80211_REGDOM_SET_BY_CORE &&
- wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) ||
- (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED))
+ if (initiator == NL80211_REGDOM_SET_BY_CORE &&
+ wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)
reg_call_notifier(wiphy, lr);
return;
}
@@ -1796,14 +1794,6 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
reg_check_channels();
}
-void cfg80211_send_reg_change_event(struct regulatory_request *request,
- struct wiphy *wiphy)
-{
- request->wiphy_idx = get_wiphy_idx(wiphy);
- nl80211_send_reg_change_event(request);
-}
-EXPORT_SYMBOL(cfg80211_send_reg_change_event);
-
static void handle_channel_custom(struct wiphy *wiphy,
struct ieee80211_channel *chan,
const struct ieee80211_regdomain *regd)
@@ -2257,7 +2247,22 @@ out_free:
reg_free_request(reg_request);
}
-static bool reg_only_self_managed_wiphys(struct regulatory_request *reg_request)
+static void notify_self_managed_wiphys(struct regulatory_request *request)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wiphy *wiphy;
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ wiphy = &rdev->wiphy;
+ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED &&
+ request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ request->user_reg_hint_type ==
+ NL80211_USER_REG_HINT_CELL_BASE)
+ reg_call_notifier(wiphy, request);
+ }
+}
+
+static bool reg_only_self_managed_wiphys(void)
{
struct cfg80211_registered_device *rdev;
struct wiphy *wiphy;
@@ -2267,12 +2272,10 @@ static bool reg_only_self_managed_wiphys(struct regulatory_request *reg_request)
list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
wiphy = &rdev->wiphy;
- if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
+ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
self_managed_found = true;
- reg_call_notifier(wiphy, reg_request);
- } else {
+ else
return false;
- }
}
/* make sure at least one self-managed wiphy exists */
@@ -2310,7 +2313,8 @@ static void reg_process_pending_hints(void)
spin_unlock(&reg_requests_lock);
- if (reg_only_self_managed_wiphys(reg_request)) {
+ notify_self_managed_wiphys(reg_request);
+ if (reg_only_self_managed_wiphys()) {
reg_free_request(reg_request);
return;
}
@@ -3188,17 +3192,26 @@ EXPORT_SYMBOL(regulatory_set_wiphy_regd_sync_rtnl);
void wiphy_regulatory_register(struct wiphy *wiphy)
{
- struct regulatory_request *lr;
+ struct regulatory_request *lr = get_last_request();
- /* self-managed devices ignore external hints */
- if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
+ /* self-managed devices ignore beacon hints and country IE */
+ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS |
REGULATORY_COUNTRY_IE_IGNORE;
+ /*
+ * The last request may have been received before this
+ * registration call. Call the driver notifier if
+ * initiator is USER and user type is CELL_BASE.
+ */
+ if (lr->initiator == NL80211_REGDOM_SET_BY_USER &&
+ lr->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE)
+ reg_call_notifier(wiphy, lr);
+ }
+
if (!reg_dev_ignore_cell_hint(wiphy))
reg_num_devs_support_basehint++;
- lr = get_last_request();
wiphy_update_regulatory(wiphy, lr->initiator);
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index a750f330b8dd..c6ab4da4b8e2 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1794,32 +1794,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
static int __init x25_init(void)
{
- int rc = proto_register(&x25_proto, 0);
+ int rc;
- if (rc != 0)
+ rc = proto_register(&x25_proto, 0);
+ if (rc)
goto out;
rc = sock_register(&x25_family_ops);
- if (rc != 0)
+ if (rc)
goto out_proto;
dev_add_pack(&x25_packet_type);
rc = register_netdevice_notifier(&x25_dev_notifier);
- if (rc != 0)
+ if (rc)
goto out_sock;
- pr_info("Linux Version 0.2\n");
+ rc = x25_register_sysctl();
+ if (rc)
+ goto out_dev;
- x25_register_sysctl();
rc = x25_proc_init();
- if (rc != 0)
- goto out_dev;
+ if (rc)
+ goto out_sysctl;
+
+ pr_info("Linux Version 0.2\n");
+
out:
return rc;
+out_sysctl:
+ x25_unregister_sysctl();
out_dev:
unregister_netdevice_notifier(&x25_dev_notifier);
out_sock:
+ dev_remove_pack(&x25_packet_type);
sock_unregister(AF_X25);
out_proto:
proto_unregister(&x25_proto);
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index 43239527a205..703d46aae7a2 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = {
{ 0, },
};
-void __init x25_register_sysctl(void)
+int __init x25_register_sysctl(void)
{
x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
+ if (!x25_table_header)
+ return -ENOMEM;
+ return 0;
}
void x25_unregister_sysctl(void)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 7dfa35d223e6..70535b8ee4d6 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1208,6 +1208,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
x->curlft.add_time = orig->curlft.add_time;
x->km.state = orig->km.state;
x->km.seq = orig->km.seq;
+ x->replay = orig->replay;
+ x->preplay = orig->preplay;
return x;
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 262889046703..45e246595d10 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -106,6 +106,7 @@ all_compiled_sources()
case "$i" in
*.[cS])
j=${i/\.[cS]/\.o}
+ j="${j#$tree}"
if [ -e $j ]; then
echo $i
fi
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 13bb3b409b5c..e3f8891c5d72 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1945,8 +1945,9 @@ static inline u32 file_to_av(struct file *file)
static inline u32 open_file_to_av(struct file *file)
{
u32 av = file_to_av(file);
+ struct inode *inode = file_inode(file);
- if (selinux_policycap_openperm)
+ if (selinux_policycap_openperm && inode->i_sb->s_magic != SOCKFS_MAGIC)
av |= FILE__OPEN;
return av;
@@ -2915,6 +2916,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
{
const struct cred *cred = current_cred();
+ struct inode *inode = d_backing_inode(dentry);
unsigned int ia_valid = iattr->ia_valid;
__u32 av = FILE__WRITE;
@@ -2930,8 +2932,10 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
return dentry_has_perm(cred, dentry, FILE__SETATTR);
- if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE)
- && !(ia_valid & ATTR_FILE))
+ if (selinux_policycap_openperm &&
+ inode->i_sb->s_magic != SOCKFS_MAGIC &&
+ (ia_valid & ATTR_SIZE) &&
+ !(ia_valid & ATTR_FILE))
av |= FILE__OPEN;
return dentry_has_perm(cred, dentry, av);
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 6cd8aec146f2..07feb35f1935 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -833,8 +833,25 @@ static int choose_rate(struct snd_pcm_substream *substream,
return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
}
-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
- bool trylock)
+/* parameter locking: returns immediately if tried during streaming */
+static int lock_params(struct snd_pcm_runtime *runtime)
+{
+ if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -ERESTARTSYS;
+ if (atomic_read(&runtime->oss.rw_ref)) {
+ mutex_unlock(&runtime->oss.params_lock);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static void unlock_params(struct snd_pcm_runtime *runtime)
+{
+ mutex_unlock(&runtime->oss.params_lock);
+}
+
+/* call with params_lock held */
+static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_hw_params *params, *sparams;
@@ -848,12 +865,9 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
struct snd_mask sformat_mask;
struct snd_mask mask;
- if (trylock) {
- if (!(mutex_trylock(&runtime->oss.params_lock)))
- return -EAGAIN;
- } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
- return -EINTR;
- sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
+ if (!runtime->oss.params)
+ return 0;
+ sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
params = kmalloc(sizeof(*params), GFP_KERNEL);
sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
if (!sw_params || !params || !sparams) {
@@ -991,7 +1005,6 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
goto failure;
}
- memset(sw_params, 0, sizeof(*sw_params));
if (runtime->oss.trigger) {
sw_params->start_threshold = 1;
} else {
@@ -1079,6 +1092,23 @@ failure:
kfree(sw_params);
kfree(params);
kfree(sparams);
+ return err;
+}
+
+/* this one takes the lock by itself */
+static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+ bool trylock)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int err;
+
+ if (trylock) {
+ if (!(mutex_trylock(&runtime->oss.params_lock)))
+ return -EAGAIN;
+ } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -ERESTARTSYS;
+
+ err = snd_pcm_oss_change_params_locked(substream);
mutex_unlock(&runtime->oss.params_lock);
return err;
}
@@ -1107,6 +1137,10 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
return 0;
}
+/* call with params_lock held */
+/* NOTE: this always calls PREPARE unconditionally no matter whether
+ * runtime->oss.prepare is set or not
+ */
static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
{
int err;
@@ -1131,8 +1165,6 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime;
int err;
- if (substream == NULL)
- return 0;
runtime = substream->runtime;
if (runtime->oss.params) {
err = snd_pcm_oss_change_params(substream, false);
@@ -1140,6 +1172,29 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
return err;
}
if (runtime->oss.prepare) {
+ if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -ERESTARTSYS;
+ err = snd_pcm_oss_prepare(substream);
+ mutex_unlock(&runtime->oss.params_lock);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+/* call with params_lock held */
+static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime;
+ int err;
+
+ runtime = substream->runtime;
+ if (runtime->oss.params) {
+ err = snd_pcm_oss_change_params_locked(substream);
+ if (err < 0)
+ return err;
+ }
+ if (runtime->oss.prepare) {
err = snd_pcm_oss_prepare(substream);
if (err < 0)
return err;
@@ -1367,13 +1422,15 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
if (atomic_read(&substream->mmap_count))
return -ENXIO;
- if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
- return tmp;
+ atomic_inc(&runtime->oss.rw_ref);
while (bytes > 0) {
if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
tmp = -ERESTARTSYS;
break;
}
+ tmp = snd_pcm_oss_make_ready_locked(substream);
+ if (tmp < 0)
+ goto err;
if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
tmp = bytes;
if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1429,6 +1486,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
}
tmp = 0;
}
+ atomic_dec(&runtime->oss.rw_ref);
return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}
@@ -1474,13 +1532,15 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
if (atomic_read(&substream->mmap_count))
return -ENXIO;
- if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
- return tmp;
+ atomic_inc(&runtime->oss.rw_ref);
while (bytes > 0) {
if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
tmp = -ERESTARTSYS;
break;
}
+ tmp = snd_pcm_oss_make_ready_locked(substream);
+ if (tmp < 0)
+ goto err;
if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
if (runtime->oss.buffer_used == 0) {
tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1521,6 +1581,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
}
tmp = 0;
}
+ atomic_dec(&runtime->oss.rw_ref);
return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}
@@ -1536,10 +1597,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
continue;
runtime = substream->runtime;
snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+ mutex_lock(&runtime->oss.params_lock);
runtime->oss.prepare = 1;
runtime->oss.buffer_used = 0;
runtime->oss.prev_hw_ptr_period = 0;
runtime->oss.period_ptr = 0;
+ mutex_unlock(&runtime->oss.params_lock);
}
return 0;
}
@@ -1625,9 +1688,13 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
goto __direct;
if ((err = snd_pcm_oss_make_ready(substream)) < 0)
return err;
+ atomic_inc(&runtime->oss.rw_ref);
+ if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+ atomic_dec(&runtime->oss.rw_ref);
+ return -ERESTARTSYS;
+ }
format = snd_pcm_oss_format_from(runtime->oss.format);
width = snd_pcm_format_physical_width(format);
- mutex_lock(&runtime->oss.params_lock);
if (runtime->oss.buffer_used > 0) {
#ifdef OSS_DEBUG
pcm_dbg(substream->pcm, "sync: buffer_used\n");
@@ -1637,10 +1704,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
runtime->oss.buffer + runtime->oss.buffer_used,
size);
err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
- if (err < 0) {
- mutex_unlock(&runtime->oss.params_lock);
- return err;
- }
+ if (err < 0)
+ goto unlock;
} else if (runtime->oss.period_ptr > 0) {
#ifdef OSS_DEBUG
pcm_dbg(substream->pcm, "sync: period_ptr\n");
@@ -1650,10 +1715,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
runtime->oss.buffer,
size * 8 / width);
err = snd_pcm_oss_sync1(substream, size);
- if (err < 0) {
- mutex_unlock(&runtime->oss.params_lock);
- return err;
- }
+ if (err < 0)
+ goto unlock;
}
/*
* The ALSA period might be a bit larger than the OSS one.
@@ -1684,7 +1747,11 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
snd_pcm_lib_writev(substream, buffers, size);
}
}
+unlock:
mutex_unlock(&runtime->oss.params_lock);
+ atomic_dec(&runtime->oss.rw_ref);
+ if (err < 0)
+ return err;
/*
* finish sync: drain the buffer
*/
@@ -1695,7 +1762,9 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
substream->f_flags = saved_f_flags;
if (err < 0)
return err;
+ mutex_lock(&runtime->oss.params_lock);
runtime->oss.prepare = 1;
+ mutex_unlock(&runtime->oss.params_lock);
}
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
@@ -1706,8 +1775,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
if (err < 0)
return err;
+ mutex_lock(&runtime->oss.params_lock);
runtime->oss.buffer_used = 0;
runtime->oss.prepare = 1;
+ mutex_unlock(&runtime->oss.params_lock);
}
return 0;
}
@@ -1719,6 +1790,8 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
for (idx = 1; idx >= 0; --idx) {
struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
struct snd_pcm_runtime *runtime;
+ int err;
+
if (substream == NULL)
continue;
runtime = substream->runtime;
@@ -1726,10 +1799,14 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
rate = 1000;
else if (rate > 192000)
rate = 192000;
+ err = lock_params(runtime);
+ if (err < 0)
+ return err;
if (runtime->oss.rate != rate) {
runtime->oss.params = 1;
runtime->oss.rate = rate;
}
+ unlock_params(runtime);
}
return snd_pcm_oss_get_rate(pcm_oss_file);
}
@@ -1754,13 +1831,19 @@ static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsig
for (idx = 1; idx >= 0; --idx) {
struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
struct snd_pcm_runtime *runtime;
+ int err;
+
if (substream == NULL)
continue;
runtime = substream->runtime;
+ err = lock_params(runtime);
+ if (err < 0)
+ return err;
if (runtime->oss.channels != channels) {
runtime->oss.params = 1;
runtime->oss.channels = channels;
}
+ unlock_params(runtime);
}
return snd_pcm_oss_get_channels(pcm_oss_file);
}
@@ -1833,6 +1916,7 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
{
int formats, idx;
+ int err;
if (format != AFMT_QUERY) {
formats = snd_pcm_oss_get_formats(pcm_oss_file);
@@ -1846,10 +1930,14 @@ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int for
if (substream == NULL)
continue;
runtime = substream->runtime;
+ err = lock_params(runtime);
+ if (err < 0)
+ return err;
if (runtime->oss.format != format) {
runtime->oss.params = 1;
runtime->oss.format = format;
}
+ unlock_params(runtime);
}
}
return snd_pcm_oss_get_format(pcm_oss_file);
@@ -1869,8 +1957,6 @@ static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int s
{
struct snd_pcm_runtime *runtime;
- if (substream == NULL)
- return 0;
runtime = substream->runtime;
if (subdivide == 0) {
subdivide = runtime->oss.subdivision;
@@ -1894,9 +1980,17 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
for (idx = 1; idx >= 0; --idx) {
struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+ struct snd_pcm_runtime *runtime;
+
if (substream == NULL)
continue;
- if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0)
+ runtime = substream->runtime;
+ err = lock_params(runtime);
+ if (err < 0)
+ return err;
+ err = snd_pcm_oss_set_subdivide1(substream, subdivide);
+ unlock_params(runtime);
+ if (err < 0)
return err;
}
return err;
@@ -1906,8 +2000,6 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
{
struct snd_pcm_runtime *runtime;
- if (substream == NULL)
- return 0;
runtime = substream->runtime;
if (runtime->oss.subdivision || runtime->oss.fragshift)
return -EINVAL;
@@ -1927,9 +2019,17 @@ static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsig
for (idx = 1; idx >= 0; --idx) {
struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+ struct snd_pcm_runtime *runtime;
+
if (substream == NULL)
continue;
- if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0)
+ runtime = substream->runtime;
+ err = lock_params(runtime);
+ if (err < 0)
+ return err;
+ err = snd_pcm_oss_set_fragment1(substream, val);
+ unlock_params(runtime);
+ if (err < 0)
return err;
}
return err;
@@ -2013,6 +2113,9 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
}
if (psubstream) {
runtime = psubstream->runtime;
+ cmd = 0;
+ if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -ERESTARTSYS;
if (trigger & PCM_ENABLE_OUTPUT) {
if (runtime->oss.trigger)
goto _skip1;
@@ -2030,13 +2133,19 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
cmd = SNDRV_PCM_IOCTL_DROP;
runtime->oss.prepare = 1;
}
- err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
- if (err < 0)
- return err;
- }
_skip1:
+ mutex_unlock(&runtime->oss.params_lock);
+ if (cmd) {
+ err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
+ if (err < 0)
+ return err;
+ }
+ }
if (csubstream) {
runtime = csubstream->runtime;
+ cmd = 0;
+ if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ return -ERESTARTSYS;
if (trigger & PCM_ENABLE_INPUT) {
if (runtime->oss.trigger)
goto _skip2;
@@ -2051,11 +2160,14 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
cmd = SNDRV_PCM_IOCTL_DROP;
runtime->oss.prepare = 1;
}
- err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
- if (err < 0)
- return err;
- }
_skip2:
+ mutex_unlock(&runtime->oss.params_lock);
+ if (cmd) {
+ err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
+ if (err < 0)
+ return err;
+ }
+ }
return 0;
}
@@ -2307,6 +2419,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
runtime->oss.maxfrags = 0;
runtime->oss.subdivision = 0;
substream->pcm_release = snd_pcm_oss_release_substream;
+ atomic_set(&runtime->oss.rw_ref, 0);
}
static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 48f6aee3680d..d79a04e703dc 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -28,6 +28,7 @@
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/pcm.h>
+#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
@@ -1036,8 +1037,13 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
snd_free_pages((void*)runtime->control,
PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
kfree(runtime->hw_constraints.rules);
- kfree(runtime);
+ /* Avoid concurrent access to runtime via PCM timer interface */
+ if (substream->timer)
+ spin_lock_irq(&substream->timer->lock);
substream->runtime = NULL;
+ if (substream->timer)
+ spin_unlock_irq(&substream->timer->lock);
+ kfree(runtime);
put_pid(substream->pid);
substream->pid = NULL;
substream->pstr->substream_opened--;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 7c36499491c5..981b18bf2e63 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2765,6 +2765,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
sync_ptr.s.status.hw_ptr = status->hw_ptr;
sync_ptr.s.status.tstamp = status->tstamp;
sync_ptr.s.status.suspended_state = status->suspended_state;
+ sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
snd_pcm_stream_unlock_irq(substream);
if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
return -EFAULT;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 16f8124b1150..514380104944 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -115,6 +115,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
return -ENOMEM;
runtime->substream = substream;
spin_lock_init(&runtime->lock);
+ mutex_init(&runtime->realloc_mutex);
init_waitqueue_head(&runtime->sleep);
INIT_WORK(&runtime->event_work, snd_rawmidi_input_event_work);
runtime->event = NULL;
@@ -636,8 +637,10 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_params * params)
{
char *newbuf;
+ char *oldbuf;
struct snd_rawmidi_runtime *runtime = substream->runtime;
-
+ unsigned long flags;
+
if (substream->append && substream->use_count > 1)
return -EBUSY;
snd_rawmidi_drain_output(substream);
@@ -648,13 +651,22 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
return -EINVAL;
}
if (params->buffer_size != runtime->buffer_size) {
- newbuf = krealloc(runtime->buffer, params->buffer_size,
+ mutex_lock(&runtime->realloc_mutex);
+ newbuf = __krealloc(runtime->buffer, params->buffer_size,
GFP_KERNEL);
- if (!newbuf)
+ if (!newbuf) {
+ mutex_unlock(&runtime->realloc_mutex);
return -ENOMEM;
+ }
+ spin_lock_irqsave(&runtime->lock, flags);
+ oldbuf = runtime->buffer;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
runtime->avail = runtime->buffer_size;
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ if (oldbuf != newbuf)
+ kfree(oldbuf);
+ mutex_unlock(&runtime->realloc_mutex);
}
runtime->avail_min = params->avail_min;
substream->active_sensing = !params->no_active_sensing;
@@ -666,7 +678,9 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_params * params)
{
char *newbuf;
+ char *oldbuf;
struct snd_rawmidi_runtime *runtime = substream->runtime;
+ unsigned long flags;
snd_rawmidi_drain_input(substream);
if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L) {
@@ -676,12 +690,21 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
return -EINVAL;
}
if (params->buffer_size != runtime->buffer_size) {
- newbuf = krealloc(runtime->buffer, params->buffer_size,
+ mutex_lock(&runtime->realloc_mutex);
+ newbuf = __krealloc(runtime->buffer, params->buffer_size,
GFP_KERNEL);
- if (!newbuf)
+ if (!newbuf) {
+ mutex_unlock(&runtime->realloc_mutex);
return -ENOMEM;
+ }
+ spin_lock_irqsave(&runtime->lock, flags);
+ oldbuf = runtime->buffer;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ if (oldbuf != newbuf)
+ kfree(oldbuf);
+ mutex_unlock(&runtime->realloc_mutex);
}
runtime->avail_min = params->avail_min;
return 0;
@@ -954,6 +977,8 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
unsigned long appl_ptr;
spin_lock_irqsave(&runtime->lock, flags);
+ if (userbuf)
+ mutex_lock(&runtime->realloc_mutex);
while (count > 0 && runtime->avail) {
count1 = runtime->buffer_size - runtime->appl_ptr;
if (count1 > count)
@@ -973,6 +998,7 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
spin_unlock_irqrestore(&runtime->lock, flags);
if (copy_to_user(userbuf + result,
runtime->buffer + appl_ptr, count1)) {
+ mutex_unlock(&runtime->realloc_mutex);
return result > 0 ? result : -EFAULT;
}
spin_lock_irqsave(&runtime->lock, flags);
@@ -981,6 +1007,8 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
count -= count1;
}
spin_unlock_irqrestore(&runtime->lock, flags);
+ if (userbuf)
+ mutex_unlock(&runtime->realloc_mutex);
return result;
}
@@ -1245,10 +1273,14 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
return -EINVAL;
result = 0;
+ if (userbuf)
+ mutex_lock(&runtime->realloc_mutex);
spin_lock_irqsave(&runtime->lock, flags);
if (substream->append) {
if ((long)runtime->avail < count) {
spin_unlock_irqrestore(&runtime->lock, flags);
+ if (userbuf)
+ mutex_unlock(&runtime->realloc_mutex);
return -EAGAIN;
}
}
@@ -1284,6 +1316,8 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
__end:
count1 = runtime->avail < runtime->buffer_size;
spin_unlock_irqrestore(&runtime->lock, flags);
+ if (userbuf)
+ mutex_unlock(&runtime->realloc_mutex);
if (count1)
snd_rawmidi_output_trigger(substream, 1);
return result;
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
index 09a89094dcf7..4e304a24924a 100644
--- a/sound/core/rawmidi_compat.c
+++ b/sound/core/rawmidi_compat.c
@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
struct snd_rawmidi_params params;
unsigned int val;
- if (rfile->output == NULL)
- return -EINVAL;
if (get_user(params.stream, &src->stream) ||
get_user(params.buffer_size, &src->buffer_size) ||
get_user(params.avail_min, &src->avail_min) ||
@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
params.no_active_sensing = val;
switch (params.stream) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
+ if (!rfile->output)
+ return -EINVAL;
return snd_rawmidi_output_params(rfile->output, &params);
case SNDRV_RAWMIDI_STREAM_INPUT:
+ if (!rfile->input)
+ return -EINVAL;
return snd_rawmidi_input_params(rfile->input, &params);
}
return -EINVAL;
@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
int err;
struct snd_rawmidi_status status;
- if (rfile->output == NULL)
- return -EINVAL;
if (get_user(status.stream, &src->stream))
return -EFAULT;
switch (status.stream) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
+ if (!rfile->output)
+ return -EINVAL;
err = snd_rawmidi_output_status(rfile->output, &status);
break;
case SNDRV_RAWMIDI_STREAM_INPUT:
+ if (!rfile->input)
+ return -EINVAL;
err = snd_rawmidi_input_status(rfile->input, &status);
break;
default:
@@ -113,16 +117,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
int err;
struct snd_rawmidi_status status;
- if (rfile->output == NULL)
- return -EINVAL;
if (get_user(status.stream, &src->stream))
return -EFAULT;
switch (status.stream) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
+ if (!rfile->output)
+ return -EINVAL;
err = snd_rawmidi_output_status(rfile->output, &status);
break;
case SNDRV_RAWMIDI_STREAM_INPUT:
+ if (!rfile->input)
+ return -EINVAL;
err = snd_rawmidi_input_status(rfile->input, &status);
break;
default:
diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
index c3908862bc8b..86ca584c27b2 100644
--- a/sound/core/seq/oss/seq_oss_event.c
+++ b/sound/core/seq/oss/seq_oss_event.c
@@ -26,6 +26,7 @@
#include <sound/seq_oss_legacy.h>
#include "seq_oss_readq.h"
#include "seq_oss_writeq.h"
+#include <linux/nospec.h>
/*
@@ -287,10 +288,10 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
{
struct seq_oss_synthinfo *info;
- if (!snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- info = &dp->synths[dev];
switch (info->arg.event_passing) {
case SNDRV_SEQ_OSS_PROCESS_EVENTS:
if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -298,6 +299,7 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
}
+ ch = array_index_nospec(ch, info->nr_voices);
if (note == 255 && info->ch[ch].note >= 0) {
/* volume control */
int type;
@@ -347,10 +349,10 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
{
struct seq_oss_synthinfo *info;
- if (!snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- info = &dp->synths[dev];
switch (info->arg.event_passing) {
case SNDRV_SEQ_OSS_PROCESS_EVENTS:
if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -358,6 +360,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
}
+ ch = array_index_nospec(ch, info->nr_voices);
if (info->ch[ch].note >= 0) {
note = info->ch[ch].note;
info->ch[ch].vel = 0;
@@ -381,7 +384,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
static int
set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ if (!snd_seq_oss_synth_info(dp, dev))
return -ENXIO;
ev->type = type;
@@ -399,7 +402,7 @@ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note,
static int
set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ if (!snd_seq_oss_synth_info(dp, dev))
return -ENXIO;
ev->type = type;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index b30b2139e3f0..9debd1b8fd28 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -29,6 +29,7 @@
#include "../seq_lock.h"
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
/*
@@ -315,6 +316,7 @@ get_mididev(struct seq_oss_devinfo *dp, int dev)
{
if (dev < 0 || dev >= dp->max_mididev)
return NULL;
+ dev = array_index_nospec(dev, dp->max_mididev);
return get_mdev(dev);
}
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index b16dbef04174..ea545f9291b4 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
/*
* constants
@@ -339,17 +340,13 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
dp->max_synthdev = 0;
}
-/*
- * check if the specified device is MIDI mapped device
- */
-static int
-is_midi_dev(struct seq_oss_devinfo *dp, int dev)
+static struct seq_oss_synthinfo *
+get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
{
if (dev < 0 || dev >= dp->max_synthdev)
- return 0;
- if (dp->synths[dev].is_midi)
- return 1;
- return 0;
+ return NULL;
+ dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
+ return &dp->synths[dev];
}
/*
@@ -359,14 +356,20 @@ static struct seq_oss_synth *
get_synthdev(struct seq_oss_devinfo *dp, int dev)
{
struct seq_oss_synth *rec;
- if (dev < 0 || dev >= dp->max_synthdev)
- return NULL;
- if (! dp->synths[dev].opened)
+ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
+
+ if (!info)
return NULL;
- if (dp->synths[dev].is_midi)
- return &midi_synth_dev;
- if ((rec = get_sdev(dev)) == NULL)
+ if (!info->opened)
return NULL;
+ if (info->is_midi) {
+ rec = &midi_synth_dev;
+ snd_use_lock_use(&rec->use_lock);
+ } else {
+ rec = get_sdev(dev);
+ if (!rec)
+ return NULL;
+ }
if (! rec->opened) {
snd_use_lock_free(&rec->use_lock);
return NULL;
@@ -402,10 +405,8 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
struct seq_oss_synth *rec;
struct seq_oss_synthinfo *info;
- if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev))
- return;
- info = &dp->synths[dev];
- if (! info->opened)
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info || !info->opened)
return;
if (info->sysex)
info->sysex->len = 0; /* reset sysex */
@@ -454,12 +455,14 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
const char __user *buf, int p, int c)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info;
int rc;
- if (dev < 0 || dev >= dp->max_synthdev)
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info)
return -ENXIO;
- if (is_midi_dev(dp, dev))
+ if (info->is_midi)
return 0;
if ((rec = get_synthdev(dp, dev)) == NULL)
return -ENXIO;
@@ -467,24 +470,25 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
if (rec->oper.load_patch == NULL)
rc = -ENXIO;
else
- rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c);
+ rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
snd_use_lock_free(&rec->use_lock);
return rc;
}
/*
- * check if the device is valid synth device
+ * check if the device is a valid synth device and return the synth info
*/
-int
-snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev)
+struct seq_oss_synthinfo *
+snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
{
struct seq_oss_synth *rec;
+
rec = get_synthdev(dp, dev);
if (rec) {
snd_use_lock_free(&rec->use_lock);
- return 1;
+ return get_synthinfo_nospec(dp, dev);
}
- return 0;
+ return NULL;
}
@@ -499,16 +503,18 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
int i, send;
unsigned char *dest;
struct seq_oss_synth_sysex *sysex;
+ struct seq_oss_synthinfo *info;
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- sysex = dp->synths[dev].sysex;
+ sysex = info->sysex;
if (sysex == NULL) {
sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
if (sysex == NULL)
return -ENOMEM;
- dp->synths[dev].sysex = sysex;
+ info->sysex = sysex;
}
send = 0;
@@ -553,10 +559,12 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
int
snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
+
+ if (!info)
return -EINVAL;
- snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client,
- dp->synths[dev].arg.addr.port);
+ snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
+ info->arg.addr.port);
return 0;
}
@@ -568,16 +576,18 @@ int
snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info;
int rc;
- if (is_midi_dev(dp, dev))
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info || info->is_midi)
return -ENXIO;
if ((rec = get_synthdev(dp, dev)) == NULL)
return -ENXIO;
if (rec->oper.ioctl == NULL)
rc = -ENXIO;
else
- rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr);
+ rc = rec->oper.ioctl(&info->arg, cmd, addr);
snd_use_lock_free(&rec->use_lock);
return rc;
}
@@ -589,7 +599,10 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u
int
snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev))
+ struct seq_oss_synthinfo *info;
+
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info || info->is_midi)
return -ENXIO;
ev->type = SNDRV_SEQ_EVENT_OSS;
memcpy(ev->data.raw8.d, data, 8);
diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
index 74ac55f166b6..a63f9e22974d 100644
--- a/sound/core/seq/oss/seq_oss_synth.h
+++ b/sound/core/seq/oss/seq_oss_synth.h
@@ -37,7 +37,8 @@ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp);
void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
const char __user *buf, int p, int c);
-int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev);
+struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
+ int dev);
int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
struct snd_seq_event *ev);
int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index ddcc1a325a61..42920a243328 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/nospec.h>
#include <sound/opl3.h>
#include <sound/asound_fm.h>
@@ -448,7 +449,7 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
{
unsigned short reg_side;
unsigned char op_offset;
- unsigned char voice_offset;
+ unsigned char voice_offset, voice_op;
unsigned short opl3_reg;
unsigned char reg_val;
@@ -473,7 +474,9 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
voice_offset = voice->voice - MAX_OPL2_VOICES;
}
/* Get register offset of operator */
- op_offset = snd_opl3_regmap[voice_offset][voice->op];
+ voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
+ voice_op = array_index_nospec(voice->op, 4);
+ op_offset = snd_opl3_regmap[voice_offset][voice_op];
reg_val = 0x00;
/* Set amplitude modulation (tremolo) effect */
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 7eb617175fde..a31a70dccecf 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -23,6 +23,7 @@
#include "hpi_internal.h"
#include "hpimsginit.h"
+#include <linux/nospec.h>
/* The actual message size for each object type */
static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
@@ -39,10 +40,12 @@ static void hpi_init_message(struct hpi_message *phm, u16 object,
{
u16 size;
- if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+ object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
size = msg_size[object];
- else
+ } else {
size = sizeof(*phm);
+ }
memset(phm, 0, size);
phm->size = size;
@@ -66,10 +69,12 @@ void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
{
u16 size;
- if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+ object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
size = res_size[object];
- else
+ } else {
size = sizeof(*phr);
+ }
memset(phr, 0, sizeof(*phr));
phr->size = size;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index d17937b92331..7a32abbe0cef 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -33,6 +33,7 @@
#include <linux/stringify.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/nospec.h>
#ifdef MODULE_FIRMWARE
MODULE_FIRMWARE("asihpi/dsp5000.bin");
@@ -182,7 +183,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct hpi_adapter *pa = NULL;
if (hm->h.adapter_index < ARRAY_SIZE(adapters))
- pa = &adapters[hm->h.adapter_index];
+ pa = &adapters[array_index_nospec(hm->h.adapter_index,
+ ARRAY_SIZE(adapters))];
if (!pa || !pa->adapter || !pa->adapter->type) {
hpi_init_response(&hr->r0, hm->h.object,
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 57df06e76968..cc009a4a3d1d 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
@@ -51,7 +52,16 @@ static int get_wcap_ioctl(struct hda_codec *codec,
if (get_user(verb, &arg->verb))
return -EFAULT;
- res = get_wcaps(codec, verb >> 24);
+ /* open-code get_wcaps(verb>>24) with nospec */
+ verb >>= 24;
+ if (verb < codec->core.start_nid ||
+ verb >= codec->core.start_nid + codec->core.num_nodes) {
+ res = 0;
+ } else {
+ verb -= codec->core.start_nid;
+ verb = array_index_nospec(verb, codec->core.num_nodes);
+ res = codec->wcaps[verb];
+ }
if (put_user(res, &arg->res))
return -EFAULT;
return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fbd00821e326..3be91696ac35 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1549,7 +1549,8 @@ static void azx_check_snoop_available(struct azx *chip)
*/
u8 val;
pci_read_config_byte(chip->pci, 0x42, &val);
- if (!(val & 0x80) && chip->pci->revision == 0x30)
+ if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
+ chip->pci->revision == 0x20))
snoop = false;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8cb14e27988b..6a789278970e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0233:
+ case 0x10ec0235:
case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
@@ -6296,6 +6297,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0298:
spec->codec_variant = ALC269_TYPE_ALC298;
break;
+ case 0x10ec0235:
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
break;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index a4a999a0317e..1a0c0d16a279 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -137,6 +137,7 @@
#include <linux/pci.h>
#include <linux/math64.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -5692,40 +5693,43 @@ static int snd_hdspm_channel_info(struct snd_pcm_substream *substream,
struct snd_pcm_channel_info *info)
{
struct hdspm *hdspm = snd_pcm_substream_chip(substream);
+ unsigned int channel = info->channel;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) {
+ if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: output channel out of range (%d)\n",
- info->channel);
+ channel);
return -EINVAL;
}
- if (hdspm->channel_map_out[info->channel] < 0) {
+ channel = array_index_nospec(channel, hdspm->max_channels_out);
+ if (hdspm->channel_map_out[channel] < 0) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: output channel %d mapped out\n",
- info->channel);
+ channel);
return -EINVAL;
}
- info->offset = hdspm->channel_map_out[info->channel] *
+ info->offset = hdspm->channel_map_out[channel] *
HDSPM_CHANNEL_BUFFER_BYTES;
} else {
- if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) {
+ if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: input channel out of range (%d)\n",
- info->channel);
+ channel);
return -EINVAL;
}
- if (hdspm->channel_map_in[info->channel] < 0) {
+ channel = array_index_nospec(channel, hdspm->max_channels_in);
+ if (hdspm->channel_map_in[channel] < 0) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: input channel %d mapped out\n",
- info->channel);
+ channel);
return -EINVAL;
}
- info->offset = hdspm->channel_map_in[info->channel] *
+ info->offset = hdspm->channel_map_in[channel] *
HDSPM_CHANNEL_BUFFER_BYTES;
}
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index fdbc0aa2776a..c253bdf92e36 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -2036,9 +2037,10 @@ static int snd_rme9652_channel_info(struct snd_pcm_substream *substream,
if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
return -EINVAL;
- if ((chn = rme9652->channel_map[info->channel]) < 0) {
+ chn = rme9652->channel_map[array_index_nospec(info->channel,
+ RME9652_NCHANNELS)];
+ if (chn < 0)
return -EINVAL;
- }
info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
info->first = 0;
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 55eef61a01de..fe455c9b8c25 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3805,12 +3805,11 @@ static int msm_anlg_cdc_device_down(struct snd_soc_codec *codec)
}
msm_anlg_cdc_boost_off(codec);
sdm660_cdc_priv->hph_mode = NORMAL_MODE;
-
- /* 40ms to allow boost to discharge */
- msleep(40);
/* Disable PA to avoid pop during codec bring up */
snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_RX_HPH_CNP_EN,
0x30, 0x00);
+ /* 40ms to allow boost to discharge */
+ msleep(40);
snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_SPKR_DRV_CTL,
0x80, 0x00);
snd_soc_write(codec,
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 4452fea0b118..bd4998f577a0 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -54,10 +54,17 @@ struct ssm2602_priv {
* using 2 wire for device control, so we cache them instead.
* There is no point in caching the reset register
*/
-static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = {
- 0x0097, 0x0097, 0x0079, 0x0079,
- 0x000a, 0x0008, 0x009f, 0x000a,
- 0x0000, 0x0000
+static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
+ { .reg = 0x00, .def = 0x0097 },
+ { .reg = 0x01, .def = 0x0097 },
+ { .reg = 0x02, .def = 0x0079 },
+ { .reg = 0x03, .def = 0x0079 },
+ { .reg = 0x04, .def = 0x000a },
+ { .reg = 0x05, .def = 0x0008 },
+ { .reg = 0x06, .def = 0x009f },
+ { .reg = 0x07, .def = 0x000a },
+ { .reg = 0x08, .def = 0x0000 },
+ { .reg = 0x09, .def = 0x0000 }
};
@@ -618,8 +625,8 @@ const struct regmap_config ssm2602_regmap_config = {
.volatile_reg = ssm2602_register_volatile,
.cache_type = REGCACHE_RBTREE,
- .reg_defaults_raw = ssm2602_reg,
- .num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg),
+ .reg_defaults = ssm2602_reg,
+ .num_reg_defaults = ARRAY_SIZE(ssm2602_reg),
};
EXPORT_SYMBOL_GPL(ssm2602_regmap_config);
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 59f234e51971..e8adead8be00 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -143,6 +143,13 @@ static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio,
psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
+ /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
+ if (ratio <= 256) {
+ pm = ratio;
+ fp = 1;
+ goto out;
+ }
+
/* Set the max fluctuation -- 0.1% of the max divisor */
savesub = (psr ? 1 : 8) * 256 * maxfp / 1000;
diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index a74c64c7053c..e83da42a8c03 100644
--- a/sound/soc/intel/atom/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -221,7 +221,7 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx,
sst_free_block(sst_drv_ctx, block);
out:
test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
- return 0;
+ return ret;
}
/*
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 38d65a3529c4..44d560966e9c 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -96,6 +96,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
platform_clock_control, SND_SOC_DAPM_POST_PMD),
@@ -106,6 +107,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
{"IN1N", NULL, "Headset Mic"},
{"DMIC L1", NULL, "Int Mic"},
{"DMIC R1", NULL, "Int Mic"},
+ {"IN2P", NULL, "Int Analog Mic"},
+ {"IN2N", NULL, "Int Analog Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
{"Ext Spk", NULL, "SPOL"},
@@ -119,6 +122,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
{"Headphone", NULL, "Platform Clock"},
{"Headset Mic", NULL, "Platform Clock"},
{"Int Mic", NULL, "Platform Clock"},
+ {"Int Analog Mic", NULL, "Platform Clock"},
+ {"Int Analog Mic", NULL, "micbias1"},
+ {"Int Analog Mic", NULL, "micbias2"},
{"Ext Spk", NULL, "Platform Clock"},
};
@@ -147,6 +153,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
SOC_DAPM_PIN_SWITCH("Ext Spk"),
};
diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c
index d6473cccf496..a0c5ef0dce6d 100644
--- a/sound/soc/msm/apq8096-auto.c
+++ b/sound/soc/msm/apq8096-auto.c
@@ -5122,20 +5122,22 @@ static struct snd_soc_dai_link apq8096_common_dai_links[] = {
.be_id = MSM_FRONTEND_DAI_LSM7,
},
{
- .name = "Listen 8 Audio Service",
- .stream_name = "Listen 8 Audio Service",
- .cpu_dai_name = "LSM8",
- .platform_name = "msm-lsm-client",
+ .name = "MSM8996 LowLatency2",
+ .stream_name = "MultiMedia22",
+ .cpu_dai_name = "MultiMedia22",
+ .platform_name = "msm-pcm-dsp.1",
.dynamic = 1,
- .dpcm_capture = 1,
- .trigger = { SND_SOC_DPCM_TRIGGER_POST,
- SND_SOC_DPCM_TRIGGER_POST },
- .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
- .ignore_suspend = 1,
- .ignore_pmdown_time = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM8,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dai link has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA22,
+ .ops = &apq8096_ll_ops,
},
{
.name = "MSM8996 LowLatency Loopback",
@@ -5697,6 +5699,22 @@ static struct snd_soc_dai_link apq8096_auto_fe_dai_links[] = {
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "DTMF RX Hostless",
+ .stream_name = "DTMF RX Hostless",
+ .cpu_dai_name = "DTMF_RX_HOSTLESS",
+ .platform_name = "msm-pcm-dtmf",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .be_id = MSM_FRONTEND_DAI_DTMF_RX,
}
};
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 15134a0d662e..69951e12ecb1 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -3748,9 +3748,8 @@ static int msm_compr_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol,
goto done;
}
-
- if ((sizeof(struct msm_adsp_event_data) + event_data->payload_len) >=
- sizeof(ucontrol->value.bytes.data)) {
+ if (event_data->payload_len > sizeof(ucontrol->value.bytes.data)
+ - sizeof(struct msm_adsp_event_data)) {
pr_err("%s param length=%d exceeds limit",
__func__, event_data->payload_len);
ret = -EINVAL;
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 26b40d2081f0..8098db80194d 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -91,86 +91,6 @@ enum {
RATE_MAX_NUM_OF_AUX_PCM_RATES,
};
-enum {
- IDX_PRIMARY_TDM_RX_0,
- IDX_PRIMARY_TDM_RX_1,
- IDX_PRIMARY_TDM_RX_2,
- IDX_PRIMARY_TDM_RX_3,
- IDX_PRIMARY_TDM_RX_4,
- IDX_PRIMARY_TDM_RX_5,
- IDX_PRIMARY_TDM_RX_6,
- IDX_PRIMARY_TDM_RX_7,
- IDX_PRIMARY_TDM_TX_0,
- IDX_PRIMARY_TDM_TX_1,
- IDX_PRIMARY_TDM_TX_2,
- IDX_PRIMARY_TDM_TX_3,
- IDX_PRIMARY_TDM_TX_4,
- IDX_PRIMARY_TDM_TX_5,
- IDX_PRIMARY_TDM_TX_6,
- IDX_PRIMARY_TDM_TX_7,
- IDX_SECONDARY_TDM_RX_0,
- IDX_SECONDARY_TDM_RX_1,
- IDX_SECONDARY_TDM_RX_2,
- IDX_SECONDARY_TDM_RX_3,
- IDX_SECONDARY_TDM_RX_4,
- IDX_SECONDARY_TDM_RX_5,
- IDX_SECONDARY_TDM_RX_6,
- IDX_SECONDARY_TDM_RX_7,
- IDX_SECONDARY_TDM_TX_0,
- IDX_SECONDARY_TDM_TX_1,
- IDX_SECONDARY_TDM_TX_2,
- IDX_SECONDARY_TDM_TX_3,
- IDX_SECONDARY_TDM_TX_4,
- IDX_SECONDARY_TDM_TX_5,
- IDX_SECONDARY_TDM_TX_6,
- IDX_SECONDARY_TDM_TX_7,
- IDX_TERTIARY_TDM_RX_0,
- IDX_TERTIARY_TDM_RX_1,
- IDX_TERTIARY_TDM_RX_2,
- IDX_TERTIARY_TDM_RX_3,
- IDX_TERTIARY_TDM_RX_4,
- IDX_TERTIARY_TDM_RX_5,
- IDX_TERTIARY_TDM_RX_6,
- IDX_TERTIARY_TDM_RX_7,
- IDX_TERTIARY_TDM_TX_0,
- IDX_TERTIARY_TDM_TX_1,
- IDX_TERTIARY_TDM_TX_2,
- IDX_TERTIARY_TDM_TX_3,
- IDX_TERTIARY_TDM_TX_4,
- IDX_TERTIARY_TDM_TX_5,
- IDX_TERTIARY_TDM_TX_6,
- IDX_TERTIARY_TDM_TX_7,
- IDX_QUATERNARY_TDM_RX_0,
- IDX_QUATERNARY_TDM_RX_1,
- IDX_QUATERNARY_TDM_RX_2,
- IDX_QUATERNARY_TDM_RX_3,
- IDX_QUATERNARY_TDM_RX_4,
- IDX_QUATERNARY_TDM_RX_5,
- IDX_QUATERNARY_TDM_RX_6,
- IDX_QUATERNARY_TDM_RX_7,
- IDX_QUATERNARY_TDM_TX_0,
- IDX_QUATERNARY_TDM_TX_1,
- IDX_QUATERNARY_TDM_TX_2,
- IDX_QUATERNARY_TDM_TX_3,
- IDX_QUATERNARY_TDM_TX_4,
- IDX_QUATERNARY_TDM_TX_5,
- IDX_QUATERNARY_TDM_TX_6,
- IDX_QUATERNARY_TDM_TX_7,
- IDX_TDM_MAX,
-};
-
-enum {
- IDX_GROUP_PRIMARY_TDM_RX,
- IDX_GROUP_PRIMARY_TDM_TX,
- IDX_GROUP_SECONDARY_TDM_RX,
- IDX_GROUP_SECONDARY_TDM_TX,
- IDX_GROUP_TERTIARY_TDM_RX,
- IDX_GROUP_TERTIARY_TDM_TX,
- IDX_GROUP_QUATERNARY_TDM_RX,
- IDX_GROUP_QUATERNARY_TDM_TX,
- IDX_GROUP_TDM_MAX,
-};
-
struct msm_dai_q6_dai_data {
DECLARE_BITMAP(status_mask, STATUS_MAX);
DECLARE_BITMAP(hwfree_status, STATUS_MAX);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-dtmf-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-dtmf-v2.c
index ea7989cb5a96..abbcfed39152 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-dtmf-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-dtmf-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -97,12 +97,12 @@ static int msm_dtmf_rx_generate_put(struct snd_kcontrol *kcontrol,
{
uint16_t low_freq = ucontrol->value.integer.value[0];
uint16_t high_freq = ucontrol->value.integer.value[1];
- int64_t duration = ucontrol->value.integer.value[2];
+ int16_t duration = ucontrol->value.integer.value[2];
uint16_t gain = ucontrol->value.integer.value[3];
pr_debug("%s: low_freq=%d high_freq=%d duration=%d gain=%d\n",
__func__, low_freq, high_freq, (int)duration, gain);
- afe_dtmf_generate_rx(duration, high_freq, low_freq, gain);
+ afe_dtmf_generate_rx((int64_t) duration, high_freq, low_freq, gain);
return 0;
}
@@ -154,7 +154,7 @@ static int msm_dtmf_detect_volte_rx_get(struct snd_kcontrol *kcontrol,
static struct snd_kcontrol_new msm_dtmf_controls[] = {
SOC_SINGLE_MULTI_EXT("DTMF_Generate Rx Low High Duration Gain",
- SND_SOC_NOPM, 0, 5000, 0, 4,
+ SND_SOC_NOPM, 0, 65535, 0, 4,
msm_dtmf_rx_generate_get,
msm_dtmf_rx_generate_put),
SOC_SINGLE_EXT("DTMF_Detect Rx Voice enable", SND_SOC_NOPM, 0, 1, 0,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
index 5d3d24b058d3..9af5de2952d4 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
@@ -847,7 +847,7 @@ static int msm_pcm_channel_mixer_output_map_ctl_put(
}
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
chmixer_pspd->out_ch_map[i] =
ucontrol->value.integer.value[i];
@@ -878,7 +878,7 @@ static int msm_pcm_channel_mixer_output_map_ctl_get(
}
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
ucontrol->value.integer.value[i] =
chmixer_pspd->out_ch_map[i];
return 0;
@@ -908,7 +908,7 @@ static int msm_pcm_channel_mixer_input_map_ctl_put(
}
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
chmixer_pspd->in_ch_map[i] = ucontrol->value.integer.value[i];
return 0;
@@ -938,7 +938,7 @@ static int msm_pcm_channel_mixer_input_map_ctl_get(
}
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
ucontrol->value.integer.value[i] =
chmixer_pspd->in_ch_map[i];
return 0;
@@ -969,13 +969,13 @@ static int msm_pcm_channel_mixer_weight_ctl_put(
}
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL) {
+ if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL_V2) {
pr_err("%s: invalid channel number %d\n", __func__, channel);
return -EINVAL;
}
channel--;
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
chmixer_pspd->channel_weight[channel][i] =
ucontrol->value.integer.value[i];
return 0;
@@ -1005,14 +1005,14 @@ static int msm_pcm_channel_mixer_weight_ctl_get(
return -EINVAL;
}
- if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL) {
+ if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL_V2) {
pr_err("%s: invalid channel number %d\n", __func__, channel);
return -EINVAL;
}
channel--;
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
ucontrol->value.integer.value[i] =
chmixer_pspd->channel_weight[channel][i];
return 0;
@@ -1462,7 +1462,7 @@ static int msm_pcm_add_channel_mixer_controls(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: pcm add channel mixer output map controls failed:%d\n",
__func__, ret);
- for (i = 1; i <= PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 1; i <= PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
ret |= msm_pcm_add_channel_mixer_weight_controls(rtd, i);
if (ret)
pr_err("%s: pcm add channel mixer weight controls failed:%d\n",
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index a0364bbdfeb9..280c665dded4 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -1165,8 +1165,8 @@ static int msm_pcm_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol,
goto done;
}
- if ((sizeof(struct msm_adsp_event_data) + event_data->payload_len) >=
- sizeof(ucontrol->value.bytes.data)) {
+ if (event_data->payload_len > sizeof(ucontrol->value.bytes.data)
+ - sizeof(struct msm_adsp_event_data)) {
pr_err("%s param length=%d exceeds limit",
__func__, event_data->payload_len);
ret = -EINVAL;
@@ -2263,7 +2263,7 @@ static int msm_pcm_channel_mixer_output_map_ctl_put(
}
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
chmixer_pspd->out_ch_map[i] =
ucontrol->value.integer.value[i];
@@ -2294,7 +2294,7 @@ static int msm_pcm_channel_mixer_output_map_ctl_get(
}
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
ucontrol->value.integer.value[i] =
chmixer_pspd->out_ch_map[i];
return 0;
@@ -2324,7 +2324,7 @@ static int msm_pcm_channel_mixer_input_map_ctl_put(
}
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
chmixer_pspd->in_ch_map[i] = ucontrol->value.integer.value[i];
return 0;
@@ -2354,7 +2354,7 @@ static int msm_pcm_channel_mixer_input_map_ctl_get(
}
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
ucontrol->value.integer.value[i] =
chmixer_pspd->in_ch_map[i];
return 0;
@@ -2385,13 +2385,13 @@ static int msm_pcm_channel_mixer_weight_ctl_put(
}
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL) {
+ if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL_V2) {
pr_err("%s: invalid channel number %d\n", __func__, channel);
return -EINVAL;
}
channel--;
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
chmixer_pspd->channel_weight[channel][i] =
ucontrol->value.integer.value[i];
return 0;
@@ -2421,14 +2421,14 @@ static int msm_pcm_channel_mixer_weight_ctl_get(
return -EINVAL;
}
- if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL) {
+ if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL_V2) {
pr_err("%s: invalid channel number %d\n", __func__, channel);
return -EINVAL;
}
channel--;
chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]);
- for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
ucontrol->value.integer.value[i] =
chmixer_pspd->channel_weight[channel][i];
return 0;
@@ -2878,7 +2878,7 @@ static int msm_pcm_add_channel_mixer_controls(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: pcm add channel mixer output map controls failed:%d\n",
__func__, ret);
- for (i = 1; i <= PCM_FORMAT_MAX_NUM_CHANNEL; i++)
+ for (i = 1; i <= PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++)
ret |= msm_pcm_add_channel_mixer_weight_controls(rtd, i);
if (ret)
pr_err("%s: pcm add channel mixer weight controls failed:%d\n",
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 2b712bee0bc1..4e4970b7be33 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -5844,6 +5844,9 @@ static const struct snd_kcontrol_new pri_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -5901,6 +5904,9 @@ static const struct snd_kcontrol_new pri_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -5958,6 +5964,9 @@ static const struct snd_kcontrol_new pri_tdm_rx_2_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6015,6 +6024,9 @@ static const struct snd_kcontrol_new pri_tdm_rx_3_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6123,6 +6135,9 @@ static const struct snd_kcontrol_new sec_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6180,6 +6195,9 @@ static const struct snd_kcontrol_new sec_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6237,6 +6255,9 @@ static const struct snd_kcontrol_new sec_tdm_rx_2_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6294,6 +6315,9 @@ static const struct snd_kcontrol_new sec_tdm_rx_3_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6402,6 +6426,9 @@ static const struct snd_kcontrol_new tert_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6510,6 +6537,9 @@ static const struct snd_kcontrol_new tert_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6567,6 +6597,9 @@ static const struct snd_kcontrol_new tert_tdm_rx_2_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6624,6 +6657,9 @@ static const struct snd_kcontrol_new tert_tdm_rx_3_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6681,6 +6717,9 @@ static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_4,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_4,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6741,6 +6780,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6852,6 +6894,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6912,6 +6957,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -6972,6 +7020,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -8865,6 +8916,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_2_voice_mixer_controls[] = {
SOC_SINGLE_EXT("VoiceMMode1", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
MSM_FRONTEND_DAI_VOICEMMODE1, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+ MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new stub_rx_mixer_controls[] = {
@@ -13780,6 +13834,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"},
@@ -13800,6 +13855,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"PRI_TDM_RX_1 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_TDM_RX_1", NULL, "PRI_TDM_RX_1 Audio Mixer"},
@@ -13820,6 +13876,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"PRI_TDM_RX_2 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_TDM_RX_2", NULL, "PRI_TDM_RX_2 Audio Mixer"},
@@ -13840,6 +13897,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"PRI_TDM_RX_3 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_TDM_RX_3", NULL, "PRI_TDM_RX_3 Audio Mixer"},
@@ -13878,6 +13936,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"},
@@ -13898,6 +13957,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SEC_TDM_RX_1 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_TDM_RX_1", NULL, "SEC_TDM_RX_1 Audio Mixer"},
@@ -13918,6 +13978,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SEC_TDM_RX_2 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_TDM_RX_2", NULL, "SEC_TDM_RX_2 Audio Mixer"},
@@ -13938,6 +13999,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SEC_TDM_RX_3 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_TDM_RX_3", NULL, "SEC_TDM_RX_3 Audio Mixer"},
@@ -13976,6 +14038,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"TERT_TDM_RX_0 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_TDM_RX_0", NULL, "TERT_TDM_RX_0 Audio Mixer"},
@@ -14014,6 +14077,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"TERT_TDM_RX_1 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_TDM_RX_1", NULL, "TERT_TDM_RX_1 Audio Mixer"},
@@ -14034,6 +14098,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"TERT_TDM_RX_2 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_TDM_RX_2", NULL, "TERT_TDM_RX_2 Audio Mixer"},
@@ -14054,6 +14119,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"TERT_TDM_RX_3 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3 Audio Mixer"},
@@ -14074,6 +14140,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_TDM_RX_4", NULL, "TERT_TDM_RX_4 Audio Mixer"},
@@ -14095,47 +14162,10 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia20", "MM_DL20"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0 Audio Mixer"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
- {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
- {"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"},
-
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
- {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
- {"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"},
-
{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -14172,6 +14202,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia20", "MM_DL20"},
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"QUAT_TDM_RX_1", NULL, "QUAT_TDM_RX_1 Audio Mixer"},
@@ -14193,6 +14224,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia20", "MM_DL20"},
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2 Audio Mixer"},
@@ -14214,6 +14246,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia20", "MM_DL20"},
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia22", "MM_DL22"},
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"QUAT_TDM_RX_3", NULL, "QUAT_TDM_RX_3 Audio Mixer"},
@@ -14924,6 +14957,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUIN_MI2S_RX", NULL, "QUIN_MI2S_RX_Voice Mixer"},
{"QUAT_TDM_RX_2_Voice Mixer", "VoiceMMode1", "VOICEMMODE1_DL"},
+ {"QUAT_TDM_RX_2_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2_Voice Mixer"},
{"VOC_EXT_EC MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
index 1ddb3845cd40..e890e6a71fb3 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
@@ -982,8 +982,9 @@ int msm_adsp_inform_mixer_ctl(struct snd_soc_pcm_runtime *rtd,
event_data = (struct msm_adsp_event_data *)payload;
kctl->info(kctl, &kctl_info);
- if (sizeof(struct msm_adsp_event_data)
- + event_data->payload_len > kctl_info.count) {
+
+ if (event_data->payload_len >
+ kctl_info.count - sizeof(struct msm_adsp_event_data)) {
pr_err("%s: payload length exceeds limit of %u bytes.\n",
__func__, kctl_info.count);
ret = -EINVAL;
diff --git a/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
index 72dd751bb0d8..3b53614aba31 100644
--- a/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
@@ -692,9 +692,8 @@ static int msm_transcode_stream_cmd_put(struct snd_kcontrol *kcontrol,
goto done;
}
-
- if ((sizeof(struct msm_adsp_event_data) + event_data->payload_len) >=
- sizeof(ucontrol->value.bytes.data)) {
+ if (event_data->payload_len > sizeof(ucontrol->value.bytes.data)
+ - sizeof(struct msm_adsp_event_data)) {
pr_err("%s param length=%d exceeds limit",
__func__, event_data->payload_len);
ret = -EINVAL;
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 4b76f1b8af1d..24d4199ac30b 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -454,7 +454,7 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id,
int channel_index)
{
struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL;
- struct param_hdr_v3 data_v5 = {0,};
+ struct param_hdr_v1 data_v5 = {0,};
int ret = 0, port_idx, sz = 0, param_size = 0;
u16 *adm_pspd_params;
u16 *ptr;
@@ -511,7 +511,7 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id,
data_v5.param_size = param_size;
adm_params->payload_size =
sizeof(struct default_chmixer_param_id_coeff) +
- sizeof(struct param_hdr_v3) + data_v5.param_size;
+ sizeof(struct param_hdr_v1) + data_v5.param_size;
adm_pspd_params = (u16 *)((u8 *)adm_params +
sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5));
memcpy(adm_pspd_params, &data_v5, sizeof(data_v5));
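Switching data_v5 from struct param_hdr_v3 to struct param_hdr_v1 makes the header copied in front of the channel-mixer coefficients, and the payload_size derived from it, match the layout this ADM set-param command appears to expect. The layouts below are an assumption based on the surrounding qdsp6v2 code, not something this diff shows, so check the adsp headers before relying on them:

#include <linux/types.h>

/* Assumed layouts -- the v3 header carries an instance id and a wider
 * param_size, so it is larger than the v1 header it replaces here. */
struct param_hdr_v1_example {
	u32 module_id;
	u32 param_id;
	u16 param_size;
	u16 reserved;
};

struct param_hdr_v3_example {
	u32 module_id;
	u16 instance_id;
	u16 reserved;
	u32 param_id;
	u32 param_size;
};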
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 352ea9257832..38dc3639a682 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -124,6 +124,10 @@ struct afe_ctl {
int set_custom_topology;
int dev_acdb_id[AFE_MAX_PORTS];
routing_cb rt_cb;
+ int num_alloced_rddma;
+ bool alloced_rddma[AFE_MAX_RDDMA];
+ int num_alloced_wrdma;
+ bool alloced_wrdma[AFE_MAX_WRDMA];
};
static atomic_t afe_ports_mad_type[SLIMBUS_PORT_LAST - SLIMBUS_0_RX];
@@ -178,6 +182,42 @@ done:
return ret;
}
+static atomic_t tdm_gp_en_ref[IDX_GROUP_TDM_MAX];
+
+static int afe_get_tdm_group_idx(u16 group_id)
+{
+ int gp_idx = -1;
+
+ switch (group_id) {
+ case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX:
+ gp_idx = IDX_GROUP_PRIMARY_TDM_RX;
+ break;
+ case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX:
+ gp_idx = IDX_GROUP_PRIMARY_TDM_TX;
+ break;
+ case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX:
+ gp_idx = IDX_GROUP_SECONDARY_TDM_RX;
+ break;
+ case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX:
+ gp_idx = IDX_GROUP_SECONDARY_TDM_TX;
+ break;
+ case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX:
+ gp_idx = IDX_GROUP_TERTIARY_TDM_RX;
+ break;
+ case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX:
+ gp_idx = IDX_GROUP_TERTIARY_TDM_TX;
+ break;
+ case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX:
+ gp_idx = IDX_GROUP_QUATERNARY_TDM_RX;
+ break;
+ case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX:
+ gp_idx = IDX_GROUP_QUATERNARY_TDM_TX;
+ break;
+ }
+
+ return gp_idx;
+}
+
int afe_get_topology(int port_id)
{
int topology;
@@ -349,6 +389,99 @@ static int32_t sp_make_afe_callback(uint32_t opcode, uint32_t *payload,
return 0;
}
+static int32_t afe_lpass_resources_callback(struct apr_client_data *data)
+{
+ uint8_t *payload = data->payload;
+ struct afe_cmdrsp_request_lpass_resources *resources =
+ (struct afe_cmdrsp_request_lpass_resources *) payload;
+ struct afe_cmdrsp_request_lpass_dma_resources *dma_resources = NULL;
+ uint8_t *dma_channels_id_payload = NULL;
+
+ if (!payload || (data->token >= AFE_MAX_PORTS)) {
+ pr_err("%s: Error: size %d payload %pK token %d\n",
+ __func__, data->payload_size,
+ payload, data->token);
+ atomic_set(&this_afe.status, ADSP_EBADPARAM);
+ return -EINVAL;
+ }
+
+ if (resources->status != 0) {
+ pr_debug("%s: Error: Requesting LPASS resources ret %d\n",
+ __func__, resources->status);
+ atomic_set(&this_afe.status, ADSP_EBADPARAM);
+ return -EINVAL;
+ }
+
+ if (resources->resource_id == AFE_LPAIF_DMA_RESOURCE_ID) {
+ int i;
+
+ payload += sizeof(
+ struct afe_cmdrsp_request_lpass_resources);
+ dma_resources = (struct
+ afe_cmdrsp_request_lpass_dma_resources *)
+ payload;
+
+ pr_debug("%s: DMA Type allocated = %d\n",
+ __func__,
+ dma_resources->dma_type);
+
+ if (dma_resources->num_read_dma_channels > AFE_MAX_RDDMA) {
+ pr_err("%s: Allocated Read DMA %d exceeds max %d\n",
+ __func__,
+ dma_resources->num_read_dma_channels,
+ AFE_MAX_RDDMA);
+ dma_resources->num_read_dma_channels = AFE_MAX_RDDMA;
+ }
+
+ if (dma_resources->num_write_dma_channels > AFE_MAX_WRDMA) {
+ pr_err("%s: Allocated Write DMA %d exceeds max %d\n",
+ __func__,
+ dma_resources->num_write_dma_channels,
+ AFE_MAX_WRDMA);
+ dma_resources->num_write_dma_channels = AFE_MAX_WRDMA;
+ }
+
+ this_afe.num_alloced_rddma =
+ dma_resources->num_read_dma_channels;
+ this_afe.num_alloced_wrdma =
+ dma_resources->num_write_dma_channels;
+
+ pr_debug("%s: Number of allocated Read DMA channels= %d\n",
+ __func__,
+ dma_resources->num_read_dma_channels);
+ pr_debug("%s: Number of allocated Write DMA channels= %d\n",
+ __func__,
+ dma_resources->num_write_dma_channels);
+
+ payload += sizeof(
+ struct afe_cmdrsp_request_lpass_dma_resources);
+ dma_channels_id_payload = payload;
+
+ for (i = 0; i < this_afe.num_alloced_rddma; i++) {
+ pr_debug("%s: Read DMA Index %d allocated\n",
+ __func__, *dma_channels_id_payload);
+ this_afe.alloced_rddma
+ [*dma_channels_id_payload] = 1;
+ dma_channels_id_payload++;
+ }
+
+ for (i = 0; i < this_afe.num_alloced_wrdma; i++) {
+ pr_debug("%s: Write DMA Index %d allocated\n",
+ __func__, *dma_channels_id_payload);
+ this_afe.alloced_wrdma
+ [*dma_channels_id_payload] = 1;
+ dma_channels_id_payload++;
+ }
+ } else {
+ pr_err("%s: Error: Unknown resource ID %d",
+ __func__, resources->resource_id);
+ atomic_set(&this_afe.status, ADSP_EBADPARAM);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int32_t afe_callback(struct apr_client_data *data, void *priv)
{
if (!data) {
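afe_lpass_resources_callback() above walks the APR response in three pieces: the generic afe_cmdrsp_request_lpass_resources header, then (for AFE_LPAIF_DMA_RESOURCE_ID) an afe_cmdrsp_request_lpass_dma_resources block, then one byte per granted DMA channel index, which is folded into the alloced_rddma/alloced_wrdma tables. A sketch of the assumed payload layering (the field names are the ones dereferenced above; the real struct definitions live in the AFE headers):

/*
 * payload
 * +--------------------------------------------+
 * | afe_cmdrsp_request_lpass_resources         |  status, resource_id
 * +--------------------------------------------+
 * | afe_cmdrsp_request_lpass_dma_resources     |  dma_type,
 * |                                            |  num_read_dma_channels,
 * |                                            |  num_write_dma_channels
 * +--------------------------------------------+
 * | u8 read_dma_idx[num_read_dma_channels]     |  granted RDDMA indexes
 * | u8 write_dma_idx[num_write_dma_channels]   |  granted WRDMA indexes
 * +--------------------------------------------+
 */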
@@ -356,6 +489,7 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
return -EINVAL;
}
if (data->opcode == RESET_EVENTS) {
+ int i = 0;
pr_debug("%s: reset event = %d %d apr[%pK]\n",
__func__,
data->reset_event, data->reset_proc, this_afe.apr);
@@ -398,6 +532,12 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
this_afe.rx_cb = NULL;
}
+ /*
+ * reset TDM group enable ref cnt
+ */
+ for (i = 0; i < IDX_GROUP_TDM_MAX; i++)
+ atomic_set(&tdm_gp_en_ref[i], 0);
+
return 0;
}
afe_callback_debug_print(data);
@@ -429,6 +569,15 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
return -EINVAL;
}
wake_up(&this_afe.wait[data->token]);
+ } else if (data->opcode == AFE_CMDRSP_REQUEST_LPASS_RESOURCES) {
+ uint32_t ret = 0;
+
+ ret = afe_lpass_resources_callback(data);
+ atomic_set(&this_afe.state, 0);
+ wake_up(&this_afe.wait[data->token]);
+ if (!ret) {
+ return ret;
+ }
} else if (data->payload_size) {
uint32_t *payload;
uint16_t port_id = 0;
@@ -459,6 +608,7 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
case AFE_PORTS_CMD_DTMF_CTL:
case AFE_SVC_CMD_SET_PARAM:
case AFE_SVC_CMD_SET_PARAM_V2:
+ case AFE_CMD_REQUEST_LPASS_RESOURCES:
atomic_set(&this_afe.state, 0);
wake_up(&this_afe.wait[data->token]);
break;
@@ -498,6 +648,18 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
atomic_set(&this_afe.state, payload[1]);
wake_up(&this_afe.wait[data->token]);
break;
+ case AFE_CMD_RELEASE_LPASS_RESOURCES:
+ memset(&this_afe.alloced_rddma[0],
+ 0,
+ AFE_MAX_RDDMA);
+ memset(&this_afe.alloced_wrdma[0],
+ 0,
+ AFE_MAX_WRDMA);
+ this_afe.num_alloced_rddma = 0;
+ this_afe.num_alloced_wrdma = 0;
+ atomic_set(&this_afe.state, 0);
+ wake_up(&this_afe.wait[data->token]);
+ break;
default:
pr_err("%s: Unknown cmd 0x%x\n", __func__,
payload[0]);
@@ -3864,11 +4026,36 @@ int afe_port_group_enable(u16 group_id,
{
struct afe_group_device_enable group_enable = {0};
struct param_hdr_v3 param_hdr = {0};
- int ret;
+ int ret = 0;
+ int gp_idx;
pr_debug("%s: group id: 0x%x enable: %d\n", __func__,
group_id, enable);
+ gp_idx = afe_get_tdm_group_idx(group_id);
+
+ if ((gp_idx >= 0) && (gp_idx < IDX_GROUP_TDM_MAX)) {
+
+ atomic_t *gp_ref = &tdm_gp_en_ref[gp_idx];
+
+ if (enable)
+ atomic_inc(gp_ref);
+ else
+ atomic_dec(gp_ref);
+
+ if ((enable) && (atomic_read(gp_ref) > 1)) {
+ pr_err("%s: this TDM group is enabled already %d refs_cnt %d\n",
+ __func__, group_id, atomic_read(gp_ref));
+ goto rtn;
+ }
+
+ if ((!enable) && (atomic_read(gp_ref) > 0)) {
+ pr_err("%s: this TDM group will be disabled in last call %d refs_cnt %d\n",
+ __func__, group_id, atomic_read(gp_ref));
+ goto rtn;
+ }
+ }
+
ret = afe_q6_interface_prepare();
if (ret != 0) {
pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
@@ -3896,6 +4083,8 @@ int afe_port_group_enable(u16 group_id,
pr_err("%s: AFE_PARAM_ID_GROUP_DEVICE_ENABLE failed %d\n",
__func__, ret);
+rtn:
+
return ret;
}
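With the tdm_gp_en_ref counters, afe_port_group_enable() only sends AFE_PARAM_ID_GROUP_DEVICE_ENABLE to the DSP on the first enable and the last disable of a TDM group; intermediate calls just adjust the counter and return through the rtn label, and the counters are zeroed again on RESET_EVENTS and in afe_init(). A reduced sketch of the pattern with the AFE plumbing stubbed out (send_to_dsp() is a stand-in, not a real symbol):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t gp_ref;		/* one counter per TDM group in the real code */

static int group_enable(bool enable, int (*send_to_dsp)(bool))
{
	if (enable) {
		if (atomic_inc_return(&gp_ref) > 1)
			return 0;	/* already enabled, skip the DSP command */
	} else {
		if (atomic_dec_return(&gp_ref) > 0)
			return 0;	/* other users remain, keep the group on */
	}
	return send_to_dsp(enable);
}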
@@ -6528,6 +6717,173 @@ done:
return result;
}
+int afe_request_dma_resources(uint8_t dma_type, uint8_t num_read_dma_channels,
+ uint8_t num_write_dma_channels)
+{
+ int result = 0;
+ struct afe_request_lpass_dma_resources_command config;
+
+ pr_debug("%s:\n", __func__);
+
+ if (dma_type != AFE_LPAIF_DEFAULT_DMA_TYPE) {
+ pr_err("%s: DMA type %d is invalid\n",
+ __func__,
+ dma_type);
+ goto done;
+ }
+
+ if ((num_read_dma_channels == 0) &&
+ (num_write_dma_channels == 0)) {
+ pr_err("%s: DMA channels to allocate are 0\n",
+ __func__);
+ goto done;
+ }
+
+ if (num_read_dma_channels > AFE_MAX_RDDMA) {
+ pr_err("%s: Read DMA channels %d to allocate are > %d\n",
+ __func__,
+ num_read_dma_channels,
+ AFE_MAX_RDDMA);
+ goto done;
+ }
+
+ if (num_write_dma_channels > AFE_MAX_WRDMA) {
+ pr_err("%s: Write DMA channels %d to allocate are > %d\n",
+ __func__,
+ num_write_dma_channels,
+ AFE_MAX_WRDMA);
+ goto done;
+ }
+
+ result = afe_q6_interface_prepare();
+ if (result != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n",
+ __func__, result);
+ goto done;
+ }
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = IDX_GLOBAL_CFG;
+ config.hdr.opcode = AFE_CMD_REQUEST_LPASS_RESOURCES;
+ config.resources.resource_id = AFE_LPAIF_DMA_RESOURCE_ID;
+ /* Only AFE_LPAIF_DEFAULT_DMA_TYPE dma type is supported */
+ config.dma_resources.dma_type = dma_type;
+ config.dma_resources.num_read_dma_channels = num_read_dma_channels;
+ config.dma_resources.num_write_dma_channels = num_write_dma_channels;
+
+ result = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+ if (result)
+ pr_err("%s: AFE_CMD_REQUEST_LPASS_RESOURCES failed %d\n",
+ __func__, result);
+
+done:
+ return result;
+}
+EXPORT_SYMBOL(afe_request_dma_resources);
+
+int afe_get_dma_idx(bool **ret_rddma_idx,
+ bool **ret_wrdma_idx)
+{
+ int ret = 0;
+
+ if (!ret_rddma_idx || !ret_wrdma_idx) {
+ pr_err("%s: invalid return pointers.", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ *ret_rddma_idx = &this_afe.alloced_rddma[0];
+ *ret_wrdma_idx = &this_afe.alloced_wrdma[0];
+
+done:
+ return ret;
+}
+EXPORT_SYMBOL(afe_get_dma_idx);
+
+int afe_release_all_dma_resources(void)
+{
+ int result = 0;
+ int i, total_size;
+ struct afe_release_lpass_dma_resources_command *config;
+ uint8_t *payload;
+
+ pr_debug("%s:\n", __func__);
+
+ if ((this_afe.num_alloced_rddma == 0) &&
+ (this_afe.num_alloced_wrdma == 0)) {
+ pr_err("%s: DMA channels to release is 0",
+ __func__);
+ goto done;
+ }
+
+ result = afe_q6_interface_prepare();
+ if (result != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n",
+ __func__, result);
+ goto done;
+ }
+
+ total_size = sizeof(struct afe_release_lpass_dma_resources_command) +
+ sizeof(uint8_t) *
+ (this_afe.num_alloced_rddma + this_afe.num_alloced_wrdma);
+
+ config = kzalloc(total_size, GFP_KERNEL);
+ if (!config) {
+ result = -ENOMEM;
+ goto done;
+ }
+
+ memset(config, 0, total_size);
+ payload = (uint8_t *) config +
+ sizeof(struct afe_release_lpass_dma_resources_command);
+
+ config->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ config->hdr.pkt_size = total_size;
+ config->hdr.src_port = 0;
+ config->hdr.dest_port = 0;
+ config->hdr.token = IDX_GLOBAL_CFG;
+ config->hdr.opcode = AFE_CMD_RELEASE_LPASS_RESOURCES;
+ config->resources.resource_id = AFE_LPAIF_DMA_RESOURCE_ID;
+ /* Only AFE_LPAIF_DEFAULT_DMA_TYPE dma type is supported */
+ config->dma_resources.dma_type = AFE_LPAIF_DEFAULT_DMA_TYPE;
+ config->dma_resources.num_read_dma_channels =
+ this_afe.num_alloced_rddma;
+ config->dma_resources.num_write_dma_channels =
+ this_afe.num_alloced_wrdma;
+
+ for (i = 0; i < AFE_MAX_RDDMA; i++) {
+ if (this_afe.alloced_rddma[i]) {
+ *payload = i;
+ payload++;
+ }
+ }
+
+ for (i = 0; i < AFE_MAX_WRDMA; i++) {
+ if (this_afe.alloced_wrdma[i]) {
+ *payload = i;
+ payload++;
+ }
+ }
+
+ result = afe_apr_send_pkt(config, &this_afe.wait[IDX_GLOBAL_CFG]);
+ if (result)
+ pr_err("%s: AFE_CMD_RELEASE_LPASS_RESOURCES failed %d\n",
+ __func__, result);
+
+ kfree(config);
+done:
+ return result;
+}
+EXPORT_SYMBOL(afe_release_all_dma_resources);
+
static int __init afe_init(void)
{
int i = 0, ret;
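The three new exports form a small allocate/inspect/release API around the LPASS DMA pool: afe_request_dma_resources() asks the DSP for a number of read and write DMA channels, afe_get_dma_idx() returns pointers to the tables recording which channel indexes were granted, and afe_release_all_dma_resources() hands everything back, with the state cleared on the AFE_CMD_RELEASE_LPASS_RESOURCES response. A hedged usage sketch from a hypothetical client (error handling trimmed; the AFE macros and prototypes are assumed to come from the q6afe headers):

#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative caller -- not part of this patch. */
static int example_grab_two_rddma(void)
{
	bool *rddma, *wrdma;
	int i, ret;

	ret = afe_request_dma_resources(AFE_LPAIF_DEFAULT_DMA_TYPE, 2, 0);
	if (ret)
		return ret;

	ret = afe_get_dma_idx(&rddma, &wrdma);
	if (!ret)
		for (i = 0; i < AFE_MAX_RDDMA; i++)
			if (rddma[i])
				pr_info("RDDMA %d granted\n", i);

	afe_release_all_dma_resources();
	return ret;
}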
@@ -6556,6 +6912,10 @@ static int __init afe_init(void)
pr_err("%s: could not init cal data! %d\n", __func__, ret);
config_debug_fs_init();
+
+ for (i = 0; i < IDX_GROUP_TDM_MAX; i++)
+ atomic_set(&tdm_gp_en_ref[i], 0);
+
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/q6audio-v2.c b/sound/soc/msm/qdsp6v2/q6audio-v2.c
index 3b745c24f90e..0062e4cd6432 100644
--- a/sound/soc/msm/qdsp6v2/q6audio-v2.c
+++ b/sound/soc/msm/qdsp6v2/q6audio-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -794,6 +794,7 @@ int q6audio_validate_port(u16 port_id)
case AFE_PORT_ID_INT5_MI2S_TX:
case AFE_PORT_ID_INT6_MI2S_RX:
case AFE_PORT_ID_INT6_MI2S_TX:
+ case AFE_PORT_ID_MULTICHAN_HDMI_RX:
{
ret = 0;
break;
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index ec018c24591f..d736af0c35b0 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -636,10 +636,10 @@ uint32_t core_set_dolby_manufacturer_id(int manufacturer_id)
return rc;
}
-bool q6core_is_adsp_ready(void)
+int q6core_is_adsp_ready(void)
{
int rc = 0;
- bool ret = false;
+ int ret = false;
struct apr_hdr hdr;
pr_debug("%s: enter\n", __func__);
@@ -666,7 +666,7 @@ bool q6core_is_adsp_ready(void)
if (rc > 0 && q6core_lcl.bus_bw_resp_received) {
/* ensure to read updated param by callback thread */
rmb();
- ret = !!q6core_lcl.param;
+ ret = q6core_lcl.param;
}
}
bail:
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index 47c988618398..362ed2918658 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -140,6 +140,10 @@ static char const *slim_sample_rate_text[] = {"KHZ_8", "KHZ_16",
"KHZ_192", "KHZ_352P8", "KHZ_384"};
static const char *const spk_function_text[] = {"Off", "On"};
static char const *bt_sample_rate_text[] = {"KHZ_8", "KHZ_16", "KHZ_48"};
+static char const *bt_sample_rate_rx_text[] = {"KHZ_8", "KHZ_16", "KHZ_48"};
+static char const *bt_sample_rate_tx_text[] = {"KHZ_8", "KHZ_16", "KHZ_48"};
+
+
static SOC_ENUM_SINGLE_EXT_DECL(spk_func_en, spk_function_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_0_rx_chs, slim_rx_ch_text);
@@ -159,6 +163,9 @@ static SOC_ENUM_SINGLE_EXT_DECL(slim_0_tx_sample_rate, slim_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_5_rx_sample_rate, slim_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_6_rx_sample_rate, slim_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(bt_sample_rate, bt_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(bt_sample_rate_rx, bt_sample_rate_rx_text);
+static SOC_ENUM_SINGLE_EXT_DECL(bt_sample_rate_tx, bt_sample_rate_tx_text);
+
static int slim_get_sample_rate_val(int sample_rate)
{
@@ -374,6 +381,95 @@ static int msm_bt_sample_rate_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_bt_sample_rate_rx_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (slim_rx_cfg[SLIM_RX_7].sample_rate) {
+ case SAMPLING_RATE_48KHZ:
+ ucontrol->value.integer.value[0] = 2;
+ break;
+ case SAMPLING_RATE_16KHZ:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SAMPLING_RATE_8KHZ:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: sample rate = %d", __func__,
+ slim_rx_cfg[SLIM_RX_7].sample_rate);
+
+ return 0;
+}
+
+static int msm_bt_sample_rate_rx_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ slim_rx_cfg[SLIM_RX_7].sample_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 2:
+ slim_rx_cfg[SLIM_RX_7].sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ case 0:
+ default:
+ slim_rx_cfg[SLIM_RX_7].sample_rate = SAMPLING_RATE_8KHZ;
+ break;
+ }
+ pr_debug("%s: sample rates: slim7_rx = %d, value = %d\n",
+ __func__,
+ slim_rx_cfg[SLIM_RX_7].sample_rate,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_bt_sample_rate_tx_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (slim_tx_cfg[SLIM_TX_7].sample_rate) {
+ case SAMPLING_RATE_48KHZ:
+ ucontrol->value.integer.value[0] = 2;
+ break;
+ case SAMPLING_RATE_16KHZ:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SAMPLING_RATE_8KHZ:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: sample rate = %d", __func__,
+ slim_tx_cfg[SLIM_TX_7].sample_rate);
+
+ return 0;
+}
+
+static int msm_bt_sample_rate_tx_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ slim_tx_cfg[SLIM_TX_7].sample_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 2:
+ slim_tx_cfg[SLIM_TX_7].sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ case 0:
+ default:
+ slim_tx_cfg[SLIM_TX_7].sample_rate = SAMPLING_RATE_8KHZ;
+ break;
+ }
+ pr_debug("%s: sample rates: slim7_tx = %d, value = %d\n",
+ __func__,
+ slim_tx_cfg[SLIM_TX_7].sample_rate,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+
static int slim_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -745,6 +841,12 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("BT SampleRate", bt_sample_rate,
msm_bt_sample_rate_get,
msm_bt_sample_rate_put),
+ SOC_ENUM_EXT("BT SampleRate RX", bt_sample_rate_rx,
+ msm_bt_sample_rate_rx_get,
+ msm_bt_sample_rate_rx_put),
+ SOC_ENUM_EXT("BT SampleRate TX", bt_sample_rate_tx,
+ msm_bt_sample_rate_tx_get,
+ msm_bt_sample_rate_tx_put),
};
static int msm_slim_get_ch_from_beid(int32_t be_id)
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index 3d86abd6964f..259763449456 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,7 +44,8 @@ enum {
};
enum {
- BT_SLIM7,
+ BT_SLIM7_RX,
+ BT_SLIM7_TX,
FM_SLIM8,
SLIM_MAX,
};
@@ -138,7 +139,8 @@ static struct dev_config int_mi2s_cfg[] = {
};
static struct dev_config bt_fm_cfg[] = {
- [BT_SLIM7] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+ [BT_SLIM7_RX] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
+ [BT_SLIM7_TX] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
[FM_SLIM8] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
};
@@ -151,6 +153,8 @@ static const char *const int_mi2s_tx_ch_text[] = {"One", "Two",
static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE"};
static const char *const loopback_mclk_text[] = {"DISABLE", "ENABLE"};
static char const *bt_sample_rate_text[] = {"KHZ_8", "KHZ_16", "KHZ_48"};
+static char const *bt_sample_rate_rx_text[] = {"KHZ_8", "KHZ_16", "KHZ_48"};
+static char const *bt_sample_rate_tx_text[] = {"KHZ_8", "KHZ_16", "KHZ_48"};
static SOC_ENUM_SINGLE_EXT_DECL(int0_mi2s_rx_sample_rate, int_mi2s_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(int0_mi2s_rx_chs, int_mi2s_ch_text);
@@ -167,6 +171,8 @@ static SOC_ENUM_SINGLE_EXT_DECL(int4_mi2s_rx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(int5_mi2s_tx_chs, int_mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(loopback_mclk_en, loopback_mclk_text);
static SOC_ENUM_SINGLE_EXT_DECL(bt_sample_rate, bt_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(bt_sample_rate_rx, bt_sample_rate_rx_text);
+static SOC_ENUM_SINGLE_EXT_DECL(bt_sample_rate_tx, bt_sample_rate_tx_text);
static int msm_dmic_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
@@ -624,12 +630,18 @@ static int msm_btfm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
switch (dai_link->be_id) {
case MSM_BACKEND_DAI_SLIMBUS_7_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ bt_fm_cfg[BT_SLIM7_RX].bit_format);
+ rate->min = rate->max = bt_fm_cfg[BT_SLIM7_RX].sample_rate;
+ channels->min = channels->max =
+ bt_fm_cfg[BT_SLIM7_RX].channels;
+ break;
case MSM_BACKEND_DAI_SLIMBUS_7_TX:
param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
- bt_fm_cfg[BT_SLIM7].bit_format);
- rate->min = rate->max = bt_fm_cfg[BT_SLIM7].sample_rate;
+ bt_fm_cfg[BT_SLIM7_TX].bit_format);
+ rate->min = rate->max = bt_fm_cfg[BT_SLIM7_TX].sample_rate;
channels->min = channels->max =
- bt_fm_cfg[BT_SLIM7].channels;
+ bt_fm_cfg[BT_SLIM7_TX].channels;
break;
case MSM_BACKEND_DAI_SLIMBUS_8_TX:
@@ -834,7 +846,7 @@ static int msm_bt_sample_rate_get(struct snd_kcontrol *kcontrol,
* when used for BT_SCO use case. Return either Rx or Tx sample rate
* value.
*/
- switch (bt_fm_cfg[BT_SLIM7].sample_rate) {
+ switch (bt_fm_cfg[BT_SLIM7_RX].sample_rate) {
case SAMPLING_RATE_48KHZ:
ucontrol->value.integer.value[0] = 2;
break;
@@ -847,7 +859,7 @@ static int msm_bt_sample_rate_get(struct snd_kcontrol *kcontrol,
break;
}
pr_debug("%s: sample rate = %d", __func__,
- bt_fm_cfg[BT_SLIM7].sample_rate);
+ bt_fm_cfg[BT_SLIM7_RX].sample_rate);
return 0;
}
@@ -857,24 +869,115 @@ static int msm_bt_sample_rate_put(struct snd_kcontrol *kcontrol,
{
switch (ucontrol->value.integer.value[0]) {
case 1:
- bt_fm_cfg[BT_SLIM7].sample_rate = SAMPLING_RATE_16KHZ;
+ bt_fm_cfg[BT_SLIM7_RX].sample_rate = SAMPLING_RATE_16KHZ;
+ bt_fm_cfg[BT_SLIM7_TX].sample_rate = SAMPLING_RATE_16KHZ;
break;
case 2:
- bt_fm_cfg[BT_SLIM7].sample_rate = SAMPLING_RATE_48KHZ;
+ bt_fm_cfg[BT_SLIM7_RX].sample_rate = SAMPLING_RATE_48KHZ;
+ bt_fm_cfg[BT_SLIM7_TX].sample_rate = SAMPLING_RATE_48KHZ;
break;
case 0:
default:
- bt_fm_cfg[BT_SLIM7].sample_rate = SAMPLING_RATE_8KHZ;
+ bt_fm_cfg[BT_SLIM7_RX].sample_rate = SAMPLING_RATE_8KHZ;
+ bt_fm_cfg[BT_SLIM7_TX].sample_rate = SAMPLING_RATE_8KHZ;
break;
}
pr_debug("%s: sample rates: slim7_rx = %d, value = %d\n",
__func__,
- bt_fm_cfg[BT_SLIM7].sample_rate,
+ bt_fm_cfg[BT_SLIM7_RX].sample_rate,
ucontrol->value.enumerated.item[0]);
return 0;
}
+static int msm_bt_sample_rate_rx_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (bt_fm_cfg[BT_SLIM7_RX].sample_rate) {
+ case SAMPLING_RATE_48KHZ:
+ ucontrol->value.integer.value[0] = 2;
+ break;
+ case SAMPLING_RATE_16KHZ:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SAMPLING_RATE_8KHZ:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: sample rate = %d", __func__,
+ bt_fm_cfg[BT_SLIM7_RX].sample_rate);
+
+ return 0;
+}
+
+static int msm_bt_sample_rate_rx_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ bt_fm_cfg[BT_SLIM7_RX].sample_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 2:
+ bt_fm_cfg[BT_SLIM7_RX].sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ case 0:
+ default:
+ bt_fm_cfg[BT_SLIM7_RX].sample_rate = SAMPLING_RATE_8KHZ;
+ break;
+ }
+ pr_debug("%s: sample rates: slim7_rx = %d, value = %d\n",
+ __func__,
+ bt_fm_cfg[BT_SLIM7_RX].sample_rate,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_bt_sample_rate_tx_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (bt_fm_cfg[BT_SLIM7_TX].sample_rate) {
+ case SAMPLING_RATE_48KHZ:
+ ucontrol->value.integer.value[0] = 2;
+ break;
+ case SAMPLING_RATE_16KHZ:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SAMPLING_RATE_8KHZ:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: sample rate = %d", __func__,
+ bt_fm_cfg[BT_SLIM7_TX].sample_rate);
+
+ return 0;
+}
+
+static int msm_bt_sample_rate_tx_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ bt_fm_cfg[BT_SLIM7_TX].sample_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 2:
+ bt_fm_cfg[BT_SLIM7_TX].sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ case 0:
+ default:
+ bt_fm_cfg[BT_SLIM7_TX].sample_rate = SAMPLING_RATE_8KHZ;
+ break;
+ }
+ pr_debug("%s: sample rates: slim7_tx = %d, value = %d\n",
+ __func__,
+ bt_fm_cfg[BT_SLIM7_TX].sample_rate,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("INT0_MI2S_RX Format", int0_mi2s_rx_format,
int_mi2s_bit_format_get, int_mi2s_bit_format_put),
@@ -902,6 +1005,12 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("BT SampleRate", bt_sample_rate,
msm_bt_sample_rate_get,
msm_bt_sample_rate_put),
+ SOC_ENUM_EXT("BT SampleRate RX", bt_sample_rate_rx,
+ msm_bt_sample_rate_rx_get,
+ msm_bt_sample_rate_rx_put),
+ SOC_ENUM_EXT("BT SampleRate TX", bt_sample_rate_tx,
+ msm_bt_sample_rate_tx_get,
+ msm_bt_sample_rate_tx_put),
};
static const struct snd_kcontrol_new msm_sdw_controls[] = {
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index a001331a53c1..df79d7c846ea 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -453,6 +453,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
u32 *buf = (u32 *)(runtime->dma_area +
rsnd_dai_pointer_offset(io, 0));
+ int shift = 0;
+
+ switch (runtime->sample_bits) {
+ case 32:
+ shift = 8;
+ break;
+ }
/*
* 8/16/32 data can be assesse to TDR/RDR register
@@ -460,9 +467,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
* see rsnd_ssi_init()
*/
if (rsnd_io_is_play(io))
- rsnd_mod_write(mod, SSITDR, *buf);
+ rsnd_mod_write(mod, SSITDR, (*buf) << shift);
else
- *buf = rsnd_mod_read(mod, SSIRDR);
+ *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
}
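On PIO transfers the SSI data registers take the sample left-justified; when the runtime uses a 32-bit container the switch above picks shift = 8, so the write to SSITDR shifts the sample up and the read from SSIRDR shifts it back down, presumably because only 24 bits are significant on the wire. A tiny helper-style sketch of the same shift, for illustration only:

#include <stdint.h>

/* Illustrative only: mirror the shift-by-8 applied for 32-bit containers. */
static inline uint32_t ssi_pack(uint32_t sample) { return sample << 8; }
static inline uint32_t ssi_unpack(uint32_t reg)  { return reg >> 8; }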
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
index cebea9b7f769..6a9be1df7851 100644
--- a/sound/usb/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
}
usb_fill_int_urb(urb, line6->usbdev,
- usb_sndbulkpipe(line6->usbdev,
+ usb_sndintpipe(line6->usbdev,
line6->properties->ep_ctrl_w),
transfer_buffer, length, midi_sent, line6,
line6->interval);
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 1f8fb0d904e0..f5cf23ffb35b 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -351,8 +351,11 @@ static struct usbmix_name_map bose_companion5_map[] = {
/*
* Dell usb dock with ALC4020 codec had a firmware problem where it got
* screwed up when zero volume is passed; just skip it as a workaround
+ *
+ * Also the extension unit gives an access error, so skip it as well.
*/
static const struct usbmix_name_map dell_alc4020_map[] = {
+ { 4, NULL }, /* extension unit */
{ 16, NULL },
{ 19, NULL },
{ 0 }
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 6969b7f4d409..dc66b6016f8d 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,7 +46,8 @@
/* event ring iova base address */
#define IOVA_BASE 0x1000
-#define IOVA_XFER_RING_BASE (IOVA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
+#define IOVA_DCBA_BASE 0x2000
+#define IOVA_XFER_RING_BASE (IOVA_DCBA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
#define IOVA_XFER_BUF_BASE (IOVA_XFER_RING_BASE + PAGE_SIZE * SNDRV_CARDS * 32)
#define IOVA_XFER_RING_MAX (IOVA_XFER_BUF_BASE - PAGE_SIZE)
#define IOVA_XFER_BUF_MAX (0xfffff000 - PAGE_SIZE)
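IOVA_DCBA_BASE carves out a dedicated window for the per-device DCBA mapping between the event ring and the transfer rings, so the IOVA map becomes: event ring at IOVA_BASE, one DCBA page per card from IOVA_DCBA_BASE, transfer rings above that, and transfer buffers up to IOVA_XFER_BUF_MAX. A worked instance of the layout, assuming PAGE_SIZE = 4 KiB and SNDRV_CARDS = 8 (both are configuration-dependent, so the resulting addresses are illustrative):

/* Hypothetical values for illustration only. */
#define PAGE_SIZE_EX		0x1000UL
#define SNDRV_CARDS_EX		8UL

#define IOVA_BASE_EX		0x1000UL	/* event ring        */
#define IOVA_DCBA_BASE_EX	0x2000UL	/* one page per card */
#define IOVA_XFER_RING_EX	(IOVA_DCBA_BASE_EX + \
				 PAGE_SIZE_EX * (SNDRV_CARDS_EX + 1))	/* 0xb000   */
#define IOVA_XFER_BUF_EX	(IOVA_XFER_RING_EX + \
				 PAGE_SIZE_EX * SNDRV_CARDS_EX * 32)	/* 0x10b000 */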
@@ -81,9 +82,10 @@ struct uaudio_dev {
/* audio control interface */
struct usb_host_interface *ctrl_intf;
unsigned int card_num;
- unsigned int usb_core_id;
atomic_t in_use;
struct kref kref;
+ unsigned long dcba_iova;
+ size_t dcba_size;
wait_queue_head_t disconnect_wq;
/* interface specific */
@@ -100,6 +102,9 @@ struct uaudio_qmi_dev {
struct iommu_domain *domain;
/* list to keep track of available iova */
+ struct list_head dcba_list;
+ size_t dcba_iova_size;
+ unsigned long curr_dcba_iova;
struct list_head xfer_ring_list;
size_t xfer_ring_iova_size;
unsigned long curr_xfer_ring_iova;
@@ -146,6 +151,7 @@ static struct msg_desc uaudio_stream_ind_desc = {
enum mem_type {
MEM_EVENT_RING,
+ MEM_DCBA,
MEM_XFER_RING,
MEM_XFER_BUF,
};
@@ -171,26 +177,6 @@ enum usb_qmi_audio_format {
USB_QMI_PCM_FORMAT_U32_BE,
};
-static enum usb_audio_device_speed_enum_v01
-get_speed_info(enum usb_device_speed udev_speed)
-{
- switch (udev_speed) {
- case USB_SPEED_LOW:
- return USB_AUDIO_DEVICE_SPEED_LOW_V01;
- case USB_SPEED_FULL:
- return USB_AUDIO_DEVICE_SPEED_FULL_V01;
- case USB_SPEED_HIGH:
- return USB_AUDIO_DEVICE_SPEED_HIGH_V01;
- case USB_SPEED_SUPER:
- return USB_AUDIO_DEVICE_SPEED_SUPER_V01;
- case USB_SPEED_SUPER_PLUS:
- return USB_AUDIO_DEVICE_SPEED_SUPER_PLUS_V01;
- default:
- pr_err("%s: udev speed %d\n", __func__, udev_speed);
- return USB_AUDIO_DEVICE_SPEED_INVALID_V01;
- }
-}
-
static unsigned long uaudio_get_iova(unsigned long *curr_iova,
size_t *curr_iova_size, struct list_head *head, size_t size)
{
@@ -290,6 +276,10 @@ static unsigned long uaudio_iommu_map(enum mem_type mtype, phys_addr_t pa,
if (uaudio_qdev->er_phys_addr == pa)
map = false;
break;
+ case MEM_DCBA:
+ va = uaudio_get_iova(&uaudio_qdev->curr_dcba_iova,
+ &uaudio_qdev->dcba_iova_size, &uaudio_qdev->dcba_list, size);
+ break;
case MEM_XFER_RING:
va = uaudio_get_iova(&uaudio_qdev->curr_xfer_ring_iova,
&uaudio_qdev->xfer_ring_iova_size, &uaudio_qdev->xfer_ring_list,
@@ -374,7 +364,10 @@ static void uaudio_iommu_unmap(enum mem_type mtype, unsigned long va,
else
unmap = false;
break;
-
+ case MEM_DCBA:
+ uaudio_put_iova(va, size, &uaudio_qdev->dcba_list,
+ &uaudio_qdev->dcba_iova_size);
+ break;
case MEM_XFER_RING:
uaudio_put_iova(va, size, &uaudio_qdev->xfer_ring_list,
&uaudio_qdev->xfer_ring_iova_size);
@@ -416,7 +409,8 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
void *hdr_ptr;
u8 *xfer_buf;
u32 len, mult, remainder, xfer_buf_len;
- unsigned long va, tr_data_va = 0, tr_sync_va = 0, xfer_buf_va = 0;
+ unsigned long va, tr_data_va = 0, tr_sync_va = 0, dcba_va = 0,
+ xfer_buf_va = 0;
phys_addr_t xhci_pa, xfer_buf_pa;
iface = usb_ifnum_to_if(subs->dev, subs->interface);
@@ -561,13 +555,6 @@ skip_sync_ep:
resp->interrupter_num = uaudio_qdev->intr_num;
resp->interrupter_num_valid = 1;
- ret = usb_get_controller_id(subs->dev);
- if (ret < 0)
- goto err;
-
- resp->controller_num = ret;
- resp->controller_num_valid = 1;
-
/* map xhci data structures PA memory to iova */
/* event ring */
@@ -595,17 +582,33 @@ skip_sync_ep:
resp->xhci_mem_info.evt_ring.size = PAGE_SIZE;
uaudio_qdev->er_phys_addr = xhci_pa;
- resp->speed_info = get_speed_info(subs->dev->speed);
- if (resp->speed_info == USB_AUDIO_DEVICE_SPEED_INVALID_V01)
+ /* dcba */
+ xhci_pa = usb_get_dcba_dma_addr(subs->dev);
+ if (!xhci_pa) {
+ pr_err("%s:failed to get dcba dma address\n", __func__);
goto unmap_er;
+ }
+
+ if (!uadev[card_num].dcba_iova) { /* mappped per usb device */
+ va = uaudio_iommu_map(MEM_DCBA, xhci_pa, PAGE_SIZE);
+ if (!va)
+ goto unmap_er;
+
+ uadev[card_num].dcba_iova = va;
+ uadev[card_num].dcba_size = PAGE_SIZE;
+ }
- resp->speed_info_valid = 1;
+ dcba_va = uadev[card_num].dcba_iova;
+ resp->xhci_mem_info.dcba.va = PREPEND_SID_TO_IOVA(dcba_va,
+ uaudio_qdev->sid);
+ resp->xhci_mem_info.dcba.pa = xhci_pa;
+ resp->xhci_mem_info.dcba.size = PAGE_SIZE;
/* data transfer ring */
xhci_pa = resp->xhci_mem_info.tr_data.pa;
va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
if (!va)
- goto unmap_er;
+ goto unmap_dcba;
tr_data_va = va;
resp->xhci_mem_info.tr_data.va = PREPEND_SID_TO_IOVA(va,
@@ -678,7 +681,6 @@ skip_sync:
}
uadev[card_num].card_num = card_num;
- uadev[card_num].usb_core_id = resp->controller_num;
/* cache intf specific info to use it for unmap and free xfer buf */
uadev[card_num].info[info_idx].data_xfer_ring_va = tr_data_va;
@@ -706,6 +708,8 @@ unmap_sync:
uaudio_iommu_unmap(MEM_XFER_RING, tr_sync_va, PAGE_SIZE);
unmap_data:
uaudio_iommu_unmap(MEM_XFER_RING, tr_data_va, PAGE_SIZE);
+unmap_dcba:
+ uaudio_iommu_unmap(MEM_DCBA, dcba_va, PAGE_SIZE);
unmap_er:
uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
err:
@@ -751,6 +755,11 @@ static void uaudio_dev_cleanup(struct uaudio_dev *dev)
dev->info[if_idx].intf_num, dev->card_num);
}
+ /* iommu_unmap dcba iova for a usb device */
+ uaudio_iommu_unmap(MEM_DCBA, dev->dcba_iova, dev->dcba_size);
+
+ dev->dcba_iova = 0;
+ dev->dcba_size = 0;
dev->num_intf = 0;
/* free interface info */
@@ -799,8 +808,6 @@ static void uaudio_disconnect_cb(struct snd_usb_audio *chip)
pr_debug("%s: sending qmi indication disconnect\n", __func__);
disconnect_ind.dev_event = USB_AUDIO_DEV_DISCONNECT_V01;
disconnect_ind.slot_id = dev->udev->slot_id;
- disconnect_ind.controller_num = dev->usb_core_id;
- disconnect_ind.controller_num_valid = 1;
ret = qmi_send_ind(svc->uaudio_svc_hdl, svc->curr_conn,
&uaudio_stream_ind_desc, &disconnect_ind,
sizeof(disconnect_ind));
@@ -1222,7 +1229,11 @@ static int uaudio_qmi_plat_probe(struct platform_device *pdev)
goto free_domain;
}
- /* initialize xfer ring and xfer buf iova list */
+ /* initialize dcba, xfer ring and xfer buf iova list */
+ INIT_LIST_HEAD(&uaudio_qdev->dcba_list);
+ uaudio_qdev->curr_dcba_iova = IOVA_DCBA_BASE;
+ uaudio_qdev->dcba_iova_size = SNDRV_CARDS * PAGE_SIZE;
+
INIT_LIST_HEAD(&uaudio_qdev->xfer_ring_list);
uaudio_qdev->curr_xfer_ring_iova = IOVA_XFER_RING_BASE;
uaudio_qdev->xfer_ring_iova_size =
diff --git a/sound/usb/usb_audio_qmi_v01.c b/sound/usb/usb_audio_qmi_v01.c
index 4fa8445badde..beff1aaf4981 100644
--- a/sound/usb/usb_audio_qmi_v01.c
+++ b/sound/usb/usb_audio_qmi_v01.c
@@ -1,4 +1,4 @@
- /* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ /* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -633,46 +633,6 @@ struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[] = {
interrupter_num),
},
{
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(uint8_t),
- .is_array = NO_ARRAY,
- .tlv_type = 0x1C,
- .offset = offsetof(
- struct qmi_uaudio_stream_resp_msg_v01,
- speed_info_valid),
- },
- {
- .data_type = QMI_SIGNED_4_BYTE_ENUM,
- .elem_len = 1,
- .elem_size = sizeof(enum usb_audio_device_speed_enum_v01),
- .is_array = NO_ARRAY,
- .tlv_type = 0x1C,
- .offset = offsetof(
- struct qmi_uaudio_stream_resp_msg_v01,
- speed_info),
- },
- {
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(uint8_t),
- .is_array = NO_ARRAY,
- .tlv_type = 0x1D,
- .offset = offsetof(
- struct qmi_uaudio_stream_resp_msg_v01,
- controller_num_valid),
- },
- {
- .data_type = QMI_UNSIGNED_1_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(uint8_t),
- .is_array = NO_ARRAY,
- .tlv_type = 0x1D,
- .offset = offsetof(
- struct qmi_uaudio_stream_resp_msg_v01,
- controller_num),
- },
- {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -866,24 +826,6 @@ struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[] = {
interrupter_num),
},
{
- .data_type = QMI_OPT_FLAG,
- .elem_len = 1,
- .elem_size = sizeof(uint8_t),
- .is_array = NO_ARRAY,
- .tlv_type = 0x19,
- .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
- controller_num_valid),
- },
- {
- .data_type = QMI_UNSIGNED_1_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(uint8_t),
- .is_array = NO_ARRAY,
- .tlv_type = 0x19,
- .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
- controller_num),
- },
- {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
diff --git a/sound/usb/usb_audio_qmi_v01.h b/sound/usb/usb_audio_qmi_v01.h
index addc0ed3de2a..f3b6eb05d5f0 100644
--- a/sound/usb/usb_audio_qmi_v01.h
+++ b/sound/usb/usb_audio_qmi_v01.h
@@ -1,4 +1,4 @@
- /* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ /* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -77,17 +77,6 @@ enum usb_audio_device_indication_enum_v01 {
USB_AUDIO_DEVICE_INDICATION_ENUM_MAX_VAL_V01 = INT_MAX,
};
-enum usb_audio_device_speed_enum_v01 {
- USB_AUDIO_DEVICE_SPEED_ENUM_MIN_VAL_V01 = INT_MIN,
- USB_AUDIO_DEVICE_SPEED_INVALID_V01 = 0,
- USB_AUDIO_DEVICE_SPEED_LOW_V01 = 1,
- USB_AUDIO_DEVICE_SPEED_FULL_V01 = 2,
- USB_AUDIO_DEVICE_SPEED_HIGH_V01 = 3,
- USB_AUDIO_DEVICE_SPEED_SUPER_V01 = 4,
- USB_AUDIO_DEVICE_SPEED_SUPER_PLUS_V01 = 5,
- USB_AUDIO_DEVICE_SPEED_ENUM_MAX_VAL_V01 = INT_MAX,
-};
-
struct qmi_uaudio_stream_req_msg_v01 {
uint8_t enable;
uint32_t usb_token;
@@ -129,12 +118,8 @@ struct qmi_uaudio_stream_resp_msg_v01 {
struct apps_mem_info_v01 xhci_mem_info;
uint8_t interrupter_num_valid;
uint8_t interrupter_num;
- uint8_t speed_info_valid;
- enum usb_audio_device_speed_enum_v01 speed_info;
- uint8_t controller_num_valid;
- uint8_t controller_num;
};
-#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 202
+#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 191
extern struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[];
struct qmi_uaudio_stream_ind_msg_v01 {
@@ -158,10 +143,8 @@ struct qmi_uaudio_stream_ind_msg_v01 {
struct apps_mem_info_v01 xhci_mem_info;
uint8_t interrupter_num_valid;
uint8_t interrupter_num;
- uint8_t controller_num_valid;
- uint8_t controller_num;
};
-#define QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN 181
+#define QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN 177
extern struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[];
#endif
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index ebe7115c751a..da8afc121118 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1152,6 +1152,10 @@ static struct syscall_fmt {
{ .name = "mlockall", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "mmap", .hexret = true,
+/* The standard mmap maps to old_mmap on s390x */
+#if defined(__s390x__)
+ .alias = "old_mmap",
+#endif
.arg_scnprintf = { [0] = SCA_HEX, /* addr */
[2] = SCA_MMAP_PROT, /* prot */
[3] = SCA_MMAP_FLAGS, /* flags */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 43838003c1a1..304f5d710143 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1258,8 +1258,16 @@ static int __event_process_build_id(struct build_id_event *bev,
dso__set_build_id(dso, &bev->build_id);
- if (!is_kernel_module(filename, cpumode))
- dso->kernel = dso_type;
+ if (dso_type != DSO_TYPE_USER) {
+ struct kmod_path m = { .name = NULL, };
+
+ if (!kmod_path__parse_name(&m, filename) && m.kmod)
+ dso__set_short_name(dso, strdup(m.name), true);
+ else
+ dso->kernel = dso_type;
+
+ free(m.name);
+ }
build_id__sprintf(dso->build_id, sizeof(dso->build_id),
sbuild_id);
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index eeeae0629ad3..0b540b84f8b7 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1270,6 +1270,7 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
intel_pt_clear_tx_flags(decoder);
decoder->have_tma = false;
decoder->cbr = 0;
+ decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
decoder->overflow = true;
return -EOVERFLOW;
@@ -1492,6 +1493,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
case INTEL_PT_PSBEND:
intel_pt_log("ERROR: Missing TIP after FUP\n");
decoder->pkt_state = INTEL_PT_STATE_ERR3;
+ decoder->pkt_step = 0;
return -ENOENT;
case INTEL_PT_OVF:
@@ -2152,14 +2154,6 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
return &decoder->state;
}
-static bool intel_pt_at_psb(unsigned char *buf, size_t len)
-{
- if (len < INTEL_PT_PSB_LEN)
- return false;
- return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR,
- INTEL_PT_PSB_LEN);
-}
-
/**
* intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
* @buf: pointer to buffer pointer
@@ -2248,6 +2242,7 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
* @buf: buffer
* @len: size of buffer
* @tsc: TSC value returned
+ * @rem: returns remaining size when TSC is found
*
* Find a TSC packet in @buf and return the TSC value. This function assumes
* that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
@@ -2255,7 +2250,8 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
*
* Return: %true if TSC is found, false otherwise.
*/
-static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
+static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
+ size_t *rem)
{
struct intel_pt_pkt packet;
int ret;
@@ -2266,6 +2262,7 @@ static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
return false;
if (packet.type == INTEL_PT_TSC) {
*tsc = packet.payload;
+ *rem = len;
return true;
}
if (packet.type == INTEL_PT_PSBEND)
@@ -2316,6 +2313,8 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
* @len_a: size of first buffer
* @buf_b: second buffer
* @len_b: size of second buffer
+ * @consecutive: returns true if there is data in buf_b that is consecutive
+ * to buf_a
*
* If the trace contains TSC we can look at the last TSC of @buf_a and the
* first TSC of @buf_b in order to determine if the buffers overlap, and then
@@ -2328,33 +2327,41 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
size_t len_a,
unsigned char *buf_b,
- size_t len_b)
+ size_t len_b, bool *consecutive)
{
uint64_t tsc_a, tsc_b;
unsigned char *p;
- size_t len;
+ size_t len, rem_a, rem_b;
p = intel_pt_last_psb(buf_a, len_a);
if (!p)
return buf_b; /* No PSB in buf_a => no overlap */
len = len_a - (p - buf_a);
- if (!intel_pt_next_tsc(p, len, &tsc_a)) {
+ if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
/* The last PSB+ in buf_a is incomplete, so go back one more */
len_a -= len;
p = intel_pt_last_psb(buf_a, len_a);
if (!p)
return buf_b; /* No full PSB+ => assume no overlap */
len = len_a - (p - buf_a);
- if (!intel_pt_next_tsc(p, len, &tsc_a))
+ if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
return buf_b; /* No TSC in buf_a => assume no overlap */
}
while (1) {
/* Ignore PSB+ with no TSC */
- if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) &&
- intel_pt_tsc_cmp(tsc_a, tsc_b) < 0)
- return buf_b; /* tsc_a < tsc_b => no overlap */
+ if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
+ int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
+
+ /* Same TSC, so buffers are consecutive */
+ if (!cmp && rem_b >= rem_a) {
+ *consecutive = true;
+ return buf_b + len_b - (rem_b - rem_a);
+ }
+ if (cmp < 0)
+ return buf_b; /* tsc_a < tsc_b => no overlap */
+ }
if (!intel_pt_step_psb(&buf_b, &len_b))
return buf_b + len_b; /* No PSB in buf_b => no data */
@@ -2368,6 +2375,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
* @buf_b: second buffer
* @len_b: size of second buffer
* @have_tsc: can use TSC packets to detect overlap
+ * @consecutive: returns true if there is data in buf_b that is consecutive
+ * to buf_a
*
* When trace samples or snapshots are recorded there is the possibility that
* the data overlaps. Note that, for the purposes of decoding, data is only
@@ -2378,7 +2387,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
*/
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b,
- bool have_tsc)
+ bool have_tsc, bool *consecutive)
{
unsigned char *found;
@@ -2390,7 +2399,8 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
return buf_b; /* No overlap */
if (have_tsc) {
- found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b);
+ found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
+ consecutive);
if (found)
return found;
}
@@ -2405,28 +2415,16 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
}
/* Now len_b >= len_a */
- if (len_b > len_a) {
- /* The leftover buffer 'b' must start at a PSB */
- while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
- if (!intel_pt_step_psb(&buf_a, &len_a))
- return buf_b; /* No overlap */
- }
- }
-
while (1) {
/* Potential overlap so check the bytes */
found = memmem(buf_a, len_a, buf_b, len_a);
- if (found)
+ if (found) {
+ *consecutive = true;
return buf_b + len_a;
+ }
/* Try again at next PSB in buffer 'a' */
if (!intel_pt_step_psb(&buf_a, &len_a))
return buf_b; /* No overlap */
-
- /* The leftover buffer 'b' must start at a PSB */
- while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
- if (!intel_pt_step_psb(&buf_a, &len_a))
- return buf_b; /* No overlap */
- }
}
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index 02c38fec1c37..89a3eda6a318 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -102,7 +102,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder);
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b,
- bool have_tsc);
+ bool have_tsc, bool *consecutive);
int intel_pt__strerror(int code, char *buf, size_t buflen);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 89927b5beebf..3693cb26ec66 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -125,6 +125,7 @@ struct intel_pt_queue {
bool stop;
bool step_through_buffers;
bool use_buffer_pid_tid;
+ bool sync_switch;
pid_t pid, tid;
int cpu;
int switch_state;
@@ -188,14 +189,17 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
struct auxtrace_buffer *b)
{
+ bool consecutive = false;
void *start;
start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
- pt->have_tsc);
+ pt->have_tsc, &consecutive);
if (!start)
return -EINVAL;
b->use_size = b->data + b->size - start;
b->use_data = start;
+ if (b->use_size && consecutive)
+ b->consecutive = true;
return 0;
}
@@ -849,10 +853,12 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
if (pt->timeless_decoding || !pt->have_sched_switch)
ptq->use_buffer_pid_tid = true;
}
+
+ ptq->sync_switch = pt->sync_switch;
}
if (!ptq->on_heap &&
- (!pt->sync_switch ||
+ (!ptq->sync_switch ||
ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
const struct intel_pt_state *state;
int ret;
@@ -1235,7 +1241,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
if (pt->synth_opts.last_branch)
intel_pt_update_last_branch_rb(ptq);
- if (!pt->sync_switch)
+ if (!ptq->sync_switch)
return 0;
if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
@@ -1316,6 +1322,21 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
return switch_ip;
}
+static void intel_pt_enable_sync_switch(struct intel_pt *pt)
+{
+ unsigned int i;
+
+ pt->sync_switch = true;
+
+ for (i = 0; i < pt->queues.nr_queues; i++) {
+ struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+ struct intel_pt_queue *ptq = queue->priv;
+
+ if (ptq)
+ ptq->sync_switch = true;
+ }
+}
+
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
const struct intel_pt_state *state = ptq->state;
@@ -1332,7 +1353,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
if (pt->switch_ip) {
intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
pt->switch_ip, pt->ptss_ip);
- pt->sync_switch = true;
+ intel_pt_enable_sync_switch(pt);
}
}
}
@@ -1348,9 +1369,9 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
if (state->err) {
if (state->err == INTEL_PT_ERR_NODATA)
return 1;
- if (pt->sync_switch &&
+ if (ptq->sync_switch &&
state->from_ip >= pt->kernel_start) {
- pt->sync_switch = false;
+ ptq->sync_switch = false;
intel_pt_next_tid(pt, ptq);
}
if (pt->synth_opts.errors) {
@@ -1376,7 +1397,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
state->timestamp, state->est_timestamp);
ptq->timestamp = state->est_timestamp;
/* Use estimated TSC in unknown switch state */
- } else if (pt->sync_switch &&
+ } else if (ptq->sync_switch &&
ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
intel_pt_is_switch_ip(ptq, state->to_ip) &&
ptq->next_tid == -1) {
@@ -1523,7 +1544,7 @@ static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
return 1;
ptq = intel_pt_cpu_to_ptq(pt, cpu);
- if (!ptq)
+ if (!ptq || !ptq->sync_switch)
return 1;
switch (ptq->switch_state) {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 03875f9154e7..0195b7e8c54a 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2349,6 +2349,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
out:
free(nbase);
+
+ /* Final validation */
+ if (ret >= 0 && !is_c_func_name(buf)) {
+ pr_warning("Internal error: \"%s\" is an invalid event name.\n",
+ buf);
+ ret = -EINVAL;
+ }
+
return ret;
}
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 2dcfe9a7c8d0..60edec383281 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -37,6 +37,14 @@ static int __report_module(struct addr_location *al, u64 ip,
return 0;
mod = dwfl_addrmodule(ui->dwfl, ip);
+ if (mod) {
+ Dwarf_Addr s;
+
+ dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+ if (s != al->map->start)
+ mod = 0;
+ }
+
if (!mod)
mod = dwfl_report_elf(ui->dwfl, dso->short_name,
dso->long_name, -1, al->map->start,
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 47b1e36c7ea0..9adc9af8b048 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -162,7 +162,7 @@ int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
size -= ret;
off_in += ret;
- off_out -= ret;
+ off_out += ret;
}
munmap(ptr, off_in + size);
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
index 42d4c8caad81..de8dc82e2567 100644
--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
@@ -45,12 +45,12 @@ int test_body(void)
printf("Check DSCR TM context switch: ");
fflush(stdout);
for (;;) {
- rv = 1;
asm __volatile__ (
/* set a known value into the DSCR */
"ld 3, %[dscr1];"
"mtspr %[sprn_dscr], 3;"
+ "li %[rv], 1;"
/* start and suspend a transaction */
TBEGIN
"beq 1f;"