-rw-r--r--  Documentation/ABI/testing/sysfs-fs-f2fs | 9
-rw-r--r--  Documentation/devicetree/bindings/display/msm/edrm.txt | 57
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-msm.txt | 12
-rw-r--r--  Documentation/filesystems/proc.txt | 4
-rw-r--r--  MAINTAINERS | 13
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/qcom/Makefile | 9
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi | 22
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi | 25
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-mdss.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-mtp-overlay.dts | 23
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-mtp.dtsi | 15
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-pm.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-sde.dtsi | 11
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-v3-pmi8996.dts | 24
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996.dtsi | 45
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts | 9
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996pro-v1.1-pmi8996.dts | 24
-rw-r--r--  arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts | 1
-rw-r--r--  arch/arm/mach-imx/cpuidle-imx6sx.c | 2
-rw-r--r--  arch/arm64/configs/cuttlefish_defconfig | 3
-rw-r--r--  arch/arm64/configs/msm-auto-perf_defconfig | 1
-rw-r--r--  arch/arm64/configs/msm-auto_defconfig | 1
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 3
-rw-r--r--  arch/arm64/kernel/head.S | 3
-rw-r--r--  arch/mips/Kconfig | 4
-rw-r--r--  arch/mips/boot/compressed/calc_vmlinuz_load_addr.c | 7
-rw-r--r--  arch/mips/include/asm/pgtable-64.h | 5
-rw-r--r--  arch/mips/pci/msi-octeon.c | 4
-rw-r--r--  arch/mips/sibyte/common/Makefile | 1
-rw-r--r--  arch/mips/sibyte/common/dma.c | 14
-rw-r--r--  arch/powerpc/boot/crt0.S | 4
-rw-r--r--  arch/x86/configs/x86_64_cuttlefish_defconfig | 4
-rw-r--r--  arch/x86/crypto/chacha20_glue.c | 1
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 19
-rw-r--r--  arch/x86/kvm/x86.c | 3
-rw-r--r--  block/blk-settings.c | 1
-rw-r--r--  block/blk-sysfs.c | 1
-rw-r--r--  crypto/authenc.c | 14
-rw-r--r--  crypto/authencesn.c | 2
-rw-r--r--  crypto/cts.c | 8
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/acpi/power.c | 22
-rw-r--r--  drivers/base/bus.c | 7
-rw-r--r--  drivers/block/loop.c | 117
-rw-r--r--  drivers/block/loop.h | 1
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 25
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 6
-rw-r--r--  drivers/cpuidle/cpuidle-pseries.c | 8
-rw-r--r--  drivers/gpio/gpio-max7301.c | 12
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 10
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 9
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_connector.c | 123
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_connector.h | 28
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_crtc.c | 263
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_crtc.h | 62
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_drv.c | 439
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_encoder.c | 112
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_encoder.h | 50
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_kms.c | 723
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_kms.h | 58
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_plane.c | 912
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_plane.h | 41
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_splash.c | 109
-rw-r--r--  drivers/gpu/drm/msm/ekms/edrm_splash.h | 59
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_ctl.c | 83
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_ctl.h | 6
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c | 10
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_splash.c | 15
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_splash.h | 13
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 2
-rw-r--r--  drivers/hv/vmbus_drv.c | 20
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 3
-rw-r--r--  drivers/i2c/i2c-dev.c | 6
-rw-r--r--  drivers/input/keyboard/omap4-keypad.c | 16
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 1
-rw-r--r--  drivers/iommu/dma-mapping-fast.c | 14
-rw-r--r--  drivers/iommu/intel-iommu.c | 4
-rw-r--r--  drivers/isdn/capi/kcapi.c | 4
-rw-r--r--  drivers/md/Kconfig | 16
-rw-r--r--  drivers/md/dm-kcopyd.c | 19
-rw-r--r--  drivers/md/dm-snap.c | 22
-rw-r--r--  drivers/md/dm-table.c | 3
-rw-r--r--  drivers/md/dm-verity-target.c | 9
-rw-r--r--  drivers/media/firewire/firedtv-avc.c | 6
-rw-r--r--  drivers/media/firewire/firedtv.h | 6
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.c | 6
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.h | 1
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp47.c | 9
-rw-r--r--  drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c | 24
-rw-r--r--  drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h | 3
-rw-r--r--  drivers/media/platform/vivid/vivid-kthread-cap.c | 5
-rw-r--r--  drivers/media/platform/vivid/vivid-kthread-out.c | 5
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-cap.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-common.c | 2
-rw-r--r--  drivers/media/usb/em28xx/em28xx-video.c | 4
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 14
-rw-r--r--  drivers/mfd/tps6586x.c | 24
-rw-r--r--  drivers/misc/genwqe/card_utils.c | 2
-rw-r--r--  drivers/misc/mic/card/mic_virtio.c | 2
-rw-r--r--  drivers/mmc/core/core.c | 10
-rw-r--r--  drivers/mmc/core/mmc.c | 4
-rw-r--r--  drivers/mmc/core/sd.c | 40
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 3
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 178
-rw-r--r--  drivers/mmc/host/sdhci-msm.h | 12
-rw-r--r--  drivers/mmc/host/sdhci.c | 24
-rw-r--r--  drivers/mmc/host/sdhci.h | 3
-rw-r--r--  drivers/net/caif/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 13
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/usb/hso.c | 18
-rw-r--r--  drivers/net/wireless/b43/phy_common.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/pci/host/pcie-altera.c | 201
-rw-r--r--  drivers/perf/perf_event_armv8.c | 1
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 3
-rw-r--r--  drivers/power/olpc_battery.c | 4
-rw-r--r--  drivers/remoteproc/remoteproc_virtio.c | 2
-rw-r--r--  drivers/rpmsg/virtio_rpmsg_bus.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 6
-rw-r--r--  drivers/s390/virtio/kvm_virtio.c | 2
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 2
-rw-r--r--  drivers/scsi/sd.c | 6
-rw-r--r--  drivers/soc/qcom/early_dom.c | 7
-rw-r--r--  drivers/spi/spi-bcm2835.c | 16
-rw-r--r--  drivers/staging/android/sync.c | 2
-rw-r--r--  drivers/target/target_core_spc.c | 17
-rw-r--r--  drivers/tty/tty_ldsem.c | 10
-rw-r--r--  drivers/usb/class/cdc-acm.c | 17
-rw-r--r--  drivers/usb/class/cdc-acm.h | 1
-rw-r--r--  drivers/usb/core/quirks.c | 3
-rw-r--r--  drivers/usb/gadget/function/u_uac1.h | 2
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 5
-rw-r--r--  drivers/usb/host/xhci-hub.c | 3
-rw-r--r--  drivers/usb/misc/diag_ipc_bridge.c | 105
-rw-r--r--  drivers/usb/misc/ks_bridge.c | 6
-rw-r--r--  drivers/usb/serial/option.c | 20
-rw-r--r--  drivers/usb/serial/pl2303.c | 5
-rw-r--r--  drivers/usb/serial/pl2303.h | 5
-rw-r--r--  drivers/usb/storage/scsiglue.c | 8
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 12
-rw-r--r--  drivers/vhost/Kconfig | 18
-rw-r--r--  drivers/vhost/Kconfig.vringh | 5
-rw-r--r--  drivers/vhost/Makefile | 4
-rw-r--r--  drivers/vhost/net.c | 214
-rw-r--r--  drivers/vhost/scsi.c | 2
-rw-r--r--  drivers/vhost/test.c | 2
-rw-r--r--  drivers/vhost/vhost.c | 1023
-rw-r--r--  drivers/vhost/vhost.h | 69
-rw-r--r--  drivers/vhost/vsock.c | 797
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | 2
-rw-r--r--  drivers/virtio/Kconfig | 2
-rw-r--r--  drivers/virtio/virtio_balloon.c | 2
-rw-r--r--  drivers/virtio/virtio_input.c | 2
-rw-r--r--  drivers/virtio/virtio_mmio.c | 2
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 4
-rw-r--r--  drivers/virtio/virtio_pci_common.h | 2
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 2
-rw-r--r--  drivers/virtio/virtio_ring.c | 249
-rw-r--r--  fs/btrfs/Makefile | 2
-rw-r--r--  fs/btrfs/ctree.c | 14
-rw-r--r--  fs/btrfs/ctree.h | 141
-rw-r--r--  fs/btrfs/dev-replace.c | 2
-rw-r--r--  fs/btrfs/disk-io.c | 88
-rw-r--r--  fs/btrfs/extent-tree.c | 112
-rw-r--r--  fs/btrfs/extent_io.c | 43
-rw-r--r--  fs/btrfs/extent_io.h | 19
-rw-r--r--  fs/btrfs/extent_map.c | 2
-rw-r--r--  fs/btrfs/extent_map.h | 10
-rw-r--r--  fs/btrfs/scrub.c | 2
-rw-r--r--  fs/btrfs/struct-funcs.c | 9
-rw-r--r--  fs/btrfs/tree-checker.c | 649
-rw-r--r--  fs/btrfs/tree-checker.h | 38
-rw-r--r--  fs/btrfs/volumes.c | 139
-rw-r--r--  fs/btrfs/volumes.h | 2
-rw-r--r--  fs/ceph/caps.c | 1
-rw-r--r--  fs/cifs/file.c | 8
-rw-r--r--  fs/cifs/smb2file.c | 4
-rw-r--r--  fs/cifs/smb2maperror.c | 4
-rw-r--r--  fs/cifs/transport.c | 2
-rw-r--r--  fs/dlm/lock.c | 17
-rw-r--r--  fs/dlm/lockspace.c | 2
-rw-r--r--  fs/ext4/inline.c | 11
-rw-r--r--  fs/ext4/resize.c | 2
-rw-r--r--  fs/ext4/super.c | 13
-rw-r--r--  fs/f2fs/acl.c | 20
-rw-r--r--  fs/f2fs/checkpoint.c | 33
-rw-r--r--  fs/f2fs/data.c | 206
-rw-r--r--  fs/f2fs/debug.c | 31
-rw-r--r--  fs/f2fs/dir.c | 20
-rw-r--r--  fs/f2fs/f2fs.h | 116
-rw-r--r--  fs/f2fs/file.c | 47
-rw-r--r--  fs/f2fs/gc.c | 81
-rw-r--r--  fs/f2fs/inline.c | 20
-rw-r--r--  fs/f2fs/inode.c | 22
-rw-r--r--  fs/f2fs/namei.c | 8
-rw-r--r--  fs/f2fs/node.c | 40
-rw-r--r--  fs/f2fs/node.h | 2
-rw-r--r--  fs/f2fs/recovery.c | 4
-rw-r--r--  fs/f2fs/segment.c | 101
-rw-r--r--  fs/f2fs/segment.h | 2
-rw-r--r--  fs/f2fs/shrinker.c | 2
-rw-r--r--  fs/f2fs/super.c | 175
-rw-r--r--  fs/f2fs/sysfs.c | 27
-rw-r--r--  fs/f2fs/xattr.c | 22
-rw-r--r--  fs/gfs2/rgrp.c | 2
-rw-r--r--  fs/jffs2/super.c | 3
-rw-r--r--  fs/ocfs2/localalloc.c | 9
-rw-r--r--  fs/proc/array.c | 2
-rw-r--r--  fs/pstore/ram_core.c | 5
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 9
-rw-r--r--  include/linux/backing-dev-defs.h | 9
-rw-r--r--  include/linux/hashtable.h | 4
-rw-r--r--  include/linux/mmc/host.h | 3
-rw-r--r--  include/linux/sunrpc/svc.h | 5
-rw-r--r--  include/linux/virtio_config.h | 15
-rw-r--r--  include/linux/virtio_vsock.h | 157
-rw-r--r--  include/net/af_vsock.h | 9
-rw-r--r--  include/net/gro_cells.h | 1
-rw-r--r--  include/net/sock.h | 36
-rw-r--r--  include/trace/events/ext4.h | 20
-rw-r--r--  include/trace/events/vsock_virtio_transport_common.h | 144
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  include/uapi/linux/input-event-codes.h | 9
-rw-r--r--  include/uapi/linux/vhost.h | 39
-rw-r--r--  include/uapi/linux/virtio_config.h | 10
-rw-r--r--  include/uapi/linux/virtio_ids.h | 1
-rw-r--r--  include/uapi/linux/virtio_vsock.h | 94
-rw-r--r--  kernel/fork.c | 13
-rw-r--r--  kernel/memremap.c | 11
-rw-r--r--  mm/memory_hotplug.c | 16
-rw-r--r--  mm/page-writeback.c | 35
-rw-r--r--  mm/readahead.c | 49
-rw-r--r--  mm/slab.c | 6
-rw-r--r--  net/9p/client.c | 21
-rw-r--r--  net/ax25/af_ax25.c | 11
-rw-r--r--  net/ax25/ax25_dev.c | 2
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 2
-rw-r--r--  net/can/gw.c | 30
-rw-r--r--  net/compat.c | 15
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/skbuff.c | 11
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/ieee802154/6lowpan/tx.c | 3
-rw-r--r--  net/ipv4/ip_sockglue.c | 12
-rw-r--r--  net/ipv4/ipmr.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 14
-rw-r--r--  net/ipv6/datagram.c | 11
-rw-r--r--  net/ipv6/ip6_udp_tunnel.c | 3
-rw-r--r--  net/ipv6/ip6mr.c | 4
-rw-r--r--  net/netrom/af_netrom.c | 15
-rw-r--r--  net/packet/af_packet.c | 8
-rw-r--r--  net/sctp/ipv6.c | 4
-rw-r--r--  net/sctp/protocol.c | 4
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 8
-rw-r--r--  net/sunrpc/cache.c | 10
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 8
-rw-r--r--  net/sunrpc/svc.c | 10
-rw-r--r--  net/sunrpc/svc_xprt.c | 5
-rw-r--r--  net/sunrpc/svcsock.c | 4
-rw-r--r--  net/tipc/netlink_compat.c | 50
-rw-r--r--  net/vmw_vsock/Kconfig | 20
-rw-r--r--  net/vmw_vsock/Makefile | 6
-rw-r--r--  net/vmw_vsock/af_vsock.c | 57
-rw-r--r--  net/vmw_vsock/virtio_transport.c | 620
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 999
-rw-r--r--  net/vmw_vsock/vmci_transport.c | 78
-rw-r--r--  net/xfrm/xfrm_state.c | 2
-rwxr-xr-x  scripts/checkstack.pl | 4
-rw-r--r--  scripts/kconfig/zconf.l | 4
-rw-r--r--  security/security.c | 7
-rw-r--r--  security/selinux/ss/policydb.c | 3
-rw-r--r--  security/yama/yama_lsm.c | 4
-rw-r--r--  sound/core/pcm.c | 2
-rw-r--r--  sound/firewire/bebob/bebob.c | 2
-rw-r--r--  sound/pci/cs46xx/dsp_spos.c | 3
-rw-r--r--  sound/pci/emu10k1/emufx.c | 5
-rw-r--r--  sound/pci/hda/hda_tegra.c | 2
-rw-r--r--  sound/pci/hda/patch_conexant.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 16
-rw-r--r--  sound/pci/rme9652/hdsp.c | 10
-rw-r--r--  sound/synth/emux/emux_hwdep.c | 7
-rw-r--r--  sound/usb/mixer.c | 10
-rw-r--r--  sound/usb/quirks-table.h | 3
-rw-r--r--  tools/perf/arch/x86/util/intel-pt.c | 11
-rw-r--r--  tools/perf/util/parse-events.c | 2
-rw-r--r--  tools/perf/util/pmu.c | 8
-rw-r--r--  tools/perf/util/svghelper.c | 2
-rw-r--r--  tools/virtio/linux/dma-mapping.h | 17
301 files changed, 10993 insertions, 1461 deletions
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 6b5e31f470dc..e09db014830f 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -92,6 +92,15 @@ Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:
Controls the number of trials to find a victim segment.
+What: /sys/fs/f2fs/<disk>/migration_granularity
+Date: October 2018
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:
+	Controls the migration granularity of garbage collection on a
+	large section. It lets GC move a subset of the segments of one
+	section per GC cycle, dispersing one heavyweight GC pass into
+	multiple lightweight ones.
+
What: /sys/fs/f2fs/<disk>/dir_level
Date: March 2014
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
diff --git a/Documentation/devicetree/bindings/display/msm/edrm.txt b/Documentation/devicetree/bindings/display/msm/edrm.txt
new file mode 100644
index 000000000000..f4856e8c8752
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/edrm.txt
@@ -0,0 +1,57 @@
+Qualcomm Technologies, Inc. EDRM KMS
+
+EDRM KMS implements the Linux DRM/KMS APIs to drive an early user interface
+on different panel interfaces. The EDRM driver provides an early service to
+the display subsystem, managing a subset of the data paths to specific panel
+interfaces. EDRM fills the gap between the bootloader and the main
+application UI; only early applications are expected to use it, and it is
+terminated when the main application UI is ready.
+
+Required properties:
+- compatible: Must be "qcom,msm-kms-edrm"
+
+Assigned Display Subnodes:
+- qcom,edrm-assigned-display: List of displays that eDRM can use.
+
+Subnode properties:
+- qcom,edrm-display-id@x: a node that contains the display information
+
+- qcom,label: The string that indicates the label of the display. The
+ label should match the label used in sde_display.
+ For example, the SDE display dtsi may define:
+ dsi_adv_7533_2: qcom,dsi-display@8 {...}
+
+ If eDRM wants to use that display, it should set the
+ label as:
+ qcom,label = "dsi_adv_7533_1";
+
+- qcom,intf-type: a string that indicates the interface type. Currently
+ it can be "dsi" or "hdmi".
+
+- qcom,assigned_plane : List of planes assigned to this display.
+ There must be at least one plane in this field.
+ Each plane in this field must also be
+ defined in qcom,sde-reserved-plane under sde_kms.
+ For example, SDE may contain the following
+ reserved plane:
+ &sde_kms {
+ qcom,sde-reserved-plane {
+ reserved_plane1: qcom,sde-reserved-plane@1 {
+ reg = <0x1>;
+ qcom,plane-name = "rgb3";
+ lm-stage = <5>;
+ }
+ Then the assigned_plane field can reference the
+ reserved plane like this:
+ qcom,assigned_plane = <&reserved_plane1>;
+
+Example:
+ msm_kms_edrm: qcom,msm_kms_edrm@900000 {
+ qcom,edrm-assigned-display {
+ qcom,edrm-display-id@0 {
+ qcom,label = "dsi_adv_7533_1";
+ qcom,intf-type = "dsi";
+ qcom,assigned_plane = <&reserved_plane1>;
+ }
+ }
+ }
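The binding above implies a matching parse step in the eDRM driver. Below is
a sketch of how such a driver might walk the subnodes with the standard OF
helpers; only the property names come from the binding, while the function
name and control flow are assumptions:

	/* Sketch: walk qcom,edrm-assigned-display subnodes.
	 * Names outside the binding are hypothetical. */
	#include <linux/errno.h>
	#include <linux/of.h>
	#include <linux/printk.h>

	static int edrm_parse_displays(struct device_node *edrm_node)
	{
		struct device_node *list, *disp, *plane;
		const char *label, *intf;

		list = of_get_child_by_name(edrm_node,
					    "qcom,edrm-assigned-display");
		if (!list)
			return -ENODEV;

		for_each_child_of_node(list, disp) {
			if (of_property_read_string(disp, "qcom,label", &label) ||
			    of_property_read_string(disp, "qcom,intf-type", &intf))
				continue;

			/* First phandle of the assigned plane list. */
			plane = of_parse_phandle(disp, "qcom,assigned_plane", 0);
			if (!plane)
				continue;
			pr_debug("edrm: display %s on %s\n", label, intf);
			/* ... match against sde_display, claim the plane ... */
			of_node_put(plane);
		}
		of_node_put(list);
		return 0;
	}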
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 9916c34e62b8..380e8453b4ce 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -113,6 +113,18 @@ In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltag
- qcom,wakeup-on-idle: if configured, the mmcqd thread will call
set_wake_up_idle(), thereby voting for it to be called on idle CPUs.
+ - qcom,late-sdhci-msm: if configured, the sdhci probe is deferred and
+   runs in late_init() context.
+
+ - qcom,tsens-id: id of the temperature sensor closest to the SDC host
+   controller.
+
+ - qcom,disable_scaling_threshold_temp: temperature below which clock
+   scaling is disabled.
+
+ - qcom,enable_scaling_threshold_temp: temperature above which clock
+   scaling is re-enabled after having been disabled.
+
Example:
aliases {
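A sketch of reading the three new properties in the host driver; the
thresholds can be negative (msm8996pro-auto-adp.dts later in this patch uses
<(-15)>), so they are read as s32. The function name and surrounding code are
assumptions, while the property strings come from the binding above:

	/* Sketch only: property names are from the binding above. */
	#include <linux/of.h>
	#include <linux/printk.h>

	static void sdhci_msm_parse_thermal(struct device_node *np)
	{
		u32 tsens_id = 0;
		s32 low = 0, high = 0;

		of_property_read_u32(np, "qcom,tsens-id", &tsens_id);
		/* Thresholds may be negative, hence the s32 readers. */
		of_property_read_s32(np, "qcom,disable_scaling_threshold_temp",
				     &low);
		of_property_read_s32(np, "qcom,enable_scaling_threshold_temp",
				     &high);
		pr_debug("tsens %u: scaling off below %d, back on above %d\n",
			 tsens_id, low, high);
	}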
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index e7aa730b927b..d4f8be8f63e1 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -471,7 +471,9 @@ manner. The codes are the following:
Note that there is no guarantee that every flag and associated mnemonic will
be present in all further kernel releases. Things get changed, the flags may
-be vanished or the reverse -- new added.
+vanish or, conversely, new ones may be added. The interpretation of their
+meaning might change in the future as well, so each consumer of these
+flags has to track the specific kernel version for the exact semantics.
The "Name" field will only be present on a mapping that has been named by
userspace, and will show the name passed in by userspace.
diff --git a/MAINTAINERS b/MAINTAINERS
index 13cc0bc83c1c..33b502bf5909 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11469,6 +11469,19 @@ S: Maintained
F: drivers/media/v4l2-core/videobuf2-*
F: include/media/videobuf2-*
+VIRTIO AND VHOST VSOCK DRIVER
+M: Stefan Hajnoczi <stefanha@redhat.com>
+L: kvm@vger.kernel.org
+L: virtualization@lists.linux-foundation.org
+L: netdev@vger.kernel.org
+S: Maintained
+F: include/linux/virtio_vsock.h
+F: include/uapi/linux/virtio_vsock.h
+F: net/vmw_vsock/virtio_transport_common.c
+F: net/vmw_vsock/virtio_transport.c
+F: drivers/vhost/vsock.c
+F: drivers/vhost/vsock.h
+
VIRTUAL SERIO DEVICE DRIVER
M: Stephen Chandler Paul <thatslyude@gmail.com>
S: Maintained
diff --git a/Makefile b/Makefile
index 142b5348920c..04c115e058f4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 169
+SUBLEVEL = 172
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index a84b606f94db..c5e693497838 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -1,3 +1,11 @@
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+dtbo-$(CONFIG_ARCH_MSM8996) += \
+ msm8996-mtp-overlay.dtbo
+
+msm8996-mtp-overlay.dtbo-base := msm8996pro-v1.1-pmi8996.dtb \
+ msm8996-v3-pmi8996.dtb
+
+else
dtb-$(CONFIG_ARCH_MSM8996) += msm8996-v2-pmi8994-cdp.dtb \
msm8996-v2-pmi8994-mtp.dtb \
msm8996-v2-pmi8994-pmk8001-cdp.dtb \
@@ -115,6 +123,7 @@ dtb-$(CONFIG_ARCH_MSM8996) += msm8996-v2-pmi8994-cdp.dtb \
apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dtb \
apq8096-v3-pmi8996-dragonboard.dtb \
msm8996-auto-mizar.dtb
+endif
dtb-$(CONFIG_MSM_GVM_QUIN) += vplatform-lfv-msm8996-telematics.dtb \
vplatform-lfv-msm8996-ivi.dtb \
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 62c72942eb46..68846efc4e48 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1643,11 +1643,27 @@
};
};
-/ {
+&sde_kms {
qcom,sde-reserved-plane {
- qcom,sde-plane-id@0 {
+ reserved_plane0: qcom,sde-reserved-plane@0 {
reg = <0x0>;
qcom,plane-name = "vig0";
+ lm-stage = <4>;
+ };
+ reserved_plane1: qcom,sde-reserved-plane@1 {
+ reg = <0x1>;
+ qcom,plane-name = "vig3";
+ lm-stage = <5>;
+ };
+ };
+};
+
+&msm_kms_edrm {
+ qcom,edrm-assigned-display {
+ qcom,edrm-display-id@0 {
+ qcom,label = "dsi_adv_7533_1";
+ qcom,intf-type = "dsi";
+ qcom,assigned_plane = <&reserved_plane1>;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index b0ff859d560f..793d91ee8da9 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -833,7 +833,7 @@
};
};
- pinctrl@01010000 {
+ pinctrl@1010000 {
pcie2 {
pcie2_perst_default: pcie2_perst_default {
mux {
@@ -1468,11 +1468,28 @@
};
};
-/ {
+&sde_kms {
qcom,sde-reserved-plane {
- qcom,sde-plane-id@0 {
+ reserved_plane0: qcom,sde-reserved-plane@0 {
reg = <0x0>;
qcom,plane-name = "vig0";
+ lm-stage = <4>;
+ };
+ reserved_plane1: qcom,sde-reserved-plane@1 {
+ reg = <0x1>;
+ qcom,plane-name = "rgb3";
+ lm-stage = <5>;
+ };
+ };
+};
+
+&msm_kms_edrm {
+ qcom,edrm-assigned-display {
+ qcom,edrm-display-id@0 {
+ qcom,label = "dsi_adv_7533_1";
+ qcom,intf-type = "dsi";
+ qcom,ctl-id = <1>;
+ qcom,assigned_plane = <&reserved_plane1>;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi b/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi
index 301024e80ee3..32d57e6d6902 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi
@@ -537,13 +537,15 @@
qcom,enable-load = <0>;
qcom,disable-load = <0>;
+ qcom,msm_ext_disp = <&msm_ext_disp>;
clocks = <&clock_mmss clk_mdss_mdp_vote_clk>,
<&clock_mmss clk_mdss_ahb_clk>,
<&clock_mmss clk_mdss_hdmi_clk>,
<&clock_mmss clk_mdss_hdmi_ahb_clk>,
<&clock_mmss clk_mdss_extpclk_clk>;
- clock-names = "mdp_core_clk", "iface_clk",
- "core_clk", "alt_iface_clk", "extp_clk";
+ clock-names = "hpd_mdp_core_clk", "hpd_iface_clk",
+ "hpd_core_clk", "hpd_alt_iface_clk",
+ "core_extp_clk";
qcom,hdmi-tx-hpd = <&pm8994_mpps 4 0>;
qcom,mdss-fb-map = <&mdss_fb2>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-mtp-overlay.dts b/arch/arm/boot/dts/qcom/msm8996-mtp-overlay.dts
new file mode 100644
index 000000000000..7f31ceaab42b
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8996-mtp-overlay.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include "msm8996-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM 8996 MTP";
+ compatible = "qcom,mtp";
+ qcom,board-id = <8 0>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
index 9442239c56a5..f17d8c1f3c46 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,7 +10,7 @@
* GNU General Public License for more details.
*/
-#include "msm8996-pinctrl.dtsi"
+#include <dt-bindings/clock/msm-clocks-8996.h>
#include "msm8996-camera-sensor-mtp.dtsi"
#include "msm8996-wsa881x.dtsi"
@@ -182,17 +182,6 @@
qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
-
- chan@7c {
- label = "xo_therm_buf";
- reg = <0x7c>;
- qcom,decimation = <0>;
- qcom,pre-div-channel-scaling = <0>;
- qcom,calibration-type = "ratiometric";
- qcom,scale-function = <4>;
- qcom,hw-settle-time = <2>;
- qcom,fast-avg-setup = <0>;
- };
};
&pm8994_adc_tm {
diff --git a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
index 3e2e7bbf56f6..dc964124829f 100644
--- a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,7 +11,7 @@
*/
&soc {
- tlmm: pinctrl@01010000 {
+ tlmm: pinctrl@1010000 {
compatible = "qcom,msm8996-pinctrl";
reg = <0x01010000 0x300000>;
interrupts = <0 208 0>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-pm.dtsi b/arch/arm/boot/dts/qcom/msm8996-pm.dtsi
index 74c964bc466d..501da260bcdf 100644
--- a/arch/arm/boot/dts/qcom/msm8996-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,7 @@
#include <dt-bindings/msm/pm.h>
&soc {
- qcom,spm@9A10000 {
+ qcom,spm@9a10000 {
compatible = "qcom,spm-v2";
#address-cells = <1>;
#size-cells = <1>;
@@ -525,13 +525,13 @@
<69 145>;
};
- rpm_msg_ram: memory@0x200000 {
+ rpm_msg_ram: memory@200000 {
compatible = "qcom,rpm-msg-ram";
reg = <0x200000 0x1000>,
<0x290000 0x1000>;
};
- rpm_code_ram: rpm-memory@0x68000 {
+ rpm_code_ram: rpm-memory@68000 {
compatible = "qcom,rpm-code-ram";
reg = <0x68000 0x5000>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi
index 1c81bc433374..9d1655982cce 100644
--- a/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -259,7 +259,7 @@
<&clock_mmss clk_ext_pclk0_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
- qcom,dsi-panel = <&dsi_adv7533_720p>;
+ qcom,dsi-panel = <&dsi_adv7533_1080p>;
vddio-supply = <&pm8994_l14>;
qcom,bridge-index = <0>;
};
@@ -275,7 +275,7 @@
<&clock_mmss clk_ext_pclk1_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
- qcom,dsi-panel = <&dsi_adv7533_720p>;
+ qcom,dsi-panel = <&dsi_adv7533_1080p>;
vddio-supply = <&pm8994_l14>;
qcom,bridge-index = <1>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
index cab07d995b79..f5502702981c 100644
--- a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -537,3 +537,12 @@
};
};
};
+
+&soc {
+ msm_kms_edrm: qcom,msm_kms_edrm@900000 {
+
+ compatible = "qcom,msm-kms-edrm";
+ contiguous-region = <&cont_splash_mem &cont_splash_mem_hdmi>;
+
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm8996-v3-pmi8996.dts b/arch/arm/boot/dts/qcom/msm8996-v3-pmi8996.dts
new file mode 100644
index 000000000000..2337447c048e
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8996-v3-pmi8996.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8996-v3.dtsi"
+#include "msm-pmi8996.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM 8996 v3 + PMI8996";
+ compatible = "qcom,msm8996";
+ qcom,pmic-id = <0x20009 0x10013 0x0 0x0>;
+ qcom,msm-id = <246 0x30001>;
+ qcom,pmic-name = "PMI8996";
+};
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index 192345d3c67c..b626fdb16964 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -257,6 +257,7 @@
};
};
+#include "msm8996-pinctrl.dtsi"
#include "msm8996-ion.dtsi"
#include "msm8996-mdss.dtsi"
#include "msm8996-mdss-pll.dtsi"
@@ -317,7 +318,7 @@
qcom,clock-source-override;
};
- intc: interrupt-controller@09bc0000 {
+ intc: interrupt-controller@9bc0000 {
compatible = "arm,gic-v3";
reg = <0x9bc0000 0x10000>, /* GICD */
<0x9c00000 0x100000>; /* GICR * 4 */
@@ -331,7 +332,7 @@
interrupts = <1 9 4>;
- gic-its@09BE0000 {
+ gic-its@9be0000 {
compatible = "arm,gic-v3-its";
msi-contoller;
reg = <0x9be0000 0x20000>;
@@ -360,7 +361,7 @@
qcom,pipe-attr-ee;
};
- uartblsp2dm1: serial@075b0000 {
+ uartblsp2dm1: serial@75b0000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x75b0000 0x1000>;
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
@@ -369,7 +370,7 @@
clock-names = "core", "iface";
};
- uartblsp1dm1: serial@07570000 {
+ uartblsp1dm1: serial@7570000 {
compatible = "qcom,msm-lsuart-v14";
reg = <0x7570000 0x1000>;
interrupts = <0 108 0>;
@@ -405,7 +406,7 @@
};
- dma_blsp1: qcom,sps-dma@0x7544000{ /* BLSP1 */
+ dma_blsp1: qcom,sps-dma@7544000{ /* BLSP1 */
#dma-cells = <4>;
compatible = "qcom,sps-dma";
reg = <0x7544000 0x2b000>;
@@ -413,7 +414,7 @@
qcom,summing-threshold = <0x10>;
};
- dma_blsp2: qcom,sps-dma@0x7584000{ /* BLSP2 */
+ dma_blsp2: qcom,sps-dma@7584000{ /* BLSP2 */
#dma-cells = <4>;
compatible = "qcom,sps-dma";
reg = <0x7584000 0x2b000>;
@@ -496,7 +497,7 @@
pinctrl-2 = <&i2c_8_bitbang>;
};
- blsp1_uart2: uart@07570000 { /* BLSP1 UART2 */
+ blsp1_uart2: uart@7570000 { /* BLSP1 UART2 */
compatible = "qcom,msm-hsuart-v14";
reg = <0x07570000 0x1000>,
<0x7544000 0x2b000>;
@@ -921,7 +922,7 @@
pinctrl-1 = <&spkr_i2s_clk_active>;
};
- tspp: msm_tspp@075e7000 {
+ tspp: msm_tspp@75e7000 {
compatible = "qcom,msm_tspp";
reg = <0x075e7000 0x1000>, /* MSM_TSIF0_PHYS */
<0x075e8000 0x1000>, /* MSM_TSIF1_PHYS */
@@ -1097,7 +1098,7 @@
status = "disabled";
};
- sdhc_2: sdhci@74A4900 {
+ sdhc_2: sdhci@74a4900 {
compatible = "qcom,sdhci-msm";
reg = <0x74A4900 0x314>, <0x74A4000 0x800>;
reg-names = "hc_mem", "core_mem";
@@ -1302,7 +1303,7 @@
status = "disabled";
};
- pcie0: qcom,pcie@00600000 {
+ pcie0: qcom,pcie@600000 {
compatible = "qcom,pci-msm";
cell-index = <0>;
@@ -1458,7 +1459,7 @@
<0>, <0>, <0>, <0>, <0>, <0>, <0>;
};
- pcie1: qcom,pcie@00608000 {
+ pcie1: qcom,pcie@608000 {
compatible = "qcom,pci-msm";
cell-index = <1>;
@@ -1610,7 +1611,7 @@
<0>, <0>, <0>, <0>, <0>, <0>, <0>;
};
- pcie2: qcom,pcie@00610000 {
+ pcie2: qcom,pcie@610000 {
compatible = "qcom,pci-msm";
cell-index = <2>;
@@ -2552,7 +2553,7 @@
qcom,ipa-advertise-sg-support;
};
- pcie_ep: qcom,pcie-ep@00600000 {
+ pcie_ep: qcom,pcie-ep@600000 {
compatible = "qcom,pcie-ep";
reg = <0x0c001000 0x1000>,
@@ -4258,7 +4259,7 @@
"l2_irq_err_0", "l2_irq_err_1", "l3_irq";
};
- qcom,m4m_erp64@9A40000 {
+ qcom,m4m_erp64@9a40000 {
compatible = "qcom,m4m_erp";
reg = <0x9A40000 0x40000>;
interrupts = <0 22 0>;
@@ -4270,7 +4271,7 @@
interrupts = <0 23 4>;
};
- timer@09840000 {
+ timer@9840000 {
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -4278,7 +4279,7 @@
reg = <0x09840000 0x1000>;
clock-frequency = <19200000>;
- frame@09850000 {
+ frame@9850000 {
frame-number = <0>;
interrupts = <0 31 0x4>,
<0 30 0x4>;
@@ -4286,42 +4287,42 @@
<0x09860000 0x1000>;
};
- frame@09870000 {
+ frame@9870000 {
frame-number = <1>;
interrupts = <0 32 0x4>;
reg = <0x09870000 0x1000>;
status = "disabled";
};
- frame@09880000 {
+ frame@9880000 {
frame-number = <2>;
interrupts = <0 33 0x4>;
reg = <0x09880000 0x1000>;
status = "disabled";
};
- frame@09890000 {
+ frame@9890000 {
frame-number = <3>;
interrupts = <0 34 0x4>;
reg = <0x09890000 0x1000>;
status = "disabled";
};
- frame@098a0000 {
+ frame@98a0000 {
frame-number = <4>;
interrupts = <0 35 0x4>;
reg = <0x098a0000 0x1000>;
status = "disabled";
};
- frame@098b0000 {
+ frame@98b0000 {
frame-number = <5>;
interrupts = <0 36 0x4>;
reg = <0x098b0000 0x1000>;
status = "disabled";
};
- frame@098c0000 {
+ frame@98c0000 {
frame-number = <6>;
interrupts = <0 37 0x4>;
reg = <0x098c0000 0x1000>;
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts
index e17556ac43be..f4b209cf69cc 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts
+++ b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts
@@ -73,11 +73,20 @@
};
};
+&sdhc_1 {
+ qcom,tsens-id = <0>;
+};
+
&sdhc_2 {
+ compatible = "qcom,late-sdhci-msm";
+
cd-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>;
pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on_sbc>;
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
&sdc2_cd_on_sbc>;
+ qcom,tsens-id = <10>;
+ qcom,disable_scaling_threshold_temp = <(-15)>;
+ qcom,enable_scaling_threshold_temp = <(-5)>;
};
&i2c_7 {
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-v1.1-pmi8996.dts b/arch/arm/boot/dts/qcom/msm8996pro-v1.1-pmi8996.dts
new file mode 100644
index 000000000000..321ebb275603
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8996pro-v1.1-pmi8996.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8996pro-v1.1.dtsi"
+#include "msm-pmi8996.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM 8996pro v1.1 + PMI8996";
+ compatible = "qcom,msm8996";
+ qcom,pmic-id = <0x20009 0x10013 0x0 0x0>;
+ qcom,msm-id = <305 0x10001>;
+ qcom,pmic-name = "PMI8996";
+};
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts
index 96bc2b9d4562..9825176351e3 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts
@@ -271,6 +271,7 @@
&usb2s {
status = "okay";
+ qcom,no-wakeup-src-in-hostmode;
};
&qusb_phy1 {
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 3c6672b3796b..7f5df8992008 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -97,7 +97,7 @@ int __init imx6sx_cpuidle_init(void)
* except for power up sw2iso which need to be
* larger than LDO ramp up time.
*/
- imx_gpc_set_arm_power_up_timing(2, 1);
+ imx_gpc_set_arm_power_up_timing(0xf, 1);
imx_gpc_set_arm_power_down_timing(1, 1);
return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index c620078b587c..ad998649d71f 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -179,6 +179,8 @@ CONFIG_NET_CLS_U32=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_CLS_ACT=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
CONFIG_CFG80211=y
# CONFIG_CFG80211_DEFAULT_PS is not set
CONFIG_MAC80211=y
@@ -353,6 +355,7 @@ CONFIG_RTC_DRV_PL031=y
CONFIG_VIRTIO_PCI=y
# CONFIG_VIRTIO_PCI_LEGACY is not set
CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_STAGING=y
diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig
index c5714c4674a7..6f282aa1044e 100644
--- a/arch/arm64/configs/msm-auto-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-perf_defconfig
@@ -276,7 +276,6 @@ CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1
CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_BONDING=y
diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig
index d84126bf2cfa..07a636b76d6e 100644
--- a/arch/arm64/configs/msm-auto_defconfig
+++ b/arch/arm64/configs/msm-auto_defconfig
@@ -277,7 +277,6 @@ CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1
CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_BONDING=y
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 6a889e943f4e..5385adcd157d 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -23,6 +23,8 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
+#define HCR_API (UL(1) << 41)
+#define HCR_APK (UL(1) << 40)
#define HCR_ID (UL(1) << 33)
#define HCR_CD (UL(1) << 32)
#define HCR_RW_SHIFT 31
@@ -81,6 +83,7 @@
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
/* TCR_EL2 Registers bits */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index d5d4afbf0518..92bed80e7ac8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -32,6 +32,7 @@
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
+#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
@@ -503,7 +504,7 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
ret
/* Hyp configuration. */
-2: mov x0, #(1 << 31) // 64-bit EL1
+2: mov_q x0, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x0
/* Generic timers. */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2c93e3b7d318..62c54336ab4f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -813,6 +813,7 @@ config SIBYTE_SWARM
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
@@ -835,6 +836,7 @@ config SIBYTE_SENTOSA
select SYS_HAS_CPU_SB1
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
@@ -848,6 +850,7 @@ config SIBYTE_BIGSUR
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SNI_RM
bool "SNI RM200/300/400"
@@ -3031,6 +3034,7 @@ config MIPS32_O32
config MIPS32_N32
bool "Kernel support for n32 binaries"
depends on 64BIT
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select COMPAT
select MIPS32_COMPAT
select SYSVIPC_COMPAT if SYSVIPC
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 37fe58c19a90..542c3ede9722 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -13,6 +13,7 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
+#include "../../../../include/linux/sizes.h"
int main(int argc, char *argv[])
{
@@ -45,11 +46,11 @@ int main(int argc, char *argv[])
vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
/*
- * Align with 16 bytes: "greater than that used for any standard data
- * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
+ * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
+ * which may be as large as 64KB depending on the kernel configuration.
*/
- vmlinuz_load_addr += (16 - vmlinux_size % 16);
+ vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
printf("0x%llx\n", vmlinuz_load_addr);
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index cf661a2fb141..16fade4f49dd 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -189,6 +189,11 @@ static inline int pmd_bad(pmd_t pmd)
static inline int pmd_present(pmd_t pmd)
{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return pmd_val(pmd) & _PAGE_PRESENT;
+#endif
+
return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb849b10e..288b58b00dc8 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
int irq;
struct irq_chip *msi;
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+ return 0;
+ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
index b3d6bf23a662..3ef3fb658136 100644
--- a/arch/mips/sibyte/common/Makefile
+++ b/arch/mips/sibyte/common/Makefile
@@ -1,4 +1,5 @@
obj-y := cfe.o
+obj-$(CONFIG_SWIOTLB) += dma.o
obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
new file mode 100644
index 000000000000..eb47a94f3583
--- /dev/null
+++ b/arch/mips/sibyte/common/dma.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DMA support for Broadcom SiByte platforms.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ */
+
+#include <linux/swiotlb.h>
+#include <asm/bootinfo.h>
+
+void __init plat_swiotlb_setup(void)
+{
+ swiotlb_init(1);
+}
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 5c2199857aa8..a3550e8f1a77 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -15,7 +15,7 @@
RELA = 7
RELACOUNT = 0x6ffffff9
- .text
+ .data
/* A procedure descriptor used when booting this as a COFF file.
* When making COFF, this comes first in the link and we're
* linked at 0x500000.
@@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9
.globl _zimage_start_opd
_zimage_start_opd:
.long 0x500000, 0, 0, 0
+ .text
+ b _zimage_start
#ifdef __powerpc64__
.balign 8
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index 8e4a038e9ac2..fc2d30e1361d 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -191,6 +191,8 @@ CONFIG_NET_CLS_U32=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_CLS_ACT=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_RFKILL=y
@@ -223,7 +225,6 @@ CONFIG_DM_MIRROR=y
CONFIG_DM_ZERO=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1
CONFIG_DM_VERITY_FEC=y
CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
@@ -380,6 +381,7 @@ CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_STAGING=y
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 8baaff5af0b5..75b9d43069f1 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -77,6 +77,7 @@ static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c048d0d70cc4..2cb49ac1b2b2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1200,7 +1200,7 @@ asmlinkage void kvm_spurious_fault(void);
"cmpb $0, kvm_rebooting \n\t" \
"jne 668b \n\t" \
__ASM_SIZE(push) " $666b \n\t" \
- "call kvm_spurious_fault \n\t" \
+ "jmp kvm_spurious_fault \n\t" \
".popsection \n\t" \
_ASM_EXTABLE(666b, 667b)
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index d76f13d6d8d6..ec894bf5eeb0 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -173,6 +173,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
struct mtrr_gentry gentry;
void __user *arg = (void __user *) __arg;
+ memset(&gentry, 0, sizeof(gentry));
+
switch (cmd) {
case MTRRIOC_ADD_ENTRY:
case MTRRIOC_SET_ENTRY:
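The memset() added above clears struct mtrr_gentry before the switch runs, so
the copy_to_user() of gentry later in this function cannot leak uninitialized
stack bytes (padding included) for commands that fill only some fields. A
generic sketch of the pattern, with made-up names:

	/* Sketch of the infoleak-hardening pattern; names are hypothetical. */
	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/uaccess.h>

	struct demo_reply {
		unsigned int base;
		unsigned char type;	/* the compiler pads this struct */
	};

	static long demo_ioctl(void __user *arg)
	{
		struct demo_reply r;

		memset(&r, 0, sizeof(r));	/* zero padding + unset fields */
		r.base = 0x1000;
		r.type = 1;
		if (copy_to_user(arg, &r, sizeof(r)))
			return -EFAULT;
		return 0;
	}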
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e4b5fd72ca24..3bdb2e747b89 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6163,9 +6163,24 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
- skip_emulated_instruction(vcpu);
trace_kvm_fast_mmio(gpa);
- return 1;
+		/*
+		 * Doing kvm_skip_emulated_instruction() depends on undefined
+		 * behavior: Intel's manual doesn't mandate
+		 * VM_EXIT_INSTRUCTION_LEN to be set in the VMCS when an EPT
+		 * MISCONFIG occurs. While real hardware was observed to set
+		 * it, other hypervisors (namely Hyper-V) don't, so we would
+		 * end up advancing the IP by some random value. Disable fast
+		 * mmio when running nested and keep it for real hardware, in
+		 * the hope that VM_EXIT_INSTRUCTION_LEN is always set correctly.
+		 */
+ if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ else
+ return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
+ NULL, 0) == EMULATE_DONE;
}
ret = handle_mmio_page_fault(vcpu, gpa, true);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index aa1a0277a678..1a934bb8ed1c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5436,7 +5436,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
* handle watchpoints yet, those would be handled in
* the emulate_ops.
*/
- if (kvm_vcpu_check_breakpoint(vcpu, &r))
+ if (!(emulation_type & EMULTYPE_SKIP) &&
+ kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
ctxt->interruptibility = 0;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f9fcdf243a0a..100701a95008 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -249,6 +249,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
limits->max_sectors = max_sectors;
+ q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a113dc1e3eb7..f86fc869fbbb 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -213,6 +213,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
spin_lock_irq(q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
+ q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
spin_unlock_irq(q->queue_lock);
return ret;
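This hunk and the blk-settings.c one above keep backing_dev_info->io_pages in
sync with the request-size limit by converting it to pages: with 4 KiB pages
(PAGE_SHIFT = 12), a page holds 2^(12-9) = 8 sectors of 512 bytes, hence the
shift by PAGE_SHIFT - 9, while the sysfs value is stored in KiB, hence
PAGE_SHIFT - 10. A worked check with assumed numbers:

	/* Check of the two shifts above, assuming 4 KiB pages. */
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned int max_sectors = 2560;	/* 512 B sectors */
		unsigned int max_sectors_kb = 1280;	/* same limit in KiB */

		/* Both print 320 pages. */
		printf("%u\n", max_sectors >> (PAGE_SHIFT - 9));
		printf("%u\n", max_sectors_kb >> (PAGE_SHIFT - 10));
		return 0;
	}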
diff --git a/crypto/authenc.c b/crypto/authenc.c
index b7290c5b1eaa..5c25005ff398 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+ /*
+ * RTA_OK() didn't align the rtattr's payload when validating that it
+ * fits in the buffer. Yet, the keys should start on the next 4-byte
+ * aligned boundary. To avoid confusion, require that the rtattr
+ * payload be exactly the param struct, which has a 4-byte aligned size.
+ */
+ if (RTA_PAYLOAD(rta) != sizeof(*param))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
param = RTA_DATA(rta);
keys->enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ key += rta->rta_len;
+ keylen -= rta->rta_len;
if (keylen < keys->enckeylen)
return -EINVAL;
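For context, the blob parsed by crypto_authenc_extractkeys() is built by its
callers (IPsec, for example) as an rtattr header carrying struct
crypto_authenc_key_param, followed by the authentication key and then the
encryption key; the stricter check above requires the rtattr payload to be
exactly that param struct. A simplified sketch of such a builder follows; the
function name and buffer handling are assumptions:

	/* Sketch: lay out an authenc key blob (buffer checks omitted). */
	#include <linux/kernel.h>
	#include <linux/rtnetlink.h>
	#include <linux/string.h>
	#include <crypto/authenc.h>

	static unsigned int build_authenc_key(u8 *buf,
					      const u8 *authkey,
					      unsigned int authkeylen,
					      const u8 *enckey,
					      unsigned int enckeylen)
	{
		struct rtattr *rta = (struct rtattr *)buf;
		struct crypto_authenc_key_param *param;
		unsigned int off = RTA_SPACE(sizeof(*param));

		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enckeylen);

		/* Auth key sits right after the rtattr, enc key after it. */
		memcpy(buf + off, authkey, authkeylen);
		memcpy(buf + off + authkeylen, enckey, enckeylen);
		return off + authkeylen + enckeylen;
	}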
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index fa0c4567f697..5fdf3e532310 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -276,7 +276,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
struct aead_request *req = areq->data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
- aead_request_complete(req, err);
+ authenc_esn_request_complete(req, err);
}
static int crypto_authenc_esn_decrypt(struct aead_request *req)
diff --git a/crypto/cts.c b/crypto/cts.c
index e467ec0acf9f..e65688d6a4ca 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -137,8 +137,8 @@ static int crypto_cts_encrypt(struct blkcipher_desc *desc,
lcldesc.info = desc->info;
lcldesc.flags = desc->flags;
- if (tot_blocks == 1) {
- err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize);
+ if (tot_blocks <= 1) {
+ err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, nbytes);
} else if (nbytes <= bsize * 2) {
err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes);
} else {
@@ -232,8 +232,8 @@ static int crypto_cts_decrypt(struct blkcipher_desc *desc,
lcldesc.info = desc->info;
lcldesc.flags = desc->flags;
- if (tot_blocks == 1) {
- err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize);
+ if (tot_blocks <= 1) {
+ err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, nbytes);
} else if (nbytes <= bsize * 2) {
err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes);
} else {
diff --git a/drivers/Makefile b/drivers/Makefile
index d563f5c13544..d3f690ab5b27 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_OF) += of/
obj-$(CONFIG_SSB) += ssb/
obj-$(CONFIG_BCMA) += bcma/
obj-$(CONFIG_VHOST_RING) += vhost/
+obj-$(CONFIG_VHOST) += vhost/
obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 1c2b846c5776..f28b4949cb9d 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list)
}
}
+static bool acpi_power_resource_is_dup(union acpi_object *package,
+ unsigned int start, unsigned int i)
+{
+ acpi_handle rhandle, dup;
+ unsigned int j;
+
+ /* The caller is expected to check the package element types */
+ rhandle = package->package.elements[i].reference.handle;
+ for (j = start; j < i; j++) {
+ dup = package->package.elements[j].reference.handle;
+ if (dup == rhandle)
+ return true;
+ }
+
+ return false;
+}
+
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list)
{
@@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
err = -ENODEV;
break;
}
+
+ /* Some ACPI tables contain duplicate power resource references */
+ if (acpi_power_resource_is_dup(package, start, i))
+ continue;
+
err = acpi_add_power_resource(rhandle);
if (err)
break;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 0346e46e2871..ecca4ae248e0 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -33,6 +33,9 @@ static struct kset *system_kset;
#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
+#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
+ struct driver_attribute driver_attr_##_name = \
+ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
static int __must_check bus_rescan_devices_helper(struct device *dev,
void *data);
@@ -198,7 +201,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
bus_put(bus);
return err;
}
-static DRIVER_ATTR_WO(unbind);
+static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
/*
* Manually attach a device to a driver.
@@ -234,7 +237,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
bus_put(bus);
return err;
}
-static DRIVER_ATTR_WO(bind);
+static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
{
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 7eac581912f0..599e31899a5d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -81,7 +81,7 @@
#include <asm/uaccess.h>
static DEFINE_IDR(loop_index_idr);
-static DEFINE_MUTEX(loop_index_mutex);
+static DEFINE_MUTEX(loop_ctl_mutex);
static int max_part;
static int part_shift;
@@ -1044,7 +1044,7 @@ static int loop_clr_fd(struct loop_device *lo)
*/
if (atomic_read(&lo->lo_refcnt) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
return 0;
}
@@ -1094,12 +1094,12 @@ static int loop_clr_fd(struct loop_device *lo)
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
loop_unprepare_queue(lo);
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
/*
- * Need not hold lo_ctl_mutex to fput backing file.
- * Calling fput holding lo_ctl_mutex triggers a circular
+ * Need not hold loop_ctl_mutex to fput backing file.
+ * Calling fput holding loop_ctl_mutex triggers a circular
* lock dependency possibility warning as fput can take
- * bd_mutex which is usually taken before lo_ctl_mutex.
+ * bd_mutex which is usually taken before loop_ctl_mutex.
*/
fput(filp);
return 0;
@@ -1121,6 +1121,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
return -EINVAL;
+ if (lo->lo_offset != info->lo_offset ||
+ lo->lo_sizelimit != info->lo_sizelimit) {
+ sync_blockdev(lo->lo_device);
+ kill_bdev(lo->lo_device);
+ }
+
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
@@ -1148,11 +1154,20 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
goto exit;
if (lo->lo_offset != info->lo_offset ||
- lo->lo_sizelimit != info->lo_sizelimit)
+ lo->lo_sizelimit != info->lo_sizelimit) {
+ /* kill_bdev should have truncated all the pages */
+ if (lo->lo_device->bd_inode->i_mapping->nrpages) {
+ err = -EAGAIN;
+ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ __func__, lo->lo_number, lo->lo_file_name,
+ lo->lo_device->bd_inode->i_mapping->nrpages);
+ goto exit;
+ }
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
goto exit;
}
+ }
loop_config_discard(lo);
@@ -1358,20 +1373,37 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
+ int err = 0;
+
if (lo->lo_state != Lo_bound)
return -ENXIO;
if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
return -EINVAL;
+ if (lo->lo_queue->limits.logical_block_size != arg) {
+ sync_blockdev(lo->lo_device);
+ kill_bdev(lo->lo_device);
+ }
+
blk_mq_freeze_queue(lo->lo_queue);
+ /* kill_bdev should have truncated all the pages */
+ if (lo->lo_queue->limits.logical_block_size != arg &&
+ lo->lo_device->bd_inode->i_mapping->nrpages) {
+ err = -EAGAIN;
+ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ __func__, lo->lo_number, lo->lo_file_name,
+ lo->lo_device->bd_inode->i_mapping->nrpages);
+ goto out_unfreeze;
+ }
+
blk_queue_logical_block_size(lo->lo_queue, arg);
loop_update_dio(lo);
-
+out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
- return 0;
+ return err;
}
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
@@ -1380,7 +1412,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
struct loop_device *lo = bdev->bd_disk->private_data;
int err;
- mutex_lock_nested(&lo->lo_ctl_mutex, 1);
+ mutex_lock_nested(&loop_ctl_mutex, 1);
switch (cmd) {
case LOOP_SET_FD:
err = loop_set_fd(lo, mode, bdev, arg);
@@ -1389,7 +1421,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
err = loop_change_fd(lo, bdev, arg);
break;
case LOOP_CLR_FD:
- /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
+ /* loop_clr_fd would have unlocked loop_ctl_mutex on success */
err = loop_clr_fd(lo);
if (!err)
goto out_unlocked;
@@ -1430,7 +1462,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
out_unlocked:
return err;
@@ -1563,16 +1595,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
switch(cmd) {
case LOOP_SET_STATUS:
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&loop_ctl_mutex);
err = loop_set_status_compat(
lo, (const struct compat_loop_info __user *) arg);
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
break;
case LOOP_GET_STATUS:
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&loop_ctl_mutex);
err = loop_get_status_compat(
lo, (struct compat_loop_info __user *) arg);
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
break;
case LOOP_SET_CAPACITY:
case LOOP_CLR_FD:
@@ -1595,9 +1627,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
static int lo_open(struct block_device *bdev, fmode_t mode)
{
struct loop_device *lo;
- int err = 0;
+ int err;
- mutex_lock(&loop_index_mutex);
+ err = mutex_lock_killable(&loop_ctl_mutex);
+ if (err)
+ return err;
lo = bdev->bd_disk->private_data;
if (!lo) {
err = -ENXIO;
@@ -1606,18 +1640,20 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&lo->lo_refcnt);
out:
- mutex_unlock(&loop_index_mutex);
+ mutex_unlock(&loop_ctl_mutex);
return err;
}
-static void __lo_release(struct loop_device *lo)
+static void lo_release(struct gendisk *disk, fmode_t mode)
{
+ struct loop_device *lo;
int err;
+ mutex_lock(&loop_ctl_mutex);
+ lo = disk->private_data;
if (atomic_dec_return(&lo->lo_refcnt))
- return;
+ goto out_unlock;
- mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
/*
* In autoclear mode, stop the loop thread
@@ -1634,14 +1670,8 @@ static void __lo_release(struct loop_device *lo)
loop_flush(lo);
}
- mutex_unlock(&lo->lo_ctl_mutex);
-}
-
-static void lo_release(struct gendisk *disk, fmode_t mode)
-{
- mutex_lock(&loop_index_mutex);
- __lo_release(disk->private_data);
- mutex_unlock(&loop_index_mutex);
+out_unlock:
+ mutex_unlock(&loop_ctl_mutex);
}
static const struct block_device_operations lo_fops = {
@@ -1680,10 +1710,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
struct loop_device *lo = ptr;
struct loop_func_table *xfer = data;
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&loop_ctl_mutex);
if (lo->lo_encryption == xfer)
loop_release_xfer(lo);
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
return 0;
}
@@ -1846,7 +1876,6 @@ static int loop_add(struct loop_device **l, int i)
if (!part_shift)
disk->flags |= GENHD_FL_NO_PART_SCAN;
disk->flags |= GENHD_FL_EXT_DEVT;
- mutex_init(&lo->lo_ctl_mutex);
atomic_set(&lo->lo_refcnt, 0);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
@@ -1925,7 +1954,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
struct kobject *kobj;
int err;
- mutex_lock(&loop_index_mutex);
+ mutex_lock(&loop_ctl_mutex);
err = loop_lookup(&lo, MINOR(dev) >> part_shift);
if (err < 0)
err = loop_add(&lo, MINOR(dev) >> part_shift);
@@ -1933,7 +1962,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
kobj = NULL;
else
kobj = get_disk(lo->lo_disk);
- mutex_unlock(&loop_index_mutex);
+ mutex_unlock(&loop_ctl_mutex);
*part = 0;
return kobj;
@@ -1943,9 +1972,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
unsigned long parm)
{
struct loop_device *lo;
- int ret = -ENOSYS;
+ int ret;
+
+ ret = mutex_lock_killable(&loop_ctl_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&loop_index_mutex);
+ ret = -ENOSYS;
switch (cmd) {
case LOOP_CTL_ADD:
ret = loop_lookup(&lo, parm);
@@ -1959,19 +1992,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
ret = loop_lookup(&lo, parm);
if (ret < 0)
break;
- mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_state != Lo_unbound) {
ret = -EBUSY;
- mutex_unlock(&lo->lo_ctl_mutex);
break;
}
if (atomic_read(&lo->lo_refcnt) > 0) {
ret = -EBUSY;
- mutex_unlock(&lo->lo_ctl_mutex);
break;
}
lo->lo_disk->private_data = NULL;
- mutex_unlock(&lo->lo_ctl_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
break;
@@ -1981,7 +2010,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
break;
ret = loop_add(&lo, -1);
}
- mutex_unlock(&loop_index_mutex);
+ mutex_unlock(&loop_ctl_mutex);
return ret;
}
@@ -2064,10 +2093,10 @@ static int __init loop_init(void)
THIS_MODULE, loop_probe, NULL, NULL);
/* pre-create number of devices given by config or max_loop */
- mutex_lock(&loop_index_mutex);
+ mutex_lock(&loop_ctl_mutex);
for (i = 0; i < nr; i++)
loop_add(&lo, i);
- mutex_unlock(&loop_index_mutex);
+ mutex_unlock(&loop_ctl_mutex);
printk(KERN_INFO "loop: module loaded\n");
return 0;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 60f0fd2c0c65..a923e74495ce 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -55,7 +55,6 @@ struct loop_device {
spinlock_t lo_lock;
int lo_state;
- struct mutex lo_ctl_mutex;
struct kthread_worker worker;
struct task_struct *worker_task;
bool use_dio;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 7a2e23d6bfdd..b2da2382d544 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -637,8 +637,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
/* Remove the multi-part read marker. */
len -= 2;
+ data += 2;
for (i = 0; i < len; i++)
- ssif_info->data[i] = data[i+2];
+ ssif_info->data[i] = data[i];
ssif_info->multi_len = len;
ssif_info->multi_pos = 1;
@@ -666,8 +667,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
blocknum = data[0];
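+ /* Strip the block number byte from the front of the payload. */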
+ len--;
+ data++;
+
+ if (blocknum != 0xff && len != 31) {
+ /* All blocks but the last must have 31 data bytes. */
+ result = -EIO;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Received middle message <31\n");
- if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
+ goto continue_op;
+ }
+
+ if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
/* Received message too big, abort the operation. */
result = -E2BIG;
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -676,16 +688,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
goto continue_op;
}
- /* Remove the blocknum from the data. */
- len--;
for (i = 0; i < len; i++)
- ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
+ ssif_info->data[i + ssif_info->multi_len] = data[i];
ssif_info->multi_len += len;
if (blocknum == 0xff) {
/* End of read */
len = ssif_info->multi_len;
data = ssif_info->data;
- } else if (blocknum + 1 != ssif_info->multi_pos) {
+ } else if (blocknum != ssif_info->multi_pos) {
/*
* Out of sequence block, just abort. Block
* numbers start at zero for the second block,
@@ -713,6 +723,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
}
+ continue_op:
if (result < 0) {
ssif_inc_stat(ssif_info, receive_errors);
} else {
@@ -720,8 +731,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
ssif_inc_stat(ssif_info, received_message_parts);
}
-
- continue_op:
if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
pr_info(PFX "DONE 1: state = %d, result=%d.\n",
ssif_info->ssif_state, result);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index a0df83e6b84b..46c05c9a9354 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -239,8 +239,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
* lvds1_gate and lvds2_gate are pseudo-gates. Both can be
* independently configured as clock inputs or outputs. We treat
* the "output_enable" bit as a gate, even though it's really just
- * enabling clock output.
+ * enabling clock output. Initially the gate bits are cleared, as
+ * otherwise the exclusive configuration gets locked in the setup done
+ * by software running before the clock driver, with no way to change
+ * it.
*/
+ writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 07135e009d8b..601a6c3acc7f 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -240,7 +240,13 @@ static int pseries_idle_probe(void)
return -ENODEV;
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
- if (lppaca_shared_proc(get_lppaca())) {
+ /*
+ * Use local_paca instead of get_lppaca() since preemption is
+ * not disabled here, and it need not be: lppaca_ptr does not
+ * have to be the value for the current CPU; any CPU's will do.
+ */
+ if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
cpuidle_state_table = shared_states;
max_idle_state = ARRAY_SIZE(shared_states);
} else {
diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
index 05813fbf3daf..647dfbbc4e1c 100644
--- a/drivers/gpio/gpio-max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg,
struct spi_device *spi = to_spi_device(dev);
u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
- return spi_write(spi, (const u8 *)&word, sizeof(word));
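+ /*
+ * spi_write_then_read() bounces the data through a DMA-safe
+ * buffer internally, so passing this stack variable is fine
+ * even on controllers that use DMA.
+ */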
+ return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
}
/* A read from the MAX7301 means two transfers; here, one message each */
@@ -37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg)
struct spi_device *spi = to_spi_device(dev);
word = 0x8000 | (reg << 8);
- ret = spi_write(spi, (const u8 *)&word, sizeof(word));
- if (ret)
- return ret;
- /*
- * This relies on the fact, that a transfer with NULL tx_buf shifts out
- * zero bytes (=NOOP for MAX7301)
- */
- ret = spi_read(spi, (u8 *)&word, sizeof(word));
+ ret = spi_write_then_read(spi, &word, sizeof(word), &word,
+ sizeof(word));
if (ret)
return ret;
return word & 0xff;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5ad036741b99..e449f22c8f29 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1109,9 +1109,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct drm_framebuffer *fb = fb_helper->fb;
int depth;
- if (var->pixclock != 0 || in_dbg_master())
+ if (in_dbg_master())
return -EINVAL;
+ if (var->pixclock != 0) {
+ DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+ var->pixclock = 0;
+ }
+
/* Need to resize the fb object !!! */
if (var->bits_per_pixel > fb->bits_per_pixel ||
var->xres > fb->width || var->yres > fb->height ||
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ebb7e1d1778c..b31c02783d69 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/nospec.h>
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -705,13 +706,17 @@ long drm_ioctl(struct file *filp,
if (is_driver_ioctl) {
/* driver ioctl */
- if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+ unsigned int index = nr - DRM_COMMAND_BASE;
+
+ if (index >= dev->driver->num_ioctls)
goto err_i1;
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ index = array_index_nospec(index, dev->driver->num_ioctls);
+ ioctl = &dev->driver->ioctls[index];
} else {
/* core ioctl */
if (nr >= DRM_CORE_IOCTL_COUNT)
goto err_i1;
+ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
ioctl = &drm_ioctls[nr];
}
@@ -813,6 +818,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
if (nr >= DRM_CORE_IOCTL_COUNT)
return false;
+ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
*flags = drm_ioctls[nr].flags;
return true;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index cf95a8b9b68d..d2240c53edd3 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -28,6 +28,16 @@ config DRM_MSM_REGISTER_LOGGING
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+config DRM_MSM_EARLY_CARD
+ bool "Enable Early DRM in MSM DRM driver"
+ depends on DRM_MSM
+ default y
+ help
+ Choose this option to enable the early DRM driver for
+ MSM/Snapdragon. Early DRM creates an additional DRI card that
+ early applications can use before the main DRM device is ready.
+ The device tree must also assign the proper display resources
+ to early DRM.
+
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 6edbca08536f..dd721cd8b0e6 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -5,6 +5,7 @@ ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
ccflags-y += -Idrivers/gpu/drm/msm/sde
+ccflags-$(CONFIG_DRM_MSM_EARLY_CARD) += -Idrivers/gpu/drm/msm/ekms
msm_drm-y := \
hdmi/hdmi.o \
@@ -57,6 +58,14 @@ msm_drm-y := \
sde_edid_parser.o \
sde_hdcp_1x.o
+msm_drm-$(CONFIG_DRM_MSM_EARLY_CARD) += ekms/edrm_kms.o \
+ ekms/edrm_plane.o \
+ ekms/edrm_encoder.o \
+ ekms/edrm_connector.o \
+ ekms/edrm_crtc.o \
+ ekms/edrm_drv.o \
+ ekms/edrm_splash.o
+
# use drm gpu driver only if qcom_kgsl driver not available
ifneq ($(CONFIG_QCOM_KGSL),y)
msm_drm-y += adreno/adreno_device.o \
diff --git a/drivers/gpu/drm/msm/ekms/edrm_connector.c b/drivers/gpu/drm/msm/ekms/edrm_connector.c
new file mode 100644
index 000000000000..8beaa598aa5e
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_connector.c
@@ -0,0 +1,123 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edrm_connector.h"
+
+struct edrm_connector {
+ struct drm_connector base;
+ struct drm_encoder *encoder;
+ struct msm_edrm_display *display;
+};
+
+#define to_edrm_connector(x) container_of(x, struct edrm_connector, base)
+
+static enum drm_connector_status
+edrm_connector_detect(struct drm_connector *conn, bool force)
+{
+ return connector_status_connected;
+}
+
+static int
+edrm_connector_get_modes(struct drm_connector *connector)
+{
+ struct edrm_connector *edrm_conn = to_edrm_connector(connector);
+ struct drm_display_mode *m;
+
+ m = drm_mode_duplicate(connector->dev, &edrm_conn->display->mode);
+ drm_mode_set_name(m);
+ drm_mode_probed_add(connector, m);
+
+ return 1;
+}
+
+static enum drm_mode_status
+edrm_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+edrm_connector_best_encoder(struct drm_connector *connector)
+{
+ struct edrm_connector *edrm_conn = to_edrm_connector(connector);
+
+ return edrm_conn->encoder;
+}
+
+void edrm_connector_destroy(struct drm_connector *connector)
+{
+ struct edrm_connector *edrm_conn = to_edrm_connector(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(edrm_conn);
+}
+
+static const struct drm_connector_helper_funcs edrm_connector_helper_funcs = {
+ .get_modes = edrm_connector_get_modes,
+ .mode_valid = edrm_mode_valid,
+ .best_encoder = edrm_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs edrm_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = edrm_connector_detect,
+ .destroy = edrm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+struct drm_connector *edrm_connector_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct msm_edrm_display *display)
+{
+ struct edrm_connector *edrm_conn;
+ struct drm_connector *connector;
+ int ret;
+
+ edrm_conn = kzalloc(sizeof(*edrm_conn), GFP_KERNEL);
+ if (!edrm_conn)
+ return ERR_PTR(-ENOMEM);
+ connector = &edrm_conn->base;
+
+ ret = drm_connector_init(dev, connector,
+ &edrm_connector_funcs,
+ display->connector_type);
+ if (ret) {
+ pr_err("edrm drm_connector_init failed\n");
+ goto fail;
+ }
+
+ drm_connector_helper_add(connector, &edrm_connector_helper_funcs);
+
+ edrm_conn->display = display;
+ edrm_conn->encoder = encoder;
+
+ ret = drm_connector_register(&edrm_conn->base);
+ if (ret) {
+ pr_err("failed to register drm connector, %d\n", ret);
+ goto fail;
+ }
+
+ ret = drm_mode_connector_attach_encoder(&edrm_conn->base, encoder);
+ if (ret) {
+ pr_err("failed to attach encoder to connector, %d\n", ret);
+ goto fail;
+ }
+
+ return connector;
+fail:
+ kfree(edrm_conn);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_connector.h b/drivers/gpu/drm/msm/ekms/edrm_connector.h
new file mode 100644
index 000000000000..4bd6deb7b6d0
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_connector.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EDRM_CONNECTOR_H_
+#define _EDRM_CONNECTOR_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "edrm_kms.h"
+
+struct drm_connector *edrm_connector_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct msm_edrm_display *display);
+
+void edrm_connector_destroy(struct drm_connector *connector);
+
+#endif /* _EDRM_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/ekms/edrm_crtc.c b/drivers/gpu/drm/msm/ekms/edrm_crtc.c
new file mode 100644
index 000000000000..b39ec94dc69c
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_crtc.c
@@ -0,0 +1,263 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edrm_crtc.h"
+#include "edrm_plane.h"
+#include "edrm_encoder.h"
+#include "sde_kms.h"
+
+/* display control path Flush register offset */
+#define FLUSH_OFFSET 0x18
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+#define SSPP_CONSTANT_COLOR 0x3c
+#define LAYER_BLEND5_OP 0x260
+#define FLUSH_CTL_BIT 17
+#define LAYER_OP_ENABLE_ALPHA_BLEND 0x600
+
+static void edrm_crtc_plane_attach(struct drm_crtc *crtc,
+ struct drm_plane *plane)
+{
+ struct drm_device *dev = crtc->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+ struct msm_drm_private *master_priv =
+ edrm_kms->master_dev->dev_private;
+ struct sde_kms *master_kms = to_sde_kms(master_priv->kms);
+ u32 layer_val, ctl_off, lm_idx;
+ struct edrm_plane *edrm_plane = to_edrm_plane(plane);
+ struct edrm_crtc *edrm_crtc = to_edrm_crtc(crtc);
+ struct msm_edrm_display *display;
+
+ display = &edrm_kms->display[edrm_crtc->display_id];
+ ctl_off = display->ctl_off;
+ lm_idx = (display->ctl_id - 1) * 0x4;
+
+ layer_val = readl_relaxed(master_kms->mmio + ctl_off + lm_idx);
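+ /*
+ * Stage the SSPP into the layer mixer: each pipe owns a field in
+ * the CTL layer register, and the blend stage is programmed as
+ * lm_stage + 2 (the low encodings appear to be reserved for
+ * unstaged and base layers).
+ */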
+ switch (edrm_plane->sspp_cfg_id) {
+ case 1: /* vig 0 */
+ layer_val |= edrm_plane->lm_stage + 2;
+ break;
+ case 2: /* vig 1 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 3;
+ break;
+ case 3: /* vig 2 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 6;
+ break;
+ case 4: /* vig 3 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 26;
+ break;
+ case 5: /* rgb 0 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 9;
+ break;
+ case 6: /* rgb 1 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 12;
+ break;
+ case 7: /* rgb 2 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 15;
+ break;
+ case 8: /* rgb 3 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 29;
+ break;
+ case 9: /* dma 0 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 18;
+ break;
+ case 10: /* dma 1 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 21;
+ break;
+ }
+ writel_relaxed(layer_val, master_kms->mmio + ctl_off + lm_idx);
+ plane->crtc = crtc;
+}
+
+void edrm_crtc_postinit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_edrm_kms *edrm_kms;
+ struct sde_kms *master_kms;
+ struct msm_drm_private *master_priv;
+ struct msm_edrm_display *display;
+ struct edrm_crtc *edrm_crtc;
+ struct edrm_plane *edrm_plane;
+ u32 lm_off, flush_val;
+ const struct drm_plane_helper_funcs *funcs;
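+ /*
+ * Presumably the CTL flush bit positions for the VIG0-3, RGB0-3
+ * and DMA0-1 pipes, indexed by sspp_cfg_id - 1.
+ */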
+ u32 sspp_flush_mask_bit[10] = {
+ 0, 1, 2, 18, 3, 4, 5, 19, 11, 12};
+
+ edrm_kms = to_edrm_kms(kms);
+ master_priv = edrm_kms->master_dev->dev_private;
+ master_kms = to_sde_kms(master_priv->kms);
+ edrm_plane = to_edrm_plane(crtc->primary);
+ edrm_crtc = to_edrm_crtc(crtc);
+ funcs = crtc->primary->helper_private;
+ funcs->atomic_disable(crtc->primary, crtc->primary->state);
+ display = &edrm_kms->display[edrm_crtc->display_id];
+ lm_off = display->lm_off;
+
+ edrm_crtc_plane_attach(crtc, crtc->primary);
+
+ /* Update CTL bit, layer mixer flush bit and sspp flush bit */
+ flush_val = BIT(FLUSH_CTL_BIT);
+ flush_val |= BIT(display->ctl_id + 5);
+ flush_val |= BIT(sspp_flush_mask_bit[edrm_plane->sspp_cfg_id - 1]);
+
+ /* setup alpha blending for mixer stage 5 */
+ writel_relaxed(LAYER_OP_ENABLE_ALPHA_BLEND, master_kms->mmio + lm_off +
+ LAYER_BLEND5_OP);
+ edrm_crtc->sspp_flush_mask |= flush_val;
+
+ edrm_crtc_commit_kickoff(crtc);
+}
+
+static void edrm_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct drm_plane *plane = NULL;
+
+ if (!crtc) {
+ pr_err("invalid crtc\n");
+ return;
+ }
+
+ /* TODO: wait for acquire fences before anything else is done */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ /* update SSPP bit in sspp_flush_mask */
+ edrm_plane_flush(plane);
+ }
+}
+
+static void edrm_crtc_enable(struct drm_crtc *crtc)
+{
+ crtc->state->enable = true;
+}
+
+static void edrm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct edrm_plane *edrm_plane;
+ struct edrm_crtc *edrm_crtc = to_edrm_crtc(crtc);
+ const struct drm_plane_helper_funcs *funcs;
+ u32 sspp_flush_mask_bit[10] = {
+ 0, 1, 2, 18, 3, 4, 5, 19, 11, 12};
+
+ edrm_plane = to_edrm_plane(crtc->primary);
+ funcs = crtc->primary->helper_private;
+ funcs->atomic_disable(crtc->primary, crtc->primary->state);
+
+ edrm_crtc->sspp_flush_mask |=
+ BIT(sspp_flush_mask_bit[edrm_plane->sspp_cfg_id - 1]);
+ edrm_crtc_commit_kickoff(crtc);
+}
+
+void edrm_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct edrm_crtc *edrm_crtc = to_edrm_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(edrm_crtc);
+}
+
+static const struct drm_crtc_funcs edrm_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = edrm_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_crtc_helper_funcs edrm_crtc_helper_funcs = {
+ .disable = edrm_crtc_disable,
+ .enable = edrm_crtc_enable,
+ .atomic_flush = edrm_crtc_atomic_flush,
+};
+
+struct drm_crtc *edrm_crtc_init(struct drm_device *dev,
+ struct msm_edrm_display *display,
+ struct drm_plane *primary_plane)
+{
+ struct edrm_crtc *edrm_crtc;
+ struct drm_crtc *crtc;
+ int ret;
+
+ edrm_crtc = kzalloc(sizeof(*edrm_crtc), GFP_KERNEL);
+ if (!edrm_crtc) {
+ ret = -ENOMEM;
+ goto fail_no_mem;
+ }
+
+ crtc = &edrm_crtc->base;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &edrm_crtc_funcs);
+ if (ret)
+ goto fail;
+
+ drm_crtc_helper_add(crtc, &edrm_crtc_helper_funcs);
+ edrm_crtc->display_id = display->display_id;
+
+ return crtc;
+fail:
+ kfree(edrm_crtc);
+fail_no_mem:
+ return ERR_PTR(ret);
+}
+
+void edrm_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct msm_edrm_kms *edrm_kms;
+ struct msm_edrm_display *display;
+ struct edrm_crtc *edrm_crtc;
+ struct sde_kms *master_kms;
+ struct msm_drm_private *master_priv;
+ u32 ctl_off;
+
+ dev = crtc->dev;
+ priv = dev->dev_private;
+ edrm_kms = to_edrm_kms(priv->kms);
+ master_priv = edrm_kms->master_dev->dev_private;
+ master_kms = to_sde_kms(master_priv->kms);
+ edrm_crtc = to_edrm_crtc(crtc);
+
+ display = &edrm_kms->display[edrm_crtc->display_id];
+ ctl_off = display->ctl_off;
+
+ /* Trigger the flush */
+ writel_relaxed(edrm_crtc->sspp_flush_mask, master_kms->mmio + ctl_off +
+ FLUSH_OFFSET);
+}
+
+void edrm_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct drm_encoder *encoder;
+
+ dev = crtc->dev;
+ priv = dev->dev_private;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ edrm_encoder_wait_for_commit_done(encoder);
+ }
+}
+
+void edrm_crtc_prepare_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_crtc.h b/drivers/gpu/drm/msm/ekms/edrm_crtc.h
new file mode 100644
index 000000000000..761a6a97e5b2
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_crtc.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EDRM_CRTC_H_
+#define _EDRM_CRTC_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "edrm_kms.h"
+
+struct edrm_crtc {
+ struct drm_crtc base;
+ u32 sspp_flush_mask;
+ int display_id;
+};
+
+#define to_edrm_crtc(x) container_of(x, struct edrm_crtc, base)
+
+struct drm_crtc *edrm_crtc_init(struct drm_device *dev,
+ struct msm_edrm_display *display, struct drm_plane *primary_plane);
+
+/**
+ * Helper function to setup the control path
+ * @crtc: Pointer to drm crtc object
+ */
+void edrm_crtc_postinit(struct drm_crtc *crtc);
+
+/**
+ * edrm_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void edrm_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * edrm_crtc_complete_commit - callback to prepare for output fences
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void edrm_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+void edrm_crtc_prepare_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * edrm_crtc_destroy - free up edrm_crtc structure
+ * @crtc: Pointer to drm crtc object
+ */
+void edrm_crtc_destroy(struct drm_crtc *crtc);
+
+#endif /* _EDRM_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/ekms/edrm_drv.c b/drivers/gpu/drm/msm/ekms/edrm_drv.c
new file mode 100644
index 000000000000..69b8c01e59d4
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_drv.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of_address.h>
+#include <linux/sde_io_util.h>
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "edrm_kms.h"
+
+static int msm_edrm_unload(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ int i;
+
+ /* clean up display commit worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->disp_thread[i].thread) {
+ flush_kthread_worker(&priv->disp_thread[i].worker);
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+ }
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+ if (kms)
+ kms->funcs->destroy(kms);
+
+ dev->dev_private = NULL;
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int msm_edrm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ struct drm_device *master_dev;
+ struct msm_drm_private *master_priv;
+ struct drm_minor *minor;
+ int ret, i;
+ struct sched_param param;
+
+ /* main DRM's minor ID is zero */
+ minor = drm_minor_acquire(0);
+ if (IS_ERR(minor)) {
+ pr_err("master drm_minor has no dev, stop early drm loading\n");
+ return -ENODEV;
+ }
+ master_dev = minor->dev;
+ drm_minor_release(minor);
+ master_priv = master_dev->dev_private;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("msm_edrm", 0);
+ init_waitqueue_head(&priv->fence_event);
+ init_waitqueue_head(&priv->pending_crtcs_event);
+ INIT_LIST_HEAD(&priv->client_event_list);
+ INIT_LIST_HEAD(&priv->inactive_list);
+ INIT_LIST_HEAD(&priv->fence_cbs);
+ hash_init(priv->mn_hash);
+ mutex_init(&priv->mn_lock);
+
+ drm_mode_config_init(dev);
+
+ platform_set_drvdata(pdev, dev);
+ priv->pclient = master_priv->pclient;
+ memcpy((void *)&priv->phandle.mp, (void *) &master_priv->phandle.mp,
+ sizeof(struct dss_module_power));
+ INIT_LIST_HEAD(&priv->phandle.power_client_clist);
+ mutex_init(&priv->phandle.phandle_lock);
+
+ priv->vram.size = 0;
+ kms = msm_edrm_kms_init(dev);
+ if (IS_ERR(kms)) {
+ priv->kms = NULL;
+ dev_err(dev->dev, "failed to load kms\n");
+ ret = PTR_ERR(kms);
+ goto fail;
+ }
+
+ priv->kms = kms;
+ if (kms && kms->funcs && kms->funcs->hw_init) {
+ ret = kms->funcs->hw_init(kms);
+ if (ret) {
+ dev_err(dev->dev, "kms hw init failed: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ /*
+ * This priority was found through empirical testing to give
+ * appropriate realtime scheduling for processing display updates
+ * while interacting with other realtime and normal-priority tasks.
+ */
+ param.sched_priority = 16;
+ /* initialize commit thread structure */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ init_kthread_worker(&priv->disp_thread[i].worker);
+ priv->disp_thread[i].dev = dev;
+ priv->disp_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->disp_thread[i].worker,
+ "crtc_commit:%d",
+ priv->disp_thread[i].crtc_id);
+ if (IS_ERR(priv->disp_thread[i].thread)) {
+ dev_err(dev->dev, "failed to create kthread\n");
+ ret = PTR_ERR(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ /* clean up previously created threads if any */
+ for (i -= 1; i >= 0; i--) {
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+ goto fail;
+ }
+
+ /* set the priority only on a successfully created thread */
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ pr_warn("display thread priority update failed: %d\n",
+ ret);
+ }
+
+ /* share same function from master drm */
+ dev->mode_config.funcs = master_dev->mode_config.funcs;
+
+ ret = drm_vblank_init(dev, priv->num_crtcs);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+ goto fail;
+ }
+
+ drm_mode_config_reset(dev);
+ /* perform subdriver post initialization */
+ if (kms && kms->funcs && kms->funcs->postinit) {
+ ret = kms->funcs->postinit(kms);
+ if (ret) {
+ dev_err(dev->dev, "kms post init failed: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ drm_kms_helper_poll_init(dev);
+ return 0;
+
+fail:
+ msm_edrm_unload(dev);
+ return ret;
+}
+
+static int msm_edrm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_file_private *ctx = NULL;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!dev || !dev->dev_private)
+ return -ENODEV;
+ priv = dev->dev_private;
+
+ file->driver_priv = ctx;
+ kms = priv->kms;
+
+ if (kms) {
+ struct msm_edrm_kms *edrm_kms;
+
+ edrm_kms = to_edrm_kms(kms);
+ /* return failure if eDRM has already handed off its display
+ * resources to the main DRM
+ */
+ if (edrm_kms->handoff_flag)
+ return -ENODEV;
+ }
+
+ if (kms && kms->funcs && kms->funcs->postopen)
+ kms->funcs->postopen(kms, file);
+
+ return 0;
+}
+
+static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ if (kms && kms->funcs && kms->funcs->preclose)
+ kms->funcs->preclose(kms, file);
+}
+
+static void msm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_kms *kms = priv->kms;
+
+ if (kms && kms->funcs && kms->funcs->postclose)
+ kms->funcs->postclose(kms, file);
+
+ if (!ctx)
+ return;
+
+ kfree(ctx);
+}
+
+static void msm_lastclose(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ struct msm_kms *kms = priv->kms;
+
+ /* wait for pending vblank requests to be executed by worker thread */
+ flush_workqueue(priv->wq);
+
+ if (kms && kms->funcs && kms->funcs->lastclose)
+ kms->funcs->lastclose(kms);
+}
+
+static int msm_edrm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ return 0;
+}
+
+static void msm_edrm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+}
+
+static const struct vm_operations_struct vm_ops = {
+ .fault = msm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = msm_gem_mmap,
+};
+
+static struct drm_driver msm_edrm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ |
+ DRIVER_GEM |
+ DRIVER_PRIME |
+ DRIVER_RENDER |
+ DRIVER_ATOMIC |
+ DRIVER_MODESET,
+ .load = msm_edrm_load,
+ .unload = msm_edrm_unload,
+ .open = msm_edrm_open,
+ .preclose = msm_preclose,
+ .postclose = msm_postclose,
+ .lastclose = msm_lastclose,
+ .set_busid = drm_platform_set_busid,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = msm_edrm_enable_vblank,
+ .disable_vblank = msm_edrm_disable_vblank,
+ .gem_free_object = msm_gem_free_object,
+ .gem_vm_ops = &vm_ops,
+ .dumb_create = msm_gem_dumb_create,
+ .dumb_map_offset = msm_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_res_obj = msm_gem_prime_res_obj,
+ .gem_prime_pin = msm_gem_prime_pin,
+ .gem_prime_unpin = msm_gem_prime_unpin,
+ .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+ .gem_prime_vmap = msm_gem_prime_vmap,
+ .gem_prime_vunmap = msm_gem_prime_vunmap,
+ .gem_prime_mmap = msm_gem_prime_mmap,
+
+ .ioctls = NULL,
+ .num_ioctls = 0,
+ .fops = &fops,
+ .name = "msm",
+ .desc = "MSM Snapdragon DRM",
+ .date = "20181024",
+ .major = 1,
+ .minor = 1,
+};
+
+static int msm_pdev_edrm_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct drm_minor *minor;
+ struct drm_device *master_dev;
+ struct msm_drm_private *master_priv;
+ struct msm_kms *master_kms;
+
+ /* main DRM's minor ID is zero */
+ minor = drm_minor_acquire(0);
+ if (IS_ERR(minor)) {
+ pr_err("drm_minor has no dev, defer the probe\n");
+ return -EPROBE_DEFER;
+ }
+ master_dev = minor->dev;
+ drm_minor_release(minor);
+ if (!master_dev) {
+ pr_err("master_dev is null, defer the probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ master_priv = master_dev->dev_private;
+ if (!master_priv) {
+ pr_err("master_priv is null, defer the probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ master_kms = master_priv->kms;
+ if (!master_kms) {
+ pr_err("master KMS is null, defer the probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* on all devices we are aware of, the IOMMUs in use can map
+ * any address the cpu can see:
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret) {
+ pr_err("dma_set_mask_and_coherent return %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_platform_init(&msm_edrm_driver, pdev);
+ if (ret)
+ DRM_ERROR("drm_platform_init failed: %d\n", ret);
+
+ return ret;
+}
+
+static int msm_pdev_edrm_remove(struct platform_device *pdev)
+{
+ drm_put_dev(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static const struct platform_device_id msm_edrm_id[] = {
+ { "edrm_mdp", 0 },
+ { }
+};
+
+static void msm_edrm_lastclose(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ if (kms && kms->funcs && kms->funcs->lastclose)
+ kms->funcs->lastclose(kms);
+}
+
+static void msm_pdev_edrm_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = NULL;
+
+ priv = ddev->dev_private;
+ msm_edrm_lastclose(ddev);
+
+ /* set this after lastclose to allow kickoff from lastclose */
+ priv->shutdown_in_progress = true;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,msm-kms-edrm" }, /* sde */
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_pdev_edrm_probe,
+ .remove = msm_pdev_edrm_remove,
+ .shutdown = msm_pdev_edrm_shutdown,
+ .driver = {
+ .name = "msm_early_drm",
+ .of_match_table = dt_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = msm_edrm_id,
+};
+
+static int __init msm_edrm_register(void)
+{
+ DBG("init");
+ return platform_driver_register(&msm_platform_driver);
+}
+
+static void __exit msm_edrm_unregister(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&msm_platform_driver);
+}
+
+module_init(msm_edrm_register);
+module_exit(msm_edrm_unregister);
+
+MODULE_DESCRIPTION("MSM EARLY DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/msm/ekms/edrm_encoder.c b/drivers/gpu/drm/msm/ekms/edrm_encoder.c
new file mode 100644
index 000000000000..0cee78c73f50
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_encoder.c
@@ -0,0 +1,112 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edrm_encoder.h"
+#include "edrm_crtc.h"
+#include "sde_kms.h"
+
+static void edrm_encoder_enable(struct drm_encoder *drm_enc)
+{
+ pr_err("eDRM Encoder enable\n");
+}
+
+static void edrm_encoder_disable(struct drm_encoder *drm_enc)
+{
+ pr_err("eDRM Encoder disable\n");
+}
+
+void edrm_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct edrm_encoder *edrm_enc = to_edrm_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(edrm_enc);
+}
+
+static const struct drm_encoder_helper_funcs edrm_encoder_helper_funcs = {
+ .disable = edrm_encoder_disable,
+ .enable = edrm_encoder_enable,
+};
+
+static const struct drm_encoder_funcs edrm_encoder_funcs = {
+ .destroy = edrm_encoder_destroy,
+};
+
+int edrm_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
+{
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct msm_edrm_kms *edrm_kms;
+ struct msm_edrm_display *display;
+ struct edrm_crtc *edrm_crtc;
+ struct sde_kms *master_kms;
+ struct msm_drm_private *master_priv;
+ struct sde_mdss_cfg *cfg;
+ u32 ctl_off;
+ u32 flush_register = 0;
+ int i;
+
+ dev = drm_enc->dev;
+ priv = dev->dev_private;
+ edrm_kms = to_edrm_kms(priv->kms);
+ master_priv = edrm_kms->master_dev->dev_private;
+ master_kms = to_sde_kms(master_priv->kms);
+ cfg = master_kms->catalog;
+ edrm_crtc = to_edrm_crtc(drm_enc->crtc);
+ display = &edrm_kms->display[edrm_crtc->display_id];
+ ctl_off = display->ctl_off;
+
+ /* poll edrm_crtc->sspp_flush_mask until cleared */
+ for (i = 0; i < 20; i++) {
+ flush_register = readl_relaxed(master_kms->mmio +
+ ctl_off + 0x18);
+ if ((flush_register & edrm_crtc->sspp_flush_mask) != 0)
+ usleep_range(1000, 2000);
+ else
+ break;
+ }
+
+ /* reset sspp_flush_mask */
+ edrm_crtc->sspp_flush_mask = 0;
+
+ return 0;
+}
+
+struct drm_encoder *edrm_encoder_init(struct drm_device *dev,
+ struct msm_edrm_display *display)
+{
+ struct edrm_encoder *edrm_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ edrm_encoder = kzalloc(sizeof(*edrm_encoder), GFP_KERNEL);
+ if (!edrm_encoder)
+ return ERR_PTR(-ENOMEM);
+
+ encoder = &edrm_encoder->base;
+
+ ret = drm_encoder_init(dev, encoder,
+ &edrm_encoder_funcs,
+ display->encoder_type);
+ if (ret)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &edrm_encoder_helper_funcs);
+
+ edrm_encoder->intf_idx = display->intf_id;
+
+ return encoder;
+fail:
+ kfree(edrm_encoder);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_encoder.h b/drivers/gpu/drm/msm/ekms/edrm_encoder.h
new file mode 100644
index 000000000000..eeb91d659535
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_encoder.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EDRM_ENCODER_H_
+#define _EDRM_ENCODER_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "edrm_kms.h"
+
+struct edrm_encoder {
+ struct drm_encoder base;
+ u32 sspp_mask;
+ int intf_idx;
+};
+
+#define to_edrm_encoder(x) container_of(x, struct edrm_encoder, base)
+
+/**
+ * edrm_encoder_wait_for_commit_done - wait until the register flush is done
+ * @drm_enc: Pointer to drm_encoder object
+ */
+int edrm_encoder_wait_for_commit_done(struct drm_encoder *drm_enc);
+
+/**
+ * edrm_encoder_destroy - free up drm_encoder object
+ * @drm_enc: Pointer to drm encoder object
+ */
+void edrm_encoder_destroy(struct drm_encoder *encoder);
+
+/**
+ * edrm_encoder_init - create drm_encoder object
+ * @dev: drm_device that this encoder going to register.
+ * @display: display structure that associate with this encoder.
+ */
+struct drm_encoder *edrm_encoder_init(struct drm_device *dev,
+ struct msm_edrm_display *display);
+
+#endif /* _EDRM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/msm/ekms/edrm_kms.c b/drivers/gpu/drm/msm/ekms/edrm_kms.c
new file mode 100644
index 000000000000..4749b6a8d776
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_kms.c
@@ -0,0 +1,723 @@
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <soc/qcom/boot_stats.h>
+#include "msm_kms.h"
+#include "edrm_kms.h"
+#include "edrm_crtc.h"
+#include "edrm_encoder.h"
+#include "edrm_plane.h"
+#include "edrm_connector.h"
+#include "sde_kms.h"
+#include "sde_formats.h"
+#include "edrm_splash.h"
+#include "sde_hdmi.h"
+#include "dsi_display.h"
+#include "sde_crtc.h"
+
+#define MMSS_MDP_CTL_TOP_OFFSET 0x14
+
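+/* Cleared once the first commit that actually carries a framebuffer
+ * lands, so the boot marker below is logged exactly once.
+ */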
+static bool first_commit = true;
+
+static void edrm_kms_prepare_commit(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+ struct drm_device *dev = edrm_kms->master_dev;
+ struct msm_drm_private *master_priv = edrm_kms->master_dev->dev_private;
+ struct sde_kms *master_kms;
+ int i, nplanes;
+ struct drm_plane *plane;
+ bool valid_commit = false;
+
+ master_kms = to_sde_kms(master_priv->kms);
+ nplanes = dev->mode_config.num_total_plane;
+ for (i = 0; i < nplanes; i++) {
+ plane = state->planes[i];
+ if (plane && plane->fb) {
+ valid_commit = true;
+ break;
+ }
+ }
+
+ if (valid_commit && first_commit) {
+ first_commit = false;
+ place_marker("eDRM display first valid commit");
+ }
+
+ sde_power_resource_enable(&master_priv->phandle,
+ master_kms->core_client, true);
+
+ /* Notify bootloader splash to stop */
+ if (valid_commit && edrm_kms->lk_running_flag) {
+
+ /* if LK is still running, notify LK to stop */
+ if (edrm_splash_get_lk_status(kms) !=
+ SPLASH_STATUS_NOT_START) {
+ edrm_splash_notify_lk_stop_splash(kms);
+ edrm_splash_poll_lk_stop_splash(kms);
+ }
+
+ /* next eDRM close will trigger display resources handoff */
+ edrm_kms->handoff_flag = true;
+ }
+}
+
+static void edrm_kms_commit(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ if (crtc->state->active)
+ edrm_crtc_commit_kickoff(crtc);
+ }
+}
+
+static void edrm_kms_complete_commit(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+ struct msm_drm_private *master_priv = edrm_kms->master_dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct sde_kms *master_kms;
+ int i;
+
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ edrm_crtc_complete_commit(crtc, old_crtc_state);
+
+ master_kms = to_sde_kms(master_priv->kms);
+ sde_power_resource_enable(&master_priv->phandle,
+ master_kms->core_client, false);
+}
+
+static void edrm_kms_wait_for_commit_done(struct msm_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ int ret;
+
+ dev = crtc->dev;
+ if (!dev)
+ return;
+
+ if (!crtc->state->enable) {
+ pr_err("[crtc:%d] not enable\n", crtc->base.id);
+ return;
+ }
+
+ if (!crtc->state->active) {
+ pr_err("[crtc:%d] not active\n", crtc->base.id);
+ return;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ ret = edrm_encoder_wait_for_commit_done(encoder);
+ if (ret && ret != -EWOULDBLOCK) {
+ pr_err("wait for commit done returned %d\n", ret);
+ break;
+ }
+ }
+}
+
+static void edrm_kms_prepare_fence(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
+ pr_err("invalid argument(s)\n");
+ return;
+ }
+
+ /* old_state contains updated crtc pointers */
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ edrm_crtc_prepare_commit(crtc, old_crtc_state);
+}
+
+static void _edrm_kms_drm_obj_destroy(struct msm_edrm_kms *edrm_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!edrm_kms) {
+ pr_err("invalid sde_kms\n");
+ return;
+ } else if (!edrm_kms->dev) {
+ pr_err("invalid dev\n");
+ return;
+ } else if (!edrm_kms->dev->dev_private) {
+ pr_err("invalid dev_private\n");
+ return;
+ }
+ priv = edrm_kms->dev->dev_private;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+ priv->num_crtcs = 0;
+
+ for (i = 0; i < priv->num_planes; i++)
+ priv->planes[i]->funcs->destroy(priv->planes[i]);
+ priv->num_planes = 0;
+
+ for (i = 0; i < priv->num_connectors; i++)
+ priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+ priv->num_connectors = 0;
+
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+ priv->num_encoders = 0;
+}
+
+static void convert_dsi_to_drm_mode(const struct dsi_display_mode *dsi_mode,
+ struct drm_display_mode *drm_mode)
+{
+ memset(drm_mode, 0, sizeof(*drm_mode));
+
+ drm_mode->hdisplay = dsi_mode->timing.h_active;
+ drm_mode->hsync_start = drm_mode->hdisplay +
+ dsi_mode->timing.h_front_porch;
+ drm_mode->hsync_end = drm_mode->hsync_start +
+ dsi_mode->timing.h_sync_width;
+ drm_mode->htotal = drm_mode->hsync_end + dsi_mode->timing.h_back_porch;
+ drm_mode->hskew = dsi_mode->timing.h_skew;
+
+ drm_mode->vdisplay = dsi_mode->timing.v_active;
+ drm_mode->vsync_start = drm_mode->vdisplay +
+ dsi_mode->timing.v_front_porch;
+ drm_mode->vsync_end = drm_mode->vsync_start +
+ dsi_mode->timing.v_sync_width;
+ drm_mode->vtotal = drm_mode->vsync_end + dsi_mode->timing.v_back_porch;
+
+ drm_mode->vrefresh = dsi_mode->timing.refresh_rate;
+ drm_mode->clock = dsi_mode->pixel_clk_khz;
+
+ if (dsi_mode->flags & DSI_MODE_FLAG_SEAMLESS)
+ drm_mode->flags |= DRM_MODE_FLAG_SEAMLESS;
+ if (dsi_mode->flags & DSI_MODE_FLAG_DFPS)
+ drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS;
+ if (dsi_mode->flags & DSI_MODE_FLAG_VBLANK_PRE_MODESET)
+ drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
+ drm_mode->flags |= (dsi_mode->timing.h_sync_polarity) ?
+ DRM_MODE_FLAG_NHSYNC : DRM_MODE_FLAG_PHSYNC;
+ drm_mode->flags |= (dsi_mode->timing.v_sync_polarity) ?
+ DRM_MODE_FLAG_NVSYNC : DRM_MODE_FLAG_PVSYNC;
+
+ drm_mode_set_name(drm_mode);
+}
+
+static int setup_edrm_displays(struct sde_kms *master_kms,
+ struct msm_edrm_display *display,
+ const char *label, const char *type)
+{
+ int i, ret;
+ struct dsi_display *dsi_disp;
+ struct sde_hdmi *hdmi_display;
+ struct sde_mdss_cfg *cfg;
+ u32 reg_value;
+
+ cfg = master_kms->catalog;
+ ret = -EINVAL;
+ /* check main DRM for the matching display */
+ if (!strcmp(type, "dsi")) {
+ int mode_cnt;
+ struct dsi_display_mode *dsi_mode;
+ /* check main DRM's DSI display list */
+ for (i = 0; i < master_kms->dsi_display_count; i++) {
+ dsi_disp = (struct dsi_display *)
+ master_kms->dsi_displays[i];
+ if (!strcmp(dsi_disp->name, label)) {
+ dsi_display_get_modes(dsi_disp, NULL,
+ &mode_cnt);
+ dsi_mode = kcalloc(mode_cnt, sizeof(*dsi_mode),
+ GFP_KERNEL);
+ if (!dsi_mode)
+ return -ENOMEM;
+ dsi_display_get_modes(dsi_disp, dsi_mode,
+ &mode_cnt);
+
+ /* convert to DRM mode */
+ convert_dsi_to_drm_mode(&dsi_mode[0],
+ &display->mode);
+ display->encoder_type = DRM_MODE_ENCODER_DSI;
+ display->connector_type =
+ DRM_MODE_CONNECTOR_DSI;
+ ret = 0;
+ break;
+ }
+ }
+ if (ret) {
+ pr_err("Cannot find %s in main DRM\n", label);
+ return ret;
+ }
+ ret = -EINVAL;
+ for (i = 0; i < cfg->ctl_count; i++) {
+ reg_value = readl_relaxed(master_kms->mmio +
+ cfg->ctl[i].base + MMSS_MDP_CTL_TOP_OFFSET);
+ reg_value &= 0x000000F0;
+
+ /* Check the interface from TOP register */
+ if ((((reg_value >> 4) == 0x2) &&
+ (dsi_disp->ctrl[0].ctrl->index == 0)) ||
+ (((reg_value >> 4) == 0x3) &&
+ (dsi_disp->ctrl[0].ctrl->index == 1))) {
+ display->ctl_id = i + 1;
+ display->ctl_off = cfg->ctl[i].base;
+ display->lm_off = cfg->mixer[i].base;
+ ret = 0;
+ break;
+ }
+ }
+ if (ret) {
+ pr_err("LK did not enable %s\n", label);
+ kfree(dsi_mode);
+ return -EINVAL;
+ }
+ /* the needed timing was copied into display->mode above */
+ kfree(dsi_mode);
+ } else if (!strcmp(type, "hdmi")) {
+ /* for HDMI interface, check main DRM's HDMI display list */
+ for (i = 0; i < master_kms->hdmi_display_count; i++) {
+ hdmi_display = (struct sde_hdmi *)
+ master_kms->hdmi_displays[i];
+
+ if (!strcmp(hdmi_display->name, label)) {
+ drm_mode_copy(&display->mode,
+ (struct drm_display_mode *)
+ hdmi_display->mode_list.next);
+ display->encoder_type = DRM_MODE_ENCODER_TMDS;
+ display->connector_type =
+ DRM_MODE_CONNECTOR_HDMIA;
+ ret = 0;
+ break;
+ }
+ }
+ if (ret) {
+ pr_err("Cannot find %s in main DRM\n", label);
+ return ret;
+ }
+ ret = -EINVAL;
+ for (i = 0; i < cfg->ctl_count; i++) {
+ reg_value = readl_relaxed(master_kms->mmio +
+ cfg->ctl[i].base + MMSS_MDP_CTL_TOP_OFFSET);
+ reg_value &= 0x000000F0;
+
+ /* Check the interface from TOP register */
+ if ((reg_value >> 4) == 0x4) {
+ display->ctl_id = i + 1;
+ display->ctl_off = cfg->ctl[i].base;
+ display->lm_off = cfg->mixer[i].base;
+ ret = 0;
+ break;
+ }
+ }
+ if (ret) {
+ pr_err("No LK does not enable %s\n", label);
+ return -EINVAL;
+ }
+ }
+ return ret;
+}
+
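+/*
+ * Look up an SSPP (source pipe) by name in the master SDE catalog
+ * and return its register base, configuration id and type.
+ */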
+static int _sspp_search(const char *p_name, struct sde_mdss_cfg *cfg,
+ u32 *sspp_offset, u32 *sspp_cfg_id, u32 *sspp_type)
+{
+ int i, ret;
+
+ ret = -1;
+ for (i = 0; i < cfg->sspp_count; i++)
+ if (!strcmp(cfg->sspp[i].name, p_name)) {
+ *sspp_offset = cfg->sspp[i].base;
+ *sspp_cfg_id = cfg->sspp[i].id;
+ *sspp_type = cfg->sspp[i].type;
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+static int _edrm_kms_parse_dt(struct msm_edrm_kms *edrm_kms)
+{
+ struct sde_kms *master_kms;
+ struct msm_drm_private *master_priv;
+ struct msm_drm_private *priv;
+ struct sde_mdss_cfg *cfg;
+ struct device_node *parent, *node;
+ int i, ret, disp_cnt, plane_cnt;
+ const char *clabel;
+ const char *ctype;
+ struct device_node *plane_node;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct edrm_plane *edrm_plane;
+ const char *p_name;
+ u32 lm_stage, sspp_offset, sspp_cfg_id, sspp_type;
+
+ master_priv = edrm_kms->master_dev->dev_private;
+ master_kms = to_sde_kms(master_priv->kms);
+ priv = edrm_kms->dev->dev_private;
+ cfg = master_kms->catalog;
+ ret = 0;
+ parent = of_get_child_by_name(edrm_kms->dev->dev->of_node,
+ "qcom,edrm-assigned-display");
+ if (!parent) {
+ pr_err("cannot find qcom,edrm-assigned-display\n");
+ return 0;
+ }
+
+ /* parse the dtsi and retrieve information from main DRM */
+ disp_cnt = 0;
+ for_each_child_of_node(parent, node) {
+ of_property_read_string(node, "qcom,intf-type", &ctype);
+ of_property_read_string(node, "qcom,label", &clabel);
+
+ plane_cnt = 0;
+ do {
+ plane_node = of_parse_phandle(node,
+ "qcom,assigned_plane", plane_cnt);
+ /* Initialize plane */
+ if (!plane_node)
+ break;
+
+ of_property_read_string(plane_node, "qcom,plane-name",
+ &p_name);
+ of_property_read_u32(plane_node, "lm-stage",
+ &lm_stage);
+ if (_sspp_search(p_name, cfg, &sspp_offset,
+ &sspp_cfg_id, &sspp_type)) {
+ pr_err("Cannot find %s in main DRM\n",
+ p_name);
+ continue;
+ }
+
+ plane = edrm_plane_init(edrm_kms->dev,
+ edrm_kms->plane_id[disp_cnt],
+ sspp_type);
+ if (IS_ERR(plane)) {
+ pr_err("edrm_plane_init failed\n");
+ ret = PTR_ERR(plane);
+ of_node_put(plane_node);
+ goto fail;
+ }
+ priv->planes[priv->num_planes] = plane;
+ edrm_plane = to_edrm_plane(plane);
+ edrm_plane->display_id = disp_cnt;
+ edrm_plane->lm_stage = lm_stage;
+ edrm_plane->sspp_offset = sspp_offset;
+ edrm_plane->sspp_cfg_id = sspp_cfg_id;
+ edrm_plane->sspp_type = sspp_type;
+ plane->possible_crtcs = (1 << disp_cnt);
+ priv->num_planes++;
+ plane_cnt++;
+ of_node_put(plane_node);
+ } while (plane_node);
+
+ edrm_kms->display[disp_cnt].plane_cnt = plane_cnt;
+ ret = setup_edrm_displays(master_kms,
+ &edrm_kms->display[disp_cnt], clabel, ctype);
+ if (ret)
+ goto fail;
+
+ /* Initialize crtc */
+ crtc = edrm_crtc_init(edrm_kms->dev,
+ &edrm_kms->display[disp_cnt], priv->planes[disp_cnt]);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+
+ /* Initialize encoder */
+ encoder = edrm_encoder_init(edrm_kms->dev,
+ &edrm_kms->display[disp_cnt]);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
+ encoder->possible_crtcs = (1 << disp_cnt);
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ /* Initialize connector */
+ connector = edrm_connector_init(edrm_kms->dev,
+ priv->encoders[disp_cnt],
+ &edrm_kms->display[disp_cnt]);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ goto fail;
+ }
+ priv->connectors[priv->num_connectors++] = connector;
+
+ disp_cnt++;
+ }
+ of_node_put(parent);
+
+ edrm_kms->display_count = disp_cnt;
+ edrm_kms->plane_count = priv->num_planes;
+ return ret;
+fail:
+ for (i = 0; i < priv->num_planes; i++)
+ edrm_plane_destroy(priv->planes[i]);
+ priv->num_planes = 0;
+
+ for (i = 0; i < disp_cnt; i++) {
+ if (priv->crtcs[i]) {
+ edrm_crtc_destroy(priv->crtcs[i]);
+ priv->num_crtcs--;
+ }
+ if (priv->encoders[i]) {
+ edrm_encoder_destroy(priv->encoders[i]);
+ priv->num_encoders--;
+ }
+ if (priv->connectors[i]) {
+ edrm_connector_destroy(priv->connectors[i]);
+ priv->num_connectors--;
+ }
+ }
+ disp_cnt = 0;
+ edrm_kms->display_count = 0;
+ edrm_kms->plane_count = 0;
+ of_node_put(parent);
+ return ret;
+}
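+
+/* Sketch of the DT layout consumed above (node and property names are the
+ * ones parsed by this function; the labels and values are assumptions):
+ *
+ *	qcom,edrm-assigned-display {
+ *		display0 {
+ *			qcom,intf-type = "hdmi";
+ *			qcom,label = "hdmi display";
+ *			qcom,assigned_plane = <&edrm_plane0>;
+ *		};
+ *	};
+ *	edrm_plane0: edrm_plane0 {
+ *		qcom,plane-name = "vig0";
+ *		lm-stage = <5>;
+ *	};
+ */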
+
+static int _edrm_kms_drm_obj_init(struct msm_edrm_kms *edrm_kms)
+{
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (!edrm_kms || !edrm_kms->dev || !edrm_kms->dev->dev) {
+ pr_err("invalid edrm_kms\n");
+ return -EINVAL;
+ }
+
+ dev = edrm_kms->dev;
+ priv = dev->dev_private;
+
+ ret = _edrm_kms_parse_dt(edrm_kms);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ _edrm_kms_drm_obj_destroy(edrm_kms);
+ return ret;
+}
+
+static int edrm_kms_postinit(struct msm_kms *kms)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct msm_edrm_kms *edrm_kms;
+
+ edrm_kms = to_edrm_kms(kms);
+ dev = edrm_kms->dev;
+
+ drm_for_each_crtc(crtc, dev)
+ edrm_crtc_postinit(crtc);
+
+ place_marker("eDRM driver init completed");
+ return 0;
+}
+
+static void edrm_kms_destroy(struct msm_kms *kms)
+{
+ struct msm_edrm_kms *edrm_kms;
+ struct drm_device *dev;
+
+ if (!kms) {
+ pr_err("edrm_kms_destroy invalid kms\n");
+ return;
+ }
+
+ edrm_kms = to_edrm_kms(kms);
+ dev = edrm_kms->dev;
+ if (!dev) {
+ pr_err("invalid device\n");
+ return;
+ }
+
+ kfree(edrm_kms);
+}
+
+static void edrm_kms_lastclose(struct msm_kms *kms)
+{
+ /* handoff early drm resource */
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+
+ /* notify main DRM that eDRM is released so it can
+ * reclaim all eDRM resources. Main DRM will clear the
+ * eDRM plane stage in its next commit.
+ */
+ if (edrm_kms->handoff_flag) {
+ pr_info("handoff eDRM resource to main DRM\n");
+ edrm_display_release(kms);
+ }
+}
+
+static int edrm_kms_hw_init(struct msm_kms *kms)
+{
+ struct msm_edrm_kms *edrm_kms;
+ struct sde_kms *sde_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct msm_drm_private *master_priv;
+ int rc = -EINVAL;
+ u32 lk_status;
+
+ if (!kms) {
+ pr_err("edrm_kms_hw_init invalid kms\n");
+ goto error;
+ }
+
+ edrm_kms = to_edrm_kms(kms);
+ dev = edrm_kms->dev;
+ if (!dev || !dev->platformdev) {
+ pr_err("invalid device\n");
+ goto error;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ pr_err("invalid private data\n");
+ goto error;
+ }
+
+ master_priv = edrm_kms->master_dev->dev_private;
+ sde_kms = to_sde_kms(master_priv->kms);
+ rc = sde_power_resource_enable(&master_priv->phandle,
+ sde_kms->core_client, true);
+ if (rc) {
+ pr_err("resource enable failed: %d\n", rc);
+ goto error;
+ }
+
+ /* check bootloader status register */
+ lk_status = edrm_splash_get_lk_status(kms);
+ if (lk_status == SPLASH_STATUS_RUNNING)
+ edrm_kms->lk_running_flag = true;
+ else
+ edrm_kms->lk_running_flag = false;
+
+ /* if the early domain has not started, eDRM cannot initialize
+ * the display interface and bridge chip, so return an error.
+ * ToDo: implement interface and bridge chip startup functions
+ */
+ if (lk_status == SPLASH_STATUS_NOT_START) {
+ rc = -EINVAL;
+ pr_err("LK does not start, eDRM cannot initialize\n");
+ goto power_error;
+ }
+
+ /* only unsecure buffers are supported for now */
+ edrm_kms->aspace = sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * max crtc width is equal to the max mixer width * 2 and the max
+ * height is 4K
+ */
+ dev->mode_config.max_width = sde_kms->catalog->max_sspp_linewidth * 2;
+ dev->mode_config.max_height = 4096;
+
+ /*
+ * Support format modifiers for compression etc.
+ */
+ dev->mode_config.allow_fb_modifiers = true;
+
+ rc = _edrm_kms_drm_obj_init(edrm_kms);
+ if (rc) {
+ pr_err("drm obj init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ /* notify main DRM that eDRM is started */
+ edrm_display_acquire(kms);
+
+ sde_power_resource_enable(&master_priv->phandle,
+ sde_kms->core_client, false);
+ return 0;
+power_error:
+ sde_power_resource_enable(&master_priv->phandle,
+ sde_kms->core_client, false);
+error:
+ return rc;
+}
+
+static long edrm_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ return rate;
+}
+
+static const struct msm_kms_funcs edrm_kms_funcs = {
+ .hw_init = edrm_kms_hw_init,
+ .postinit = edrm_kms_postinit,
+ .prepare_fence = edrm_kms_prepare_fence,
+ .prepare_commit = edrm_kms_prepare_commit,
+ .commit = edrm_kms_commit,
+ .complete_commit = edrm_kms_complete_commit,
+ .wait_for_crtc_commit_done = edrm_kms_wait_for_commit_done,
+ .check_modified_format = sde_format_check_modified_format,
+ .get_format = sde_get_msm_format,
+ .round_pixclk = edrm_kms_round_pixclk,
+ .destroy = edrm_kms_destroy,
+ .lastclose = edrm_kms_lastclose,
+};
+
+struct msm_kms *msm_edrm_kms_init(struct drm_device *dev)
+{
+ struct msm_edrm_kms *edrm_kms;
+ struct drm_minor *minor;
+
+ if (!dev || !dev->dev_private) {
+ pr_err("drm device node invalid\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ minor = drm_minor_acquire(0);
+ if (IS_ERR_OR_NULL(minor))
+ return ERR_PTR(-EINVAL);
+
+ edrm_kms = kzalloc(sizeof(*edrm_kms), GFP_KERNEL);
+ if (!edrm_kms) {
+ drm_minor_release(minor);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ msm_kms_init(&edrm_kms->base, &edrm_kms_funcs);
+ edrm_kms->dev = dev;
+ edrm_kms->master_dev = minor->dev;
+ drm_minor_release(minor);
+
+ return &edrm_kms->base;
+}
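+
+/* Presumed wiring (an assumption about the probe path, which is not part of
+ * this file): the eDRM platform driver creates its own drm_device, points
+ * priv->kms at the value returned by msm_edrm_kms_init(), and the common
+ * msm init code then invokes kms->funcs->hw_init() and ->postinit().
+ */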
diff --git a/drivers/gpu/drm/msm/ekms/edrm_kms.h b/drivers/gpu/drm/msm/ekms/edrm_kms.h
new file mode 100644
index 000000000000..214c5b85e614
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_kms.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EDRM_KMS_H_
+#define _EDRM_KMS_H_
+
+#include <drm/drmP.h>
+#include "msm_kms.h"
+
+#define MAX_CTRLS_PER_DISPLAY 2
+
+struct msm_edrm_display {
+ int display_id;
+ int ctl_id;
+ int intf_id;
+ int encoder_type;
+ int connector_type;
+ struct drm_display_mode mode;
+ int ctl_off;
+ int lm_off;
+ int plane_cnt;
+};
+
+struct msm_edrm_kms {
+ struct msm_kms base;
+ struct drm_device *dev;
+ struct drm_device *master_dev;
+ struct msm_gem_address_space *aspace;
+
+ struct msm_edrm_display display[MAX_ENCODERS];
+ int display_count;
+
+ int plane_id[MAX_PLANES];
+ int plane_count;
+
+ /* when this flag is set, the next lastclose() will trigger
+ * handoff of eDRM resources back to main DRM.
+ */
+ bool handoff_flag;
+ bool lk_running_flag;
+};
+
+struct msm_kms *msm_edrm_kms_init(struct drm_device *dev);
+
+#define to_edrm_kms(x) container_of(x, struct msm_edrm_kms, base)
+
+#endif /* _EDRM_KMS_H_ */
diff --git a/drivers/gpu/drm/msm/ekms/edrm_plane.c b/drivers/gpu/drm/msm/ekms/edrm_plane.c
new file mode 100644
index 000000000000..efe43543dc18
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_plane.c
@@ -0,0 +1,912 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edrm_plane.h"
+#include "edrm_crtc.h"
+#include "sde_kms.h"
+#include "edrm_kms.h"
+
+/* SDE_SSPP_SRC */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+#define SSPP_CONSTANT_COLOR 0x3c
+#define PIPE_SW_PIX_EXT_C0_LR 0x100
+#define PIPE_SW_PIX_EXT_C0_TB 0x104
+#define PIPE_SW_PIXEL_EXT_C0_REQ 0x108
+#define PIPE_SW_PIX_EXT_C1C2_LR 0x110
+#define PIPE_SW_PIX_EXT_C1C2_TB 0x114
+#define PIPE_SW_PIXEL_EXT_C1C2_REQ 0x118
+#define PIPE_SW_PIX_EXT_C3_LR 0x120
+#define PIPE_SW_PIX_EXT_C3_TB 0x124
+#define PIPE_SW_PIXEL_EXT_C3_REQ 0x128
+#define SSPP_CDP_CNTL 0x134
+#define FLUSH_OFFSET 0x18
+#define PIPE_OP_MODE 0x200
+#define PIPE_CSC_1_MATRIX_COEFF_0 0x320
+#define PIPE_CSC_1_MATRIX_COEFF_1 0x324
+#define PIPE_CSC_1_MATRIX_COEFF_2 0x328
+#define PIPE_CSC_1_MATRIX_COEFF_3 0x32C
+#define PIPE_CSC_1_MATRIX_COEFF_4 0x330
+#define PIPE_CSC_1_COMP_0_PRE_CLAMP 0x334
+#define PIPE_CSC_1_COMP_1_PRE_CLAMP 0x338
+#define PIPE_CSC_1_COMP_2_PRE_CLAMP 0x33C
+#define PIPE_CSC_1_COMP_0_POST_CLAMP 0x340
+#define PIPE_CSC_1_COMP_1_POST_CLAMP 0x344
+#define PIPE_CSC_1_COMP_2_POST_CLAMP 0x348
+#define PIPE_CSC_1_COMP_0_PRE_BIAS 0x34C
+#define PIPE_CSC_1_COMP_1_PRE_BIAS 0x350
+#define PIPE_CSC_1_COMP_2_PRE_BIAS 0x354
+#define PIPE_CSC_1_COMP_0_POST_BIAS 0x358
+#define PIPE_CSC_1_COMP_1_POST_BIAS 0x35C
+#define PIPE_CSC_1_COMP_2_POST_BIAS 0x360
+#define PIPE_VP_0_QSEED2_CONFIG 0x204
+#define PIPE_COMP0_3_PHASE_STEP_X 0x210
+#define PIPE_COMP0_3_PHASE_STEP_Y 0x214
+#define PIPE_COMP1_2_PHASE_STEP_X 0x218
+#define PIPE_COMP1_2_PHASE_STEP_Y 0x21C
+#define PIPE_VP_0_QSEED2_SHARP_SMOOTH_STRENGTH 0x230
+#define PIPE_VP_0_QSEED2_SHARP_THRESHOLD_EDGE 0x234
+#define PIPE_VP_0_QSEED2_SHARP_THRESHOLD_SMOOTH 0x238
+#define PIPE_VP_0_QSEED2_SHARP_THRESHOLD_NOISE 0x23C
+
+#define SSPP_SOLID_FILL_FORMAT 0x004237FF
+#define SSPP_ARGB8888_FORMAT 0x000237FF
+#define SSPP_XRGB8888_FORMAT 0x000236FF
+#define SSPP_ARGB1555_FORMAT 0x00023315
+#define SSPP_XRGB1555_FORMAT 0x00023215
+#define SSPP_ARGB4444_FORMAT 0x00023340
+#define SSPP_XRGB4444_FORMAT 0x00023240
+#define SSPP_NV12_FORMAT 0x0192923F
+#define SSPP_NV16_FORMAT 0x0092923F
+#define SSPP_YUYV_FORMAT 0x0082B23F
+#define SSPP_YUV420_FORMAT 0x018A803F
+#define SSPP_RGB888_FORMAT 0x0002243F
+#define SSPP_RGB565_FORMAT 0x00022216
+#define SSPP_ARGB_PATTERN 0x03020001
+#define SSPP_ABGR_PATTERN 0x03010002
+#define SSPP_RGBA_PATTERN 0x02000103
+#define SSPP_BGRA_PATTERN 0x01000203
+
+#define LAYER_BLEND5_OP 0x260
+#define LAYER_OP_ENABLE_ALPHA_BLEND 0x600
+#define LAYER_OP_DISABLE_ALPHA_BLEND 0x200
+
+static u32 edrm_plane_formats_RGB[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444
+};
+
+static u32 edrm_plane_formats_YUV[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420
+};
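+
+/* The two tables above are selected in edrm_plane_init() below: VIG pipes
+ * (SSPP_TYPE_VIG) advertise the RGB + YUV set, while every other pipe type
+ * advertises the RGB-only set.
+ */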
+
+static void edrm_plane_enable_csc(struct sde_kms *master_kms,
+ u32 plane_offset)
+{
+ writel_relaxed(0x00060000, master_kms->mmio + plane_offset +
+ PIPE_OP_MODE);
+
+ writel_relaxed(0x9, master_kms->mmio + plane_offset + SSPP_CDP_CNTL);
+ writel_relaxed(0x00000254, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_MATRIX_COEFF_0);
+ writel_relaxed(0x02540396, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_MATRIX_COEFF_1);
+ writel_relaxed(0x1eef1f93, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_MATRIX_COEFF_2);
+ writel_relaxed(0x043e0254, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_MATRIX_COEFF_3);
+ writel_relaxed(0x00000000, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_MATRIX_COEFF_4);
+
+ writel_relaxed(0x000010eb, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_0_PRE_CLAMP);
+ writel_relaxed(0x000010f0, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_1_PRE_CLAMP);
+ writel_relaxed(0x000010f0, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_2_PRE_CLAMP);
+ writel_relaxed(0x000000ff, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_0_POST_CLAMP);
+ writel_relaxed(0x000000ff, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_1_POST_CLAMP);
+ writel_relaxed(0x000000ff, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_2_POST_CLAMP);
+ writel_relaxed(0x0000fff0, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_0_PRE_BIAS);
+ writel_relaxed(0x0000ff80, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_1_PRE_BIAS);
+ writel_relaxed(0x0000ff80, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_2_PRE_BIAS);
+ writel_relaxed(0x00000000, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_0_POST_BIAS);
+ writel_relaxed(0x00000000, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_1_POST_BIAS);
+ writel_relaxed(0x00000000, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_2_POST_BIAS);
+
+ writel_relaxed(0x200000, master_kms->mmio + plane_offset +
+ PIPE_COMP0_3_PHASE_STEP_X);
+ writel_relaxed(0x200000, master_kms->mmio + plane_offset +
+ PIPE_COMP0_3_PHASE_STEP_Y);
+ writel_relaxed(0x100000, master_kms->mmio + plane_offset +
+ PIPE_COMP1_2_PHASE_STEP_X);
+ writel_relaxed(0x100000, master_kms->mmio + plane_offset +
+ PIPE_COMP1_2_PHASE_STEP_Y);
+}
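+
+/* The matrix coefficients above appear to be s4.9 fixed point encoding a
+ * BT.709 limited-range YUV-to-RGB conversion (an inference from the values,
+ * not from documentation): 0x254 = 596/512 ~= 1.164 (Y gain) and
+ * 0x396 = 918/512 ~= 1.793 (Cr-to-R), while 0x1eef read as 13-bit two's
+ * complement is -273/512 ~= -0.533 (Cr-to-G).
+ */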
+
+static void edrm_plane_set_yuv_plane(struct drm_plane *plane,
+ struct sde_kms *master_kms, u32 lm_off)
+{
+ u32 img_size, ystride0, ystride1;
+ u32 plane0_addr, plane1_addr, plane2_addr, plane3_addr;
+ struct edrm_plane *edrm_plane;
+
+ edrm_plane = to_edrm_plane(plane);
+ edrm_plane_enable_csc(master_kms, edrm_plane->sspp_offset);
+ if ((plane->state->fb->pixel_format == DRM_FORMAT_NV12) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_NV21) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_NV16) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_NV61)) {
+ ystride0 = (plane->state->fb->width << 16) |
+ plane->state->fb->width;
+ ystride1 = 0;
+ plane0_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 0);
+ plane1_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 1);
+ plane2_addr = 0;
+ plane3_addr = 0;
+ } else if ((plane->state->fb->pixel_format == DRM_FORMAT_YUYV) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_YVYU) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_VYUY) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_UYVY)) {
+ /* YUYV formats are single plane */
+ ystride0 = plane->state->fb->width * 2;
+ ystride1 = 0;
+ plane0_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 0);
+ plane1_addr = 0;
+ plane2_addr = 0;
+ plane3_addr = 0;
+ } else if ((plane->state->fb->pixel_format == DRM_FORMAT_YUV420) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_YVU420)) {
+ ystride0 = ((plane->state->fb->width/2) << 16) |
+ plane->state->fb->width;
+ ystride1 = plane->state->fb->width/2;
+ plane0_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 0);
+ plane1_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 1);
+ plane2_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 2);
+ plane3_addr = 0;
+ } else {
+ pr_err("Format %x not supported in eDRM\n",
+ plane->state->fb->pixel_format);
+ return;
+ }
+
+ writel_relaxed(ystride0, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_YSTRIDE0);
+ writel_relaxed(ystride1, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_YSTRIDE1);
+ writel_relaxed(plane0_addr, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC0_ADDR);
+ writel_relaxed(plane1_addr, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC1_ADDR);
+ writel_relaxed(plane2_addr, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC2_ADDR);
+ writel_relaxed(plane3_addr, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC3_ADDR);
+ writel_relaxed(0x00055f03, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_CONFIG);
+ writel_relaxed(0x00000020, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_SHARP_SMOOTH_STRENGTH);
+ writel_relaxed(0x00000070, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_SHARP_THRESHOLD_EDGE);
+ writel_relaxed(0x00000008, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_SHARP_THRESHOLD_SMOOTH);
+ writel_relaxed(0x00000002, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_SHARP_THRESHOLD_NOISE);
+
+ writel_relaxed(0x00020001, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C0_LR);
+ writel_relaxed(0x00020001, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C0_TB);
+ img_size = ((plane->state->fb->height + 3) << 16) |
+ (plane->state->fb->width + 3);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C0_REQ);
+
+ writel_relaxed(0x00010000, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C1C2_LR);
+ writel_relaxed(0x00010000, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C1C2_TB);
+ img_size = ((plane->state->fb->height/2 + 1) << 16) |
+ (plane->state->fb->width/2 + 1);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C1C2_REQ);
+
+ writel_relaxed(0x00010000, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C3_LR);
+ writel_relaxed(0x00010000, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C3_TB);
+ img_size = ((plane->state->fb->height + 1) << 16) |
+ (plane->state->fb->width + 1);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C3_REQ);
+
+ /* program the constant fill color for the pipe */
+ writel_relaxed(0xFF000000, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_CONSTANT_COLOR);
+
+ /* setup blending for mixer stage 5 */
+ writel_relaxed(LAYER_OP_DISABLE_ALPHA_BLEND, master_kms->mmio + lm_off
+ + LAYER_BLEND5_OP);
+}
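+
+/* Worked stride example for the NV12 branch above, assuming a hypothetical
+ * 1920x1080 framebuffer: ystride0 = (width << 16) | width packs two plane
+ * strides into one register (the top half presumably being the chroma
+ * plane's), so ystride0 = (1920 << 16) | 1920 = 0x07800780, with plane0 and
+ * plane1 holding the luma and chroma plane iovas respectively.
+ */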
+
+static void edrm_plane_set_rgb_plane(struct drm_plane *plane,
+ struct sde_kms *master_kms, u32 lm_off)
+{
+ u32 img_size, ystride0, ystride1, plane_addr;
+ struct edrm_plane *edrm_plane;
+
+ edrm_plane = to_edrm_plane(plane);
+
+ ystride0 = (plane->state->fb->width *
+ plane->state->fb->bits_per_pixel/8);
+ ystride1 = 0;
+ plane_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 0);
+ img_size = (plane->state->fb->height << 16) | plane->state->fb->width;
+ writel_relaxed(plane_addr, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC0_ADDR);
+ writel_relaxed(ystride0, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_YSTRIDE0);
+ writel_relaxed(ystride1, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_YSTRIDE1);
+ writel_relaxed(0x0, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C0_LR);
+ writel_relaxed(0x0, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C0_TB);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C0_REQ);
+ writel_relaxed(0x0, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C1C2_LR);
+ writel_relaxed(0x0, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C1C2_TB);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C1C2_REQ);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C3_LR);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C3_TB);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C3_REQ);
+ /* do a solid fill of transparent color */
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_CONSTANT_COLOR);
+
+ /* setup blending for mixer stage 5 */
+ writel_relaxed(LAYER_OP_ENABLE_ALPHA_BLEND, master_kms->mmio + lm_off
+ + LAYER_BLEND5_OP);
+
+ /* disable CSC */
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_OP_MODE);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_CONFIG);
+}
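+
+/* Worked example for the RGB path above with a hypothetical 1920x1080
+ * XRGB8888 framebuffer (32 bpp): ystride0 = 1920 * 32 / 8 = 7680 bytes and
+ * img_size = (1080 << 16) | 1920 = 0x04380780.
+ */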
+
+static int edrm_plane_modeset(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_edrm_kms *edrm_kms;
+ struct msm_drm_private *master_priv;
+ struct sde_kms *master_kms;
+ struct edrm_plane *edrm_plane;
+ bool yuv_format;
+ u32 img_size, src_xy, dst_xy, lm_off;
+ struct msm_edrm_display *display;
+
+ edrm_kms = to_edrm_kms(kms);
+ master_priv = edrm_kms->master_dev->dev_private;
+ master_kms = to_sde_kms(master_priv->kms);
+ edrm_plane = to_edrm_plane(plane);
+ display = &edrm_kms->display[edrm_plane->display_id];
+ lm_off = display->lm_off;
+
+ switch (plane->state->fb->pixel_format) {
+ case DRM_FORMAT_ARGB8888:
+ writel_relaxed(SSPP_ARGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ writel_relaxed(SSPP_ARGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBA8888:
+ writel_relaxed(SSPP_ARGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ writel_relaxed(SSPP_XRGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRA8888:
+ writel_relaxed(SSPP_ARGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ writel_relaxed(SSPP_XRGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ writel_relaxed(SSPP_XRGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBX8888:
+ writel_relaxed(SSPP_XRGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGB888:
+ writel_relaxed(SSPP_RGB888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00020001, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGR888:
+ writel_relaxed(SSPP_RGB888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00010002, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGB565:
+ writel_relaxed(SSPP_RGB565_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00020001, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGR565:
+ writel_relaxed(SSPP_RGB565_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00010002, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_ARGB1555:
+ writel_relaxed(SSPP_ARGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_ABGR1555:
+ writel_relaxed(SSPP_ARGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBA5551:
+ writel_relaxed(SSPP_ARGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRA5551:
+ writel_relaxed(SSPP_ARGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ writel_relaxed(SSPP_XRGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XBGR1555:
+ writel_relaxed(SSPP_XRGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBX5551:
+ writel_relaxed(SSPP_XRGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRX5551:
+ writel_relaxed(SSPP_XRGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_ARGB4444:
+ writel_relaxed(SSPP_ARGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_ABGR4444:
+ writel_relaxed(SSPP_ARGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBA4444:
+ writel_relaxed(SSPP_ARGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRA4444:
+ writel_relaxed(SSPP_ARGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XRGB4444:
+ writel_relaxed(SSPP_XRGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XBGR4444:
+ writel_relaxed(SSPP_XRGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBX4444:
+ writel_relaxed(SSPP_XRGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRX4444:
+ writel_relaxed(SSPP_XRGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_NV12:
+ writel_relaxed(SSPP_NV12_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000201, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_NV21:
+ writel_relaxed(SSPP_NV12_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000102, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_NV16:
+ writel_relaxed(SSPP_NV16_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000201, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_NV61:
+ writel_relaxed(SSPP_NV16_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000102, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_VYUY:
+ writel_relaxed(SSPP_YUYV_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00010002, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_UYVY:
+ writel_relaxed(SSPP_YUYV_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00020001, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_YUYV:
+ writel_relaxed(SSPP_YUYV_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x02000100, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_YVYU:
+ writel_relaxed(SSPP_YUYV_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x01000200, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_YUV420:
+ writel_relaxed(SSPP_YUV420_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000102, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_YVU420:
+ writel_relaxed(SSPP_YUV420_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000201, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ default:
+ pr_err("Format %x not supported in eDRM\n",
+ plane->state->fb->pixel_format);
+ return -EINVAL;
+ }
+
+ if (yuv_format)
+ edrm_plane_set_yuv_plane(plane, master_kms, lm_off);
+ else
+ edrm_plane_set_rgb_plane(plane, master_kms, lm_off);
+
+ img_size = (plane->state->fb->height << 16) | plane->state->fb->width;
+ src_xy = (plane->state->src_x << 16) | plane->state->src_y;
+ dst_xy = (plane->state->crtc_x << 16) | plane->state->crtc_y;
+
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_SRC_SIZE);
+ writel_relaxed(src_xy, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_SRC_XY);
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_OUT_SIZE);
+ writel_relaxed(dst_xy, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_OUT_XY);
+
+ return 0;
+}
+
+void edrm_plane_destroy(struct drm_plane *plane)
+{
+ struct edrm_plane *edrm_plane = to_edrm_plane(plane);
+
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+ kfree(edrm_plane);
+}
+
+int edrm_plane_flush(struct drm_plane *plane)
+{
+ struct edrm_plane *edrm_plane = to_edrm_plane(plane);
+ struct edrm_crtc *edrm_crtc = to_edrm_crtc(plane->state->crtc);
+ u32 sspp_flush_mask_bit[10] = {
+ 0, 1, 2, 18, 3, 4, 5, 19, 11, 12};
+
+ edrm_crtc->sspp_flush_mask |=
+ BIT(sspp_flush_mask_bit[edrm_plane->sspp_cfg_id - 1]);
+ return 0;
+}
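+
+/* Example of the flush-bit lookup above: an SSPP with sspp_cfg_id == 4
+ * indexes sspp_flush_mask_bit[3] and therefore sets CTL flush bit 18; the
+ * table presumably mirrors the CTL_FLUSH register layout for VIG0-3,
+ * RGB0-3 and DMA0/1 on this hardware.
+ */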
+
+static int edrm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ /* TODO: check plane setting */
+ return 0;
+}
+
+static void edrm_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (!plane->state->crtc) {
+ pr_err("state crtc is null, skip pipe programming\n");
+ return;
+ }
+ if (!plane->state->fb) {
+ pr_err("state fb is null, skip pipe programming\n");
+ return;
+ }
+
+ if (edrm_plane_modeset(plane))
+ pr_err("Plane modeset failed\n");
+}
+
+/* Plane disable should set up the SSPP to show a transparent frame.
+ * If the pipe stayed attached to a buffer pointer, the buffer could
+ * be released and cause an SMMU fault. We don't want to change CTL and
+ * LM while eDRM is closing because main DRM could be updating CTL and
+ * LM at any moment. eDRM's lastclose() notifies main DRM to release
+ * the eDRM display resources; the next main DRM commit will clear
+ * the stage set up by eDRM.
+ */
+static void edrm_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_edrm_kms *edrm_kms;
+ struct msm_drm_private *master_priv;
+ struct sde_kms *master_kms;
+ struct msm_edrm_display *display;
+ struct edrm_plane *edrm_plane;
+ u32 img_size, stride, lm_off;
+
+ edrm_kms = to_edrm_kms(kms);
+ master_priv = edrm_kms->master_dev->dev_private;
+ master_kms = to_sde_kms(master_priv->kms);
+ dev = edrm_kms->dev;
+ priv = dev->dev_private;
+
+ edrm_plane = to_edrm_plane(plane);
+ display = &edrm_kms->display[edrm_plane->display_id];
+ lm_off = display->lm_off;
+
+ /* setup SSPP */
+ img_size = (display->mode.vdisplay << 16) | display->mode.hdisplay;
+ stride = display->mode.hdisplay * 4;
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_SRC_SIZE);
+ writel_relaxed(0, master_kms->mmio + edrm_plane->sspp_offset
+ + SSPP_SRC_XY);
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_OUT_SIZE);
+ writel_relaxed(0, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_OUT_XY);
+ writel_relaxed(stride, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_SRC_YSTRIDE0);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C0_LR);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C0_TB);
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIXEL_EXT_C0_REQ);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C1C2_LR);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C1C2_TB);
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIXEL_EXT_C1C2_REQ);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C3_LR);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C3_TB);
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIXEL_EXT_C3_REQ);
+
+ /* RGB format */
+ writel_relaxed(SSPP_SOLID_FILL_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ /* do a solid fill of transparent color */
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_CONSTANT_COLOR);
+ writel_relaxed(LAYER_OP_ENABLE_ALPHA_BLEND, master_kms->mmio + lm_off
+ + LAYER_BLEND5_OP);
+
+ /* disable CSC */
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_OP_MODE);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_CONFIG);
+}
+
+static int edrm_plane_prepare_fb(struct drm_plane *plane,
+ const struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb;
+ struct edrm_plane *edrm_plane;
+
+ if (!plane || !new_state)
+ return -EINVAL;
+
+ if (!new_state->fb)
+ return 0;
+ edrm_plane = to_edrm_plane(plane);
+ fb = new_state->fb;
+ return msm_framebuffer_prepare(fb, edrm_plane->aspace);
+}
+
+static void edrm_plane_cleanup_fb(struct drm_plane *plane,
+ const struct drm_plane_state *old_state)
+{
+ struct drm_framebuffer *fb = old_state ? old_state->fb : NULL;
+ struct edrm_plane *edrm_plane = plane ? to_edrm_plane(plane) : NULL;
+
+ if (!fb || !plane)
+ return;
+
+ msm_framebuffer_cleanup(fb, edrm_plane->aspace);
+}
+
+static const struct drm_plane_funcs edrm_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = edrm_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs edrm_plane_helper_funcs = {
+ .prepare_fb = edrm_plane_prepare_fb,
+ .cleanup_fb = edrm_plane_cleanup_fb,
+ .atomic_check = edrm_plane_atomic_check,
+ .atomic_update = edrm_plane_atomic_update,
+ .atomic_disable = edrm_plane_atomic_disable,
+};
+
+struct drm_plane *edrm_plane_init(struct drm_device *dev, int pipe,
+ u32 pipe_type)
+{
+ struct msm_drm_private *priv;
+ struct msm_edrm_kms *edrm_kms;
+ struct edrm_plane *edrm_plane;
+ struct drm_plane *plane;
+ int ret;
+
+ edrm_plane = kzalloc(sizeof(*edrm_plane), GFP_KERNEL);
+ if (!edrm_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &edrm_plane->base;
+ if (pipe_type == SSPP_TYPE_VIG)
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &edrm_plane_funcs,
+ edrm_plane_formats_YUV,
+ ARRAY_SIZE(edrm_plane_formats_YUV),
+ DRM_PLANE_TYPE_PRIMARY);
+ else
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &edrm_plane_funcs,
+ edrm_plane_formats_RGB,
+ ARRAY_SIZE(edrm_plane_formats_RGB),
+ DRM_PLANE_TYPE_PRIMARY);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &edrm_plane_helper_funcs);
+
+ priv = dev->dev_private;
+ edrm_kms = to_edrm_kms(priv->kms);
+
+ edrm_plane->pipe = pipe;
+ edrm_plane->aspace = edrm_kms->aspace;
+
+ return plane;
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_plane.h b/drivers/gpu/drm/msm/ekms/edrm_plane.h
new file mode 100644
index 000000000000..3136bb86d124
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_plane.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EDRM_PLANE_H_
+#define _EDRM_PLANE_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "edrm_kms.h"
+
+struct edrm_plane {
+ struct drm_plane base;
+ struct msm_gem_address_space *aspace;
+ int pipe;
+ int display_id;
+ u32 sspp_offset;
+ u32 sspp_cfg_id;
+ u32 lm_stage;
+ u32 sspp_type;
+};
+
+int edrm_plane_flush(struct drm_plane *plane);
+
+struct drm_plane *edrm_plane_init(struct drm_device *dev, int id, u32 type);
+
+void edrm_plane_destroy(struct drm_plane *plane);
+
+#define to_edrm_plane(x) container_of(x, struct edrm_plane, base)
+
+#endif /* _EDRM_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/ekms/edrm_splash.c b/drivers/gpu/drm/msm/ekms/edrm_splash.c
new file mode 100644
index 000000000000..ec94586b1261
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_splash.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+#include <linux/memblock.h>
+#include <soc/qcom/early_domain.h>
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "edrm_kms.h"
+#include "sde_splash.h"
+#include "edrm_splash.h"
+
+/* scratch registers */
+#define SCRATCH_REGISTER_0 0x014
+#define SCRATCH_REGISTER_1 0x018
+#define SCRATCH_REGISTER_2 0x01C
+#define SCRATCH_REGISTER_3 0x020
+
+#define SDE_RUNNING_VALUE 0xC001CAFE
+#define SDE_LK_STOP_VALUE 0xDEADDEAD
+#define SDE_EXIT_VALUE 0xDEADBEEF
+#define SDE_LK_IMMEDIATE_STOP_VALUE 0xFEFEFEFE
+
+/**
+ * edrm_splash_notify_lk_stop_splash.
+ *
+ * Function to stop early splash in LK.
+ */
+void edrm_splash_notify_lk_stop_splash(struct msm_kms *kms)
+{
+ request_early_service_shutdown(EARLY_DISPLAY);
+}
+
+/**
+ * edrm_splash_poll_lk_stop_splash.
+ *
+ * Function to poll for early splash stop in LK.
+ */
+void edrm_splash_poll_lk_stop_splash(struct msm_kms *kms)
+{
+ int i = 0;
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+
+ /* each read may wait up to 10000us, worst case polling is 4 sec */
+ while (i < 400) {
+ /* read LK status from scratch register */
+ if (!get_early_service_status(EARLY_DISPLAY)) {
+ edrm_kms->lk_running_flag = false;
+ break;
+ }
+ usleep_range(8000, 10000);
+ i++;
+ }
+}
+
+/*
+ * Report whether the early display is still running or never started.
+ */
+int edrm_splash_get_lk_status(struct msm_kms *kms)
+{
+ if (get_early_service_status(EARLY_DISPLAY))
+ return SPLASH_STATUS_RUNNING;
+ else
+ return SPLASH_STATUS_NOT_START;
+}
+
+
+/*
+ * Notify main DRM that the early display has started.
+ */
+void edrm_display_acquire(struct msm_kms *kms)
+{
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+ struct sde_kms *master_kms;
+ struct sde_splash_info *master_sinfo;
+ struct msm_drm_private *master_priv =
+ edrm_kms->master_dev->dev_private;
+
+ master_kms = to_sde_kms(master_priv->kms);
+ master_sinfo = &master_kms->splash_info;
+ master_sinfo->early_display_enabled = true;
+}
+
+/*
+ * Notify main DRM that the early display has exited.
+ */
+void edrm_display_release(struct msm_kms *kms)
+{
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+ struct sde_kms *master_kms;
+ struct sde_splash_info *master_sinfo;
+ struct msm_drm_private *master_priv =
+ edrm_kms->master_dev->dev_private;
+
+ master_kms = to_sde_kms(master_priv->kms);
+ master_sinfo = &master_kms->splash_info;
+ master_sinfo->early_display_enabled = false;
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_splash.h b/drivers/gpu/drm/msm/ekms/edrm_splash.h
new file mode 100644
index 000000000000..78d3806a7adf
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_splash.h
@@ -0,0 +1,59 @@
+/**
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef EDRM_SPLASH_H_
+#define EDRM_SPLASH_H_
+
+#define SPLASH_STATUS_NOT_START 0
+#define SPLASH_STATUS_RUNNING 1
+#define SPLASH_STATUS_STOP 2
+
+/* APIs for early splash handoff functions */
+
+/**
+ * edrm_splash_notify_lk_stop_splash.
+ *
+ * Tell LK to stop the display splash. LK may continue to run until the last frame.
+ */
+void edrm_splash_notify_lk_stop_splash(struct msm_kms *kms);
+
+
+/**
+ * edrm_splash_poll_lk_stop_splash.
+ *
+ * Wait until LK stops the splash at the last frame or exits the splash app.
+ */
+void edrm_splash_poll_lk_stop_splash(struct msm_kms *kms);
+
+/**
+ * edrm_splash_get_lk_status
+ *
+ * Get early display status to set the status flag.
+ */
+int edrm_splash_get_lk_status(struct msm_kms *kms);
+
+/**
+ * edrm_display_acquire
+ *
+ * Update main DRM that eDRM is active and the eDRM display resources are in use.
+ */
+void edrm_display_acquire(struct msm_kms *kms);
+
+/**
+ * edrm_display_release
+ *
+ * Update main DRM that the eDRM display resources are no longer
+ * in use. Main DRM can claim back the resources at any time.
+ */
+void edrm_display_release(struct msm_kms *kms);
+
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 341738f624db..8b5d8614f7ec 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -258,33 +258,55 @@ static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
return 0;
}
-static inline int sde_hw_ctl_get_splash_mixercfg(const u32 *resv_pipes,
- u32 length)
+static inline void sde_hw_ctl_get_splash_mixer_mask(const u32 *resv_pipes,
+ u32 length, u32 *mixercfg, u32 *mixercfg_ext)
{
int i = 0;
- u32 mixercfg = 0;
+ u32 mixer_mask = 0;
+ u32 mixer_ext_mask = 0;
for (i = 0; i < length; i++) {
- /* LK's splash VIG layer always stays on top */
+ /* LK's splash VIG layer always stays at the second-top-most
+ * mixer stage; the early HMI RGB layer stays at the top-most
+ * stage.
+ */
switch (resv_pipes[i]) {
case SSPP_VIG0:
- mixercfg |= 0x7 << 0;
+ mixer_mask |= 0x7 << 0;
+ mixer_ext_mask |= BIT(0);
break;
case SSPP_VIG1:
- mixercfg |= 0x7 << 3;
+ mixer_mask |= 0x7 << 3;
+ mixer_ext_mask |= BIT(2);
break;
case SSPP_VIG2:
- mixercfg |= 0x7 << 6;
+ mixer_mask |= 0x7 << 6;
+ mixer_ext_mask |= BIT(4);
break;
case SSPP_VIG3:
- mixercfg |= 0x7 << 26;
+ mixer_mask |= 0x7 << 26;
+ mixer_ext_mask |= BIT(6);
+ break;
+ case SSPP_RGB0:
+ mixer_mask |= 0x7 << 9;
+ mixer_ext_mask |= BIT(8);
+ break;
+ case SSPP_RGB1:
+ mixer_mask |= 0x7 << 12;
+ mixer_ext_mask |= BIT(10);
+ break;
+ case SSPP_RGB2:
+ mixer_mask |= 0x7 << 15;
+ mixer_ext_mask |= BIT(12);
+ break;
+ case SSPP_RGB3:
+ mixer_mask |= 0x7 << 29;
+ mixer_ext_mask |= BIT(14);
break;
default:
break;
}
}
-
- return mixercfg;
+ *mixercfg = mixer_mask;
+ *mixercfg_ext = mixer_ext_mask;
}
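+
+/* Example of the masks built above (layout inferred from the shift values):
+ * for SSPP_VIG1 the 3-bit stage field sits at bits [5:3] of CTL_LAYER and
+ * BIT(2) of CTL_LAYER_EXT carries its extended stage bit, so ANDing the live
+ * register values with these masks preserves only the reserved pipes' stage
+ * bits during handoff.
+ */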
static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 count)
@@ -346,25 +368,33 @@ static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx,
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
int i;
+ u32 mixercfg = 0;
+ u32 mixercfg_ext = 0;
+ u32 mixer_mask, mixerext_mask;
+ int mixer_id;
for (i = 0; i < ctx->mixer_count; i++) {
- int mixer_id = ctx->mixer_hw_caps[i].id;
- u32 mixercfg = 0;
+ mixer_id = ctx->mixer_hw_caps[i].id;
/*
 * if bootloader still has early RVC running, mixer status
 * can't be directly cleared.
*/
if (handoff) {
- mixercfg =
- sde_hw_ctl_get_splash_mixercfg(resv_pipes,
- resv_pipes_length);
-
- mixercfg &= SDE_REG_READ(c, CTL_LAYER(mixer_id));
+ /*
+ * if bootloader still has early display or early RVC
+ * running, mixer status can't be directly cleared.
+ */
+ mixercfg = SDE_REG_READ(c, CTL_LAYER(mixer_id));
+ mixercfg_ext = SDE_REG_READ(c,
+ CTL_LAYER_EXT(mixer_id));
+ sde_hw_ctl_get_splash_mixer_mask(resv_pipes,
+ resv_pipes_length, &mixer_mask, &mixerext_mask);
+ mixercfg &= mixer_mask;
+ mixercfg_ext &= mixerext_mask;
}
-
SDE_REG_WRITE(c, CTL_LAYER(mixer_id), mixercfg);
- SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
+ SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), mixercfg_ext);
SDE_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
SDE_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
}
@@ -376,6 +406,7 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
u32 mixercfg, mixercfg_ext, mix, ext, mixercfg_ext2;
+ u32 mixer_mask, mixerext_mask;
int i, j;
u8 stages;
int pipes_per_stage;
@@ -402,13 +433,13 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
* should be updated to kernel's mixer setup.
*/
if (handoff) {
- mixercfg =
- sde_hw_ctl_get_splash_mixercfg(resv_pipes,
- resv_pipes_length);
-
- mixercfg &= SDE_REG_READ(c, CTL_LAYER(lm));
+ mixercfg = SDE_REG_READ(c, CTL_LAYER(lm));
+ mixercfg_ext = SDE_REG_READ(c, CTL_LAYER_EXT(lm));
+ sde_hw_ctl_get_splash_mixer_mask(resv_pipes,
+ resv_pipes_length, &mixer_mask, &mixerext_mask);
+ mixercfg &= mixer_mask;
+ mixercfg_ext &= mixerext_mask;
mixercfg |= BIT(24);
- stages--;
}
for (i = 0; i <= stages; i++) {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index a008ecf4a11d..5538c807692c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -146,7 +146,7 @@ struct sde_hw_ctl_ops {
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
- * @handoff : handoff flag
+ * @handoff : indicate whether LK is prepared for handoff
* @resv_pipes : reserved pipes in DT
* @resv_pipes_length: array size of array reserved_pipes
*/
@@ -158,7 +158,7 @@ struct sde_hw_ctl_ops {
* @ctx : ctl path ctx pointer
* @lm : layer mixer enumeration
* @cfg : blend stage configuration
- * @handoff : handoff flag
+ * @handoff : indicate whether LK is prepared for handoff
* @resv_pipes : reserved pipes in DT
* @resv_pipes_length: array size of array reserved_pipes
*/
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 44a5f8c4535b..2ad6d608d9be 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -359,13 +359,13 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
struct drm_device *dev = sde_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, true);
+
if (sde_kms->splash_info.handoff &&
sde_kms->splash_info.display_splash_enabled)
sde_splash_lk_stop_splash(kms, state);
- sde_power_resource_enable(&priv->phandle,
- sde_kms->core_client, true);
-
shd_display_prepare_commit(sde_kms, state);
}
@@ -1508,7 +1508,7 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
- rc = sde_splash_parse_reserved_plane_dt(sinfo,
+ rc = sde_splash_parse_reserved_plane_dt(dev, sinfo,
sde_kms->catalog);
if (rc)
SDE_ERROR("parse reserved plane dt failed: %d\n", rc);
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c
index b0d63ec4c64c..2b9a070db0fe 100644
--- a/drivers/gpu/drm/msm/sde/sde_splash.c
+++ b/drivers/gpu/drm/msm/sde/sde_splash.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -505,7 +505,8 @@ static inline u32 _sde_splash_parse_sspp_id(struct sde_mdss_cfg *cfg,
return 0;
}
-int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info,
+int sde_splash_parse_reserved_plane_dt(struct drm_device *dev,
+ struct sde_splash_info *splash_info,
struct sde_mdss_cfg *cfg)
{
struct device_node *parent, *node;
@@ -516,7 +517,8 @@ int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info,
if (!splash_info || !cfg)
return -EINVAL;
- parent = of_find_node_by_path("/qcom,sde-reserved-plane");
+ parent = of_get_child_by_name(dev->dev->of_node,
+ "qcom,sde-reserved-plane");
if (!parent)
return -EINVAL;
@@ -761,6 +763,8 @@ bool sde_splash_get_lk_complete_status(struct msm_kms *kms)
intr = sde_kms->hw_intr;
if (sde_kms->splash_info.handoff &&
+ !sde_kms->splash_info.display_splash_enabled &&
+ !sde_kms->splash_info.early_display_enabled &&
!_sde_splash_lk_check()) {
SDE_DEBUG("LK totally exits\n");
return true;
@@ -940,11 +944,6 @@ int sde_splash_lk_stop_splash(struct msm_kms *kms,
sinfo = &sde_kms->splash_info;
- if (!sinfo) {
- SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__);
- return -EINVAL;
- }
-
/* Monitor LK's status and tell it to exit. */
mutex_lock(&sde_splash_lock);
if (_sde_splash_validate_commit(sde_kms, state) &&
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h
index ee94c348e25b..8332c99c6f1b 100644
--- a/drivers/gpu/drm/msm/sde/sde_splash.h
+++ b/drivers/gpu/drm/msm/sde/sde_splash.h
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -56,6 +56,12 @@ struct sde_splash_info {
/* to indicate LK is totally exited */
bool lk_is_exited;
+ /* flag of early display status */
+ bool early_display_enabled;
+
+ /* flag of early RVC status */
+ bool early_camera_enabled;
+
/* memory node used for display buffer */
uint32_t splash_mem_num;
@@ -140,8 +146,9 @@ int sde_splash_parse_memory_dt(struct drm_device *dev);
*
* Parse reserved plane information from DT for early RVC case.
*/
-int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info,
- struct sde_mdss_cfg *cfg);
+int sde_splash_parse_reserved_plane_dt(struct drm_device *dev,
+ struct sde_splash_info *splash_info,
+ struct sde_mdss_cfg *cfg);
/*
* sde_splash_query_plane_is_reserved
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 06496a128162..4150873d432e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -130,7 +130,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
static vq_callback_t *callbacks[] = {
virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
};
- static const char *names[] = { "control", "cursor" };
+ static const char * const names[] = { "control", "cursor" };
struct virtio_gpu_device *vgdev;
/* this will expand later */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 802dcb409030..b877cce0409b 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -316,6 +316,8 @@ static ssize_t out_intr_mask_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
@@ -329,6 +331,8 @@ static ssize_t out_read_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.current_read_index);
}
@@ -343,6 +347,8 @@ static ssize_t out_write_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.current_write_index);
}
@@ -357,6 +363,8 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
@@ -371,6 +379,8 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
@@ -384,6 +394,8 @@ static ssize_t in_intr_mask_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
@@ -397,6 +409,8 @@ static ssize_t in_read_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.current_read_index);
}
@@ -410,6 +424,8 @@ static ssize_t in_write_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.current_write_index);
}
@@ -424,6 +440,8 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
@@ -438,6 +456,8 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
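
The same two-line guard is repeated in all ten ring-buffer attributes above, because the ring buffers are only valid while the channel is open. A hypothetical helper (not part of the patch) consolidating it could look like:

	static int hv_chan_debuginfo(struct hv_device *hv_dev, bool out,
				     struct hv_ring_buffer_debug_info *info)
	{
		if (!hv_dev->channel)
			return -ENODEV;
		/* rings are torn down once the channel leaves OPENED */
		if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
			return -EINVAL;
		hv_ringbuffer_get_debuginfo(out ? &hv_dev->channel->outbound
						: &hv_dev->channel->inbound, info);
		return 0;
	}
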
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 70ca27e45602..9d9e47eb0842 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1418,7 +1418,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr,
if (!end)
break;
- len -= end - p;
+ /* consume the number and the following comma, hence +1 */
+ len -= end - p + 1;
p = end + 1;
} while (len);
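
The off-by-one above: p is advanced past the separating comma but len was not, so len drifted one byte high per token. A worked example of the corrected accounting, assuming a memchr()-style scan:

	const char *p = buf, *end;
	size_t len = 5;			/* e.g. buf = "64,32" */

	end = memchr(p, ',', len);	/* end - p == 2 */
	len -= end - p + 1;		/* 5 - 3 == 2: exactly "32" left */
	p = end + 1;			/* p now points at "32" */
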
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 94c837046786..57e3790c87b1 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -459,9 +459,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return i2cdev_ioctl_smbus(client, arg);
case I2C_RETRIES:
+ if (arg > INT_MAX)
+ return -EINVAL;
+
client->adapter->retries = arg;
break;
case I2C_TIMEOUT:
+ if (arg > INT_MAX)
+ return -EINVAL;
+
/* For historical reasons, user-space sets the timeout
* value in units of 10 ms.
*/
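
Both ioctls store an unsigned long arg into int fields of struct i2c_adapter, so without the INT_MAX clamp a large value wraps negative. A small illustration of the failure mode the checks prevent:

	unsigned long arg = (unsigned long)INT_MAX + 1;
	int retries = arg;	/* implementation-defined; typically -2147483648 */
	/* a negative retries/timeout then confuses every user of the adapter */
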
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index f78c464899db..3d2c60c8de83 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -126,12 +126,8 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
{
struct omap4_keypad *keypad_data = dev_id;
- if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) {
- /* Disable interrupts */
- kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
- OMAP4_VAL_IRQDISABLE);
+ if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS))
return IRQ_WAKE_THREAD;
- }
return IRQ_NONE;
}
@@ -173,11 +169,6 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
- /* enable interrupts */
- kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
- OMAP4_DEF_IRQENABLE_EVENTEN |
- OMAP4_DEF_IRQENABLE_LONGKEY);
-
return IRQ_HANDLED;
}
@@ -214,9 +205,10 @@ static void omap4_keypad_close(struct input_dev *input)
disable_irq(keypad_data->irq);
- /* Disable interrupts */
+ /* Disable interrupts and wake-up events */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
OMAP4_VAL_IRQDISABLE);
+ kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0);
/* clear pending interrupts */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
@@ -364,7 +356,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
}
error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
- omap4_keypad_irq_thread_fn, 0,
+ omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
"omap4-keypad", keypad_data);
if (error) {
dev_err(&pdev->dev, "failed to register interrupt\n");
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 471984ec2db0..30adc5745cba 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1240,6 +1240,7 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
+ { "ELAN0501", 0 },
{ "ELAN0600", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 0881d68f34d8..66c2abd358f8 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017,2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -513,12 +513,22 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
av8l_fast_iopte *ptep;
unsigned long flags;
struct sg_mapping_iter miter;
- unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
+ size_t count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
int prot = IOMMU_READ | IOMMU_WRITE; /* TODO: extract from attrs */
bool is_coherent = is_dma_coherent(dev, attrs);
pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
struct page **pages;
+ /*
+ * sg_alloc_table_from_pages() takes an unsigned int count,
+ * so check that count doesn't exceed UINT_MAX.
+ */
+ if (count > UINT_MAX) {
+ dev_err(dev, "count: %zx exceeds UINT_MAX\n", count);
+ return NULL;
+ }
+
prot = __get_iommu_pgprot(attrs, prot, is_coherent);
*handle = DMA_ERROR_CODE;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 7feaa82f8c7c..8b4a4d95669a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2041,7 +2041,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* than default. Unnecessary for PT mode.
*/
if (translation != CONTEXT_TT_PASS_THROUGH) {
- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
ret = -ENOMEM;
pgd = phys_to_virt(dma_pte_addr(pgd));
if (!dma_pte_present(pgd))
@@ -2055,7 +2055,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
translation = CONTEXT_TT_MULTI_LEVEL;
context_set_address_root(context, virt_to_phys(pgd));
- context_set_address_width(context, iommu->agaw);
+ context_set_address_width(context, agaw);
} else {
/*
* In pass through mode, AW must be programmed to
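
The two changes above are linked: if domain->agaw is already smaller than iommu->agaw, the old `agaw != iommu->agaw` loop decrements straight past the target and never terminates, and the context entry was then programmed with the IOMMU's width instead of the width actually walked to. The corrected shape, abridged:

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		pgd = phys_to_virt(dma_pte_addr(pgd));
		if (!dma_pte_present(pgd))
			return -ENOMEM;		/* abridged error path */
	}
	...
	context_set_address_width(context, agaw);	/* the agaw we stopped at */
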
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index dd7e38ac29bd..d15347de415a 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -851,7 +851,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
u16 ret;
if (contr == 0) {
- strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
+ strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
return CAPI_NOERROR;
}
@@ -859,7 +859,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
ctr = get_capi_ctr_by_nr(contr);
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
- strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
+ strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
ret = CAPI_NOERROR;
} else
ret = CAPI_REGNOTINSTALLED;
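
The likely rationale for the strlcpy() -> strncpy() swap: strlcpy() always walks the source to its NUL terminator to compute its return value, so an unterminated ctr->manu would be over-read. strncpy() reads at most the stated length into the fixed-size field:

	/* strncpy() stops after CAPI_MANUFACTURER_LEN bytes even if src
	 * lacks a NUL; strlcpy() would keep scanning src for one. */
	strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
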
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 0ad06670fa99..7bf1cdb582f5 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -476,21 +476,6 @@ config DM_VERITY
If unsure, say N.
-config DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
- bool "Prefetch size 128"
-
-config DM_VERITY_HASH_PREFETCH_MIN_SIZE
- int "Verity hash prefetch minimum size"
- depends on DM_VERITY
- range 1 4096
- default 128 if DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
- default 1
- ---help---
- This sets minimum number of hash blocks to prefetch for dm-verity.
- For devices like eMMC, having larger prefetch size like 128 can improve
- performance with increased memory consumption for keeping more hashes
- in RAM.
-
config DM_VERITY_FEC
bool "Verity forward error correction support"
depends on DM_VERITY
@@ -554,7 +539,6 @@ config DM_ANDROID_VERITY
depends on ASYMMETRIC_KEY_TYPE
depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
depends on MD_LINEAR=y
- select DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
---help---
This device-mapper target is virtually a VERITY target. This
target is setup by reading the metadata contents piggybacked
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 54c308e6704f..04248394843e 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -55,15 +55,17 @@ struct dm_kcopyd_client {
struct dm_kcopyd_throttle *throttle;
/*
- * We maintain three lists of jobs:
+ * We maintain four lists of jobs:
*
* i) jobs waiting for pages
* ii) jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that have completed.
+ * iii) jobs that don't need to do any IO and just run a callback
+ * iv) jobs that have completed.
*
- * All three of these are protected by job_lock.
+ * All four of these are protected by job_lock.
*/
spinlock_t job_lock;
+ struct list_head callback_jobs;
struct list_head complete_jobs;
struct list_head io_jobs;
struct list_head pages_jobs;
@@ -583,6 +585,7 @@ static void do_work(struct work_struct *work)
struct dm_kcopyd_client *kc = container_of(work,
struct dm_kcopyd_client, kcopyd_work);
struct blk_plug plug;
+ unsigned long flags;
/*
* The order that these are called is *very* important.
@@ -591,6 +594,10 @@ static void do_work(struct work_struct *work)
* list. io jobs call wake when they complete and it all
* starts again.
*/
+ spin_lock_irqsave(&kc->job_lock, flags);
+ list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
+ spin_unlock_irqrestore(&kc->job_lock, flags);
+
blk_start_plug(&plug);
process_jobs(&kc->complete_jobs, kc, run_complete_job);
process_jobs(&kc->pages_jobs, kc, run_pages_job);
@@ -608,7 +615,7 @@ static void dispatch_job(struct kcopyd_job *job)
struct dm_kcopyd_client *kc = job->kc;
atomic_inc(&kc->nr_jobs);
if (unlikely(!job->source.count))
- push(&kc->complete_jobs, job);
+ push(&kc->callback_jobs, job);
else if (job->pages == &zero_page_list)
push(&kc->io_jobs, job);
else
@@ -795,7 +802,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
job->read_err = read_err;
job->write_err = write_err;
- push(&kc->complete_jobs, job);
+ push(&kc->callback_jobs, job);
wake(kc);
}
EXPORT_SYMBOL(dm_kcopyd_do_callback);
@@ -825,6 +832,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
return ERR_PTR(-ENOMEM);
spin_lock_init(&kc->job_lock);
+ INIT_LIST_HEAD(&kc->callback_jobs);
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs);
@@ -874,6 +882,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
/* Wait for completion of all jobs submitted by this client. */
wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
+ BUG_ON(!list_empty(&kc->callback_jobs));
BUG_ON(!list_empty(&kc->complete_jobs));
BUG_ON(!list_empty(&kc->io_jobs));
BUG_ON(!list_empty(&kc->pages_jobs));
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index e4d1bafe78c1..2a855e5429ab 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -19,6 +19,7 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
+#include <linux/semaphore.h>
#include "dm.h"
@@ -105,6 +106,9 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
+ /* Maximum number of in-flight COW jobs. */
+ struct semaphore cow_count;
+
struct dm_kcopyd_client *kcopyd_client;
/* Wait for events based on state_bits */
@@ -145,6 +149,19 @@ struct dm_snapshot {
#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1
+/*
+ * Maximum number of chunks being copied on write.
+ *
+ * The value was decided experimentally as a trade-off between memory
+ * consumption, stalling the kernel's workqueues and maintaining a high enough
+ * throughput.
+ */
+#define DEFAULT_COW_THRESHOLD 2048
+
+static int cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
+
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
"A percentage of time allocated for copy on write");
@@ -1189,6 +1206,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
+ sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
r = PTR_ERR(s->kcopyd_client);
@@ -1560,6 +1579,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
}
list_add(&pe->out_of_order_entry, lh);
}
+ up(&s->cow_count);
}
/*
@@ -1583,6 +1603,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
+ down(&s->cow_count);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@@ -1602,6 +1623,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
+ down(&s->cow_count);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
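
The semaphore gives snapshot COW a hard in-flight cap: start_copy() and start_full_bio() take a slot with down() before handing the chunk to kcopyd, and copy_callback() releases it, so at most snapshot_cow_threshold chunk copies (default 2048) are outstanding and writers back-pressure instead of flooding kcopyd's queues. Being a 0644 module parameter, it should be tunable at runtime via /sys/module/dm_snapshot/parameters/snapshot_cow_threshold. The pairing, abridged:

	down(&s->cow_count);	/* may sleep once the cap is reached */
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
	...
	/* in copy_callback(), once the chunk is accounted: */
	up(&s->cow_count);
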
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 48b09dedd0fb..52f261381b0c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1542,6 +1542,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
smp_mb();
if (dm_table_request_based(t))
queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
+
+ /* io_pages is used for readahead */
+ q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index d2e3abc182b3..131077aabd08 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -529,7 +529,6 @@ static void verity_prefetch_io(struct work_struct *work)
container_of(work, struct dm_verity_prefetch_work, work);
struct dm_verity *v = pw->v;
int i;
- sector_t prefetch_size;
for (i = v->levels - 2; i >= 0; i--) {
sector_t hash_block_start;
@@ -552,14 +551,8 @@ static void verity_prefetch_io(struct work_struct *work)
hash_block_end = v->hash_blocks - 1;
}
no_prefetch_cluster:
- // for emmc, it is more efficient to send bigger read
- prefetch_size = max((sector_t)CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE,
- hash_block_end - hash_block_start + 1);
- if ((hash_block_start + prefetch_size) >= (v->hash_start + v->hash_blocks)) {
- prefetch_size = hash_block_end - hash_block_start + 1;
- }
dm_bufio_prefetch(v->bufio, hash_block_start,
- prefetch_size);
+ hash_block_end - hash_block_start + 1);
}
kfree(pw);
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 251a556112a9..280b5ffea592 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r)
return r->operand[7];
}
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
struct avc_response_frame *r = (void *)fdtv->avc_data;
@@ -1009,7 +1010,8 @@ out:
return ret;
}
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
struct avc_response_frame *r = (void *)fdtv->avc_data;
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
index 345d1eda8c05..5b18a08c6285 100644
--- a/drivers/media/firewire/firedtv.h
+++ b/drivers/media/firewire/firedtv.h
@@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
struct dvb_diseqc_master_cmd *diseqcmd);
void avc_remote_ctrl_work(struct work_struct *work);
int avc_register_remote_control(struct firedtv *fdtv);
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len);
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len);
int avc_ca_reset(struct firedtv *fdtv);
int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index fa5aee08c37d..c0bf239e0a35 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -35,6 +35,7 @@
#include "msm_isp44.h"
#include "msm_isp40.h"
#include "msm_isp32.h"
+#include "msm_cam_cx_ipeak.h"
static struct msm_sd_req_vb2_q vfe_vb2_ops;
static struct msm_isp_buf_mgr vfe_buf_mgr;
@@ -675,6 +676,11 @@ int vfe_hw_probe(struct platform_device *pdev)
"qcom,vfe-cx-ipeak", NULL)) {
vfe_dev->vfe_cx_ipeak = cx_ipeak_register(
pdev->dev.of_node, "qcom,vfe-cx-ipeak");
+ if (vfe_dev->vfe_cx_ipeak)
+ cam_cx_ipeak_register_cx_ipeak(
+ vfe_dev->vfe_cx_ipeak, &vfe_dev->cx_ipeak_bit);
+ pr_debug("%s: register cx_ipeak received bit %d\n",
+ __func__, vfe_dev->cx_ipeak_bit);
}
} else {
vfe_dev->hw_info = (struct msm_vfe_hardware_info *)
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 0c18bfe55842..9bc6fde36774 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -791,6 +791,7 @@ struct vfe_device {
enum cam_ahb_clk_vote ahb_vote;
enum cam_ahb_clk_vote user_requested_ahb_vote;
struct cx_ipeak_client *vfe_cx_ipeak;
+ int cx_ipeak_bit;
/* Sync variables*/
struct completion reset_complete;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 0daf2d914be5..a8341a7ff3e6 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -25,6 +25,7 @@
#include "cam_soc_api.h"
#include "msm_isp48.h"
#include "linux/iopoll.h"
+#include "msm_cam_cx_ipeak.h"
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -2680,7 +2681,9 @@ int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
prev_clk_rate <
vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_NOMINAL]
[vfe_dev->hw_info->vfe_clk_idx]) {
- ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, true);
+ pr_debug("%s: clk is more than Nominal vfe %d, ipeak bit %d\n",
+ __func__, vfe_dev->pdev->id, vfe_dev->cx_ipeak_bit);
+ ret = cam_cx_ipeak_update_vote_cx_ipeak(vfe_dev->cx_ipeak_bit);
if (ret) {
pr_err("%s: cx_ipeak_update failed %d\n",
__func__, ret);
@@ -2703,7 +2706,9 @@ int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
prev_clk_rate >=
vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_NOMINAL]
[vfe_dev->hw_info->vfe_clk_idx]) {
- ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, false);
+ pr_debug("%s:clk is less than Nominal vfe %d, ipeak bit %d\n",
+ __func__, vfe_dev->pdev->id, vfe_dev->cx_ipeak_bit);
+ ret = cam_cx_ipeak_unvote_cx_ipeak(vfe_dev->cx_ipeak_bit);
if (ret) {
pr_err("%s: cx_ipeak_update failed %d\n",
__func__, ret);
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 4b5671cd9c28..d61e6beabfa4 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,6 +36,7 @@
#include "msm_camera_io_util.h"
#include <linux/debugfs.h>
#include "cam_smmu_api.h"
+#include "msm_cam_cx_ipeak.h"
#define MSM_CPP_DRV_NAME "msm_cpp"
@@ -1537,7 +1538,9 @@ static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
}
if (cpp_dev->turbo_vote == 1) {
- rc = cx_ipeak_update(cpp_dev->cpp_cx_ipeak, false);
+ pr_debug("%s:cx_ipeak_update unvote. ipeak bit %d\n",
+ __func__, cpp_dev->cx_ipeak_bit);
+ rc = cam_cx_ipeak_unvote_cx_ipeak(cpp_dev->cx_ipeak_bit);
if (rc)
pr_err("cx_ipeak_update failed");
else
@@ -3109,7 +3112,9 @@ unsigned long cpp_cx_ipeak_update(struct cpp_device *cpp_dev,
if ((clock >= cpp_dev->hw_info.freq_tbl
[(cpp_dev->hw_info.freq_tbl_count) - 1]) &&
(cpp_dev->turbo_vote == 0)) {
- ret = cx_ipeak_update(cpp_dev->cpp_cx_ipeak, true);
+ pr_debug("%s: clk is more than Nominal cpp, ipeak bit %d\n",
+ __func__, cpp_dev->cx_ipeak_bit);
+ ret = cam_cx_ipeak_update_vote_cx_ipeak(cpp_dev->cx_ipeak_bit);
if (ret) {
pr_err("cx_ipeak voting failed setting clock below turbo");
clock = cpp_dev->hw_info.freq_tbl
@@ -3122,7 +3127,10 @@ unsigned long cpp_cx_ipeak_update(struct cpp_device *cpp_dev,
[(cpp_dev->hw_info.freq_tbl_count) - 1]) {
clock_rate = msm_cpp_set_core_clk(cpp_dev, clock, idx);
if (cpp_dev->turbo_vote == 1) {
- ret = cx_ipeak_update(cpp_dev->cpp_cx_ipeak, false);
+ pr_debug("%s:clk is less than Nominal, ipeak bit %d\n",
+ __func__, cpp_dev->cx_ipeak_bit);
+ ret = cam_cx_ipeak_unvote_cx_ipeak(
+ cpp_dev->cx_ipeak_bit);
if (ret)
pr_err("cx_ipeak unvoting failed");
else
@@ -4605,8 +4613,12 @@ static int cpp_probe(struct platform_device *pdev)
if (of_find_property(pdev->dev.of_node, "qcom,cpp-cx-ipeak", NULL)) {
cpp_dev->cpp_cx_ipeak = cx_ipeak_register(
pdev->dev.of_node, "qcom,cpp-cx-ipeak");
- if (cpp_dev->cpp_cx_ipeak)
- CPP_DBG("Cx ipeak Registration Successful ");
+ if (cpp_dev->cpp_cx_ipeak) {
+ cam_cx_ipeak_register_cx_ipeak(cpp_dev->cpp_cx_ipeak,
+ &cpp_dev->cx_ipeak_bit);
+ pr_err("%s register cx_ipeak received bit %d\n",
+ __func__, cpp_dev->cx_ipeak_bit);
+ }
else
pr_err("Cx ipeak Registration Unsuccessful");
}
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
index f2c544785f46..0f5d6759acd3 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -293,6 +293,7 @@ struct cpp_device {
struct msm_cpp_vbif_data *vbif_data;
bool turbo_vote;
struct cx_ipeak_client *cpp_cx_ipeak;
+ int cx_ipeak_bit;
enum cpp_iommu_fault_state fault_status;
};
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 83cc6d3b4784..81ba454a6d95 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -863,8 +863,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
"%s-vid-cap", dev->v4l2_dev.name);
if (IS_ERR(dev->kthread_vid_cap)) {
+ int err = PTR_ERR(dev->kthread_vid_cap);
+
+ dev->kthread_vid_cap = NULL;
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- return PTR_ERR(dev->kthread_vid_cap);
+ return err;
}
*pstreaming = true;
vivid_grab_controls(dev, true);
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index c2c46dcdbe95..2c5dbdcb576a 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -248,8 +248,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
"%s-vid-out", dev->v4l2_dev.name);
if (IS_ERR(dev->kthread_vid_out)) {
+ int err = PTR_ERR(dev->kthread_vid_out);
+
+ dev->kthread_vid_out = NULL;
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- return PTR_ERR(dev->kthread_vid_out);
+ return err;
}
*pstreaming = true;
vivid_grab_controls(dev, true);
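
Both vivid hunks fix the same pattern: kthread_run() returns an ERR_PTR() on failure, and leaving that poison value in dev->kthread_vid_{cap,out} would make a later kthread_stop() on the stop path dereference it. Generic shape of the fix:

	task = kthread_run(fn, data, "name");
	if (IS_ERR(task)) {
		int err = PTR_ERR(task);	/* save before clobbering */

		task = NULL;			/* so stop paths can test it */
		return err;
	}
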
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index ef5412311b2f..a84954f1be34 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -461,6 +461,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
break;
}
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
vivid_update_quality(dev);
tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
dev->crop_cap = dev->src_rect;
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 1678b730dba2..2e82f520a869 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -33,7 +33,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
+ V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 6cfcdcea27e0..873948e429e8 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -930,6 +930,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
em28xx_videodbg("%s\n", __func__);
+ dev->v4l2->field_count = 0;
+
/* Make sure streaming is not already in progress for this type
of filehandle (e.g. video, vbi) */
rc = res_get(dev, vq->type);
@@ -1149,8 +1151,6 @@ static void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
{
struct em28xx *dev = priv;
- dev->v4l2->field_count = 0;
-
/*
* In the case of non-AC97 volume controls, we still need
* to do some setups at em28xx, in order to mute/unmute
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 8ce9c63dfc59..e0041fcfa783 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1976,9 +1976,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
return -EINVAL;
}
}
+
+ mutex_lock(&q->mmap_lock);
+
if (vb2_fileio_is_active(q)) {
dprintk(1, "mmap: file io in progress\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto unlock;
}
/*
@@ -1986,7 +1990,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
*/
ret = __find_plane_by_offset(q, off, &buffer, &plane);
if (ret)
- return ret;
+ goto unlock;
vb = q->bufs[buffer];
@@ -1999,11 +2003,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
if (length < (vma->vm_end - vma->vm_start)) {
dprintk(1,
"MMAP invalid, as it would overflow buffer length\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
- mutex_lock(&q->mmap_lock);
ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+
+unlock:
mutex_unlock(&q->mmap_lock);
if (ret)
return ret;
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 5628a6b5b19b..c5c320efc7b4 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -594,6 +594,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
return 0;
}
+static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+ if (tps6586x->client->irq)
+ disable_irq(tps6586x->client->irq);
+
+ return 0;
+}
+
+static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+ if (tps6586x->client->irq)
+ enable_irq(tps6586x->client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
+ tps6586x_i2c_resume);
+
static const struct i2c_device_id tps6586x_id_table[] = {
{ "tps6586x", 0 },
{ },
@@ -604,6 +627,7 @@ static struct i2c_driver tps6586x_driver = {
.driver = {
.name = "tps6586x",
.of_match_table = of_match_ptr(tps6586x_of_match),
+ .pm = &tps6586x_pm_ops,
},
.probe = tps6586x_i2c_probe,
.remove = tps6586x_i2c_remove,
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 524660510599..0c15ba21fa54 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -217,7 +217,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
dma_addr_t *dma_handle)
{
- if (get_order(size) > MAX_ORDER)
+ if (get_order(size) >= MAX_ORDER)
return NULL;
return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
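
MAX_ORDER is an exclusive bound in the page allocator: valid orders run 0 .. MAX_ORDER-1, so an allocation of order MAX_ORDER itself must already be rejected, hence `>=`:

	/* orders 0 .. MAX_ORDER-1 are allocatable; MAX_ORDER itself is not */
	if (get_order(size) >= MAX_ORDER)
		return NULL;
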
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index e486a0c26267..f6ed57d3125c 100644
--- a/drivers/misc/mic/card/mic_virtio.c
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -311,7 +311,7 @@ unmap:
static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char *names[])
+ const char * const names[])
{
struct mic_vdev *mvdev = to_micvdev(vdev);
struct mic_device_ctrl __iomem *dc = mvdev->dc;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 76dbbbde884b..db4f4c8638b4 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -285,6 +285,7 @@ static int mmc_devfreq_get_dev_status(struct device *dev,
{
struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
struct mmc_devfeq_clk_scaling *clk_scaling;
+ bool disable = false;
if (!host) {
pr_err("bad host parameter\n");
@@ -312,7 +313,14 @@ static int mmc_devfreq_get_dev_status(struct device *dev,
}
}
- status->busy_time = clk_scaling->total_busy_time_us;
+ if (host->ops->check_temp &&
+ host->card->clk_scaling_highest > UHS_DDR50_MAX_DTR)
+ disable = host->ops->check_temp(host);
+ /* report busy_time = 0 so devfreq scales down to low freq */
+ if (disable)
+ status->busy_time = 0;
+ else
+ status->busy_time = clk_scaling->total_busy_time_us;
status->total_time = ktime_to_us(ktime_sub(ktime_get(),
clk_scaling->measure_interval_start));
clk_scaling->total_busy_time_us = 0;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 999fdb9bad7d..1ba28b350e91 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2087,9 +2087,11 @@ reinit:
if (err) {
pr_warn("%s: Enabling HPI failed\n",
mmc_hostname(card->host));
+ card->ext_csd.hpi_en = 0;
err = 0;
- } else
+ } else {
card->ext_csd.hpi_en = 1;
+ }
}
/*
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index bf896b605487..9a7f9d27be1f 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -33,6 +33,8 @@
#define UHS_SDR25_MIN_DTR (25 * 1000 * 1000)
#define UHS_SDR12_MIN_DTR (12.5 * 1000 * 1000)
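+/* local pseudo-errno: the host driver supplies no temp-control callback */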
+#define ENOCALLBACK 1
+
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
@@ -498,7 +500,11 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
err = -EBUSY;
} else {
mmc_set_timing(card->host, timing);
- mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
+ if (card->host->ops->check_temp(card->host) &&
+ timing == MMC_TIMING_UHS_SDR104)
+ mmc_set_clock(card->host, UHS_SDR50_MAX_DTR);
+ else
+ mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
}
return err;
@@ -1122,6 +1128,34 @@ free_card:
return err;
}
+static int mmc_sd_init_temp_control_clk_scaling(struct mmc_host *host)
+{
+ int ret;
+
+ if (host->ops->reg_temp_callback) {
+ ret = host->ops->reg_temp_callback(host);
+ } else {
+ pr_err("%s: %s: couldn't find init temp control clk scaling cb\n",
+ mmc_hostname(host), __func__);
+ ret = -ENOCALLBACK;
+ }
+ return ret;
+}
+
+static int mmc_sd_dereg_temp_control_clk_scaling(struct mmc_host *host)
+{
+ int ret;
+
+ if (host->ops->dereg_temp_callback) {
+ ret = host->ops->dereg_temp_callback(host);
+ } else {
+ pr_err("%s: %s: couldn't find dereg temp control clk scaling cb\n",
+ mmc_hostname(host), __func__);
+ ret = -ENOCALLBACK;
+ }
+ return ret;
+}
+
/*
* Host is being removed. Free up the current card.
*/
@@ -1131,6 +1165,7 @@ static void mmc_sd_remove(struct mmc_host *host)
BUG_ON(!host->card);
mmc_exit_clk_scaling(host);
+ mmc_sd_dereg_temp_control_clk_scaling(host);
mmc_remove_card(host->card);
mmc_claim_host(host);
@@ -1458,6 +1493,9 @@ int mmc_attach_sd(struct mmc_host *host)
goto err;
}
+ if (mmc_sd_init_temp_control_clk_scaling(host))
+ pr_err("%s: failed to init temp control clk scaling\n",
+ mmc_hostname(host));
/*
* Detect and init the card.
*/
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index bf62e429f7fc..98be9eb3184b 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1840,13 +1840,14 @@ static void atmci_tasklet_func(unsigned long priv)
}
atmci_request_end(host, host->mrq);
- state = STATE_IDLE;
+ goto unlock; /* atmci_request_end() sets host->state */
break;
}
} while (state != prev_state);
host->state = state;
+unlock:
spin_unlock(&host->lock);
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 6b814d7d6560..af937d3e8c3e 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2117,7 +2117,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
- mmc->max_seg_size = mmc->max_req_size;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
@@ -2174,6 +2173,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
goto err_irq;
}
+ /*
+ * Limit the maximum segment size to the lower of the request size
+ * and the DMA engine device segment size limits. In reality, with
+ * 32-bit transfers, the DMA engine can do longer segments than this
+ * but there is no way to represent that in the DMA model - if we
+ * increase this figure here, we get warnings from the DMA API debug.
+ */
+ mmc->max_seg_size = min3(mmc->max_req_size,
+ dma_get_max_seg_size(host->rx_chan->device->dev),
+ dma_get_max_seg_size(host->tx_chan->device->dev));
+
/* Request IRQ for MMC operations */
ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
mmc_hostname(mmc), host);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 81a781c1f9d6..0468ea464055 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -40,6 +40,8 @@
#include <linux/pm_runtime.h>
#include <trace/events/mmc.h>
#include <soc/qcom/boot_stats.h>
+#include <linux/msm_thermal.h>
+#include <linux/msm_tsens.h>
#include "sdhci-msm.h"
#include "sdhci-msm-ice.h"
@@ -169,6 +171,8 @@
#define MAX_DRV_TYPES_SUPPORTED_HS200 4
#define MSM_AUTOSUSPEND_DELAY_MS 100
+#define CENTI_DEGREE_TO_DEGREE 10
+
struct sdhci_msm_offset {
u32 CORE_MCI_DATA_CNT;
u32 CORE_MCI_STATUS;
@@ -3534,6 +3538,151 @@ int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
return 0;
}
+static void sdhci_msm_tsens_threshold_notify(
+ struct therm_threshold *tsens_cb_data)
+{
+ struct threshold_info *info = tsens_cb_data->parent;
+ struct sdhci_msm_host *msm_host = container_of(info,
+ struct sdhci_msm_host, tsens_threshold_config);
+ int ret = 0;
+
+ pr_debug("%s: Triggered tsens-notification type=%d zone_id =%d\n",
+ mmc_hostname(msm_host->mmc), tsens_cb_data->trip_triggered,
+ tsens_cb_data->sensor_id);
+
+ switch (tsens_cb_data->trip_triggered) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ atomic_set(&msm_host->clk_scaling_disable, 0);
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ atomic_set(&msm_host->clk_scaling_disable, 1);
+ break;
+ default:
+ pr_err("%s: trip type %d not supported\n",
+ mmc_hostname(msm_host->mmc),
+ tsens_cb_data->trip_triggered);
+ break;
+ }
+
+ ret = sensor_mgr_set_threshold(tsens_cb_data->sensor_id,
+ tsens_cb_data->threshold);
+ if (ret < 0)
+ pr_err("%s: failed to set threshold temp, ret==%d\n",
+ __func__, ret);
+}
+
+static int sdhci_msm_check_tsens(struct sdhci_msm_host *msm_host)
+{
+ int ret = 0;
+ int temp = 0;
+ bool disable;
+ struct tsens_device tsens_dev;
+
+ if (tsens_is_ready() > 0) {
+ tsens_dev.sensor_num = msm_host->tsens_id;
+ ret = tsens_get_temp(&tsens_dev, &temp);
+ if (ret < 0) {
+ pr_err("%s: failed to read tsens, ret = %d\n",
+ mmc_hostname(msm_host->mmc), ret);
+ return ret;
+ }
+ /* convert centidegrees to degrees */
+ temp /= CENTI_DEGREE_TO_DEGREE;
+ disable = temp <= msm_host->disable_scaling_threshold_temp;
+ if (disable)
+ atomic_set(&msm_host->clk_scaling_disable, 1);
+ }
+ return ret;
+}
+
+static int sdhci_msm_register_cb(struct sdhci_msm_host *msm_host)
+{
+ int ret;
+
+ ret = sdhci_msm_check_tsens(msm_host);
+ if (ret) {
+ pr_err("%s: unable to check tsens\n",
+ mmc_hostname(msm_host->mmc));
+ return ret;
+ }
+
+ ret = sensor_mgr_init_threshold(&msm_host->tsens_threshold_config,
+ msm_host->tsens_id,
+ msm_host->enable_scaling_threshold_temp,/*high*/
+ msm_host->disable_scaling_threshold_temp,/*low*/
+ sdhci_msm_tsens_threshold_notify);
+ if (ret) {
+ pr_err("%s: failed to register cb for tsens, ret = %d\n",
+ mmc_hostname(msm_host->mmc), ret);
+ return ret;
+ }
+
+ ret = sensor_mgr_convert_id_and_set_threshold(
+ &msm_host->tsens_threshold_config);
+ if (ret) {
+ pr_err("%s: failed to set tsens threshold, ret = %d\n",
+ mmc_hostname(msm_host->mmc), ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int sdhci_msm_tsens_pltfm_init(struct sdhci_msm_host *msm_host)
+{
+ int ret = 0;
+ struct device *dev = &msm_host->pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ of_property_read_u32(np, "qcom,tsens-id", &msm_host->tsens_id);
+ of_property_read_s32(np, "qcom,disable_scaling_threshold_temp",
+ &msm_host->disable_scaling_threshold_temp);
+ of_property_read_s32(np, "qcom,enable_scaling_threshold_temp",
+ &msm_host->enable_scaling_threshold_temp);
+
+ if (msm_host->tsens_id)
+ msm_host->temp_control_scaling = true;
+ else
+ msm_host->temp_control_scaling = false;
+
+ atomic_set(&msm_host->clk_scaling_disable, 0);
+ return ret;
+}
+
+static int sdhci_msm_dereg_temp_callback(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (msm_host->temp_control_scaling)
+ sensor_mgr_remove_threshold(
+ &msm_host->tsens_threshold_config);
+ return 0;
+}
+
+static int sdhci_msm_reg_temp_callback(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int ret = 0;
+
+ if (msm_host->temp_control_scaling) {
+ ret = sdhci_msm_register_cb(msm_host);
+ if (ret)
+ pr_err("%s: failed register temp monitoring call back, ret = %d\n",
+ mmc_hostname(msm_host->mmc), ret);
+ }
+ return ret;
+}
+
+static int sdhci_msm_check_temp(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return atomic_read(&msm_host->clk_scaling_disable);
+}
+
void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
u32 vendor_func2;
@@ -4080,6 +4229,9 @@ static struct sdhci_ops sdhci_msm_ops = {
.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
.notify_load = sdhci_msm_notify_load,
+ .check_temp = sdhci_msm_check_temp,
+ .reg_temp_callback = sdhci_msm_reg_temp_callback,
+ .dereg_temp_callback = sdhci_msm_dereg_temp_callback,
.reset_workaround = sdhci_msm_reset_workaround,
.init = sdhci_msm_init,
.pre_req = sdhci_msm_pre_req,
@@ -4650,6 +4802,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
MMC_CAP2_PACKED_WR_CONTROL);
}
+ sdhci_msm_tsens_pltfm_init(msm_host);
init_completion(&msm_host->pwr_irq_completion);
if (gpio_is_valid(msm_host->pdata->status_gpio)) {
@@ -5073,5 +5226,30 @@ static struct platform_driver sdhci_msm_driver = {
module_platform_driver(sdhci_msm_driver);
+static const struct of_device_id late_sdhci_msm_dt_match[] = {
+ {.compatible = "qcom,late-sdhci-msm"},
+ {.compatible = "qcom,sdhci-msm-v5"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, late_sdhci_msm_dt_match);
+
+static struct platform_driver late_sdhci_msm_driver = {
+ .probe = sdhci_msm_probe,
+ .remove = sdhci_msm_remove,
+ .driver = {
+ .name = "late_sdhci_msm",
+ .owner = THIS_MODULE,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = late_sdhci_msm_dt_match,
+ .pm = SDHCI_MSM_PMOPS,
+ },
+};
+
+static int __init late_sdhci_msm_init_driver(void)
+{
+ return platform_driver_register(&late_sdhci_msm_driver);
+}
+late_initcall(late_sdhci_msm_init_driver);
+
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 79949c2c537f..bb15cd58c664 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,8 @@
#include <linux/mmc/mmc.h>
#include <linux/pm_qos.h>
+#include <linux/msm_thermal.h>
+#include <linux/msm_tsens.h>
#include "sdhci-pltfm.h"
/* This structure keeps information per regulator */
@@ -227,6 +229,14 @@ struct sdhci_msm_host {
const struct sdhci_msm_offset *offset;
bool core_3_0v_support;
bool pltfm_init_done;
+
+ /* temperature controlled scaling */
+ int tsens_id;
+ int disable_scaling_threshold_temp;
+ int enable_scaling_threshold_temp;
+ bool temp_control_scaling;
+ atomic_t clk_scaling_disable;
+ struct threshold_info tsens_threshold_config;
};
extern char *saved_command_line;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 40a08a520861..b96f27f861ba 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1583,6 +1583,27 @@ static void sdhci_notify_halt(struct mmc_host *mmc, bool halt)
}
}
+static int sdhci_reg_temp_callback(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ /* only sdhci-msm implements these ops; guard other sdhci hosts */
+ if (!host->ops->reg_temp_callback)
+ return -EINVAL;
+
+ return host->ops->reg_temp_callback(host);
+}
+
+static int sdhci_check_temp(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!host->ops->check_temp)
+ return 0;
+
+ return host->ops->check_temp(host);
+}
+
+static int sdhci_dereg_temp_callback(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!host->ops->dereg_temp_callback)
+ return -EINVAL;
+
+ return host->ops->dereg_temp_callback(host);
+}
+
static inline void sdhci_update_power_policy(struct sdhci_host *host,
enum sdhci_power_policy policy)
{
@@ -2769,6 +2790,9 @@ static const struct mmc_host_ops sdhci_ops = {
.notify_load = sdhci_notify_load,
.notify_halt = sdhci_notify_halt,
.force_err_irq = sdhci_force_err_irq,
+ .check_temp = sdhci_check_temp,
+ .dereg_temp_callback = sdhci_dereg_temp_callback,
+ .reg_temp_callback = sdhci_reg_temp_callback,
};
/*****************************************************************************\
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 81aecb90ac8d..86b7066a81c2 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -691,6 +691,9 @@ struct sdhci_ops {
unsigned int max_dtr, int host_drv,
int card_drv, int *drv_type);
int (*notify_load)(struct sdhci_host *host, enum mmc_load state);
+ int (*check_temp)(struct sdhci_host *host);
+ int (*reg_temp_callback)(struct sdhci_host *host);
+ int (*dereg_temp_callback)(struct sdhci_host *host);
void (*reset_workaround)(struct sdhci_host *host, u32 enable);
void (*init)(struct sdhci_host *host);
void (*pre_req)(struct sdhci_host *host, struct mmc_request *req);
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 547098086773..f81df91a9ce1 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -52,5 +52,5 @@ config CAIF_VIRTIO
The caif driver for CAIF over Virtio.
if CAIF_VIRTIO
-source "drivers/vhost/Kconfig"
+source "drivers/vhost/Kconfig.vringh"
endif
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 2f9b12cf9ee5..61a9ab4fe047 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1163,11 +1163,15 @@ out:
map_failed_frags:
last = i+1;
- for (i = 0; i < last; i++)
+ for (i = 1; i < last; i++)
dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
DMA_TO_DEVICE);
+ dma_unmap_single(&adapter->vdev->dev,
+ descs[0].fields.address,
+ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
map_failed:
if (!firmware_has_feature(FW_FEATURE_CMO))
netdev_err(netdev, "tx: unable to map xmit buffer\n");
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 25a0ad5102d6..855cf8c15c8a 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -111,10 +111,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
- u64 ns;
+ u64 cycles, ns;
spin_lock_irqsave(&adapter->systim_lock, flags);
- ns = timecounter_read(&adapter->tc);
+
+ /* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
+ cycles = adapter->cc.read(&adapter->cc);
+ ns = timecounter_cyc2time(&adapter->tc, cycles);
+
spin_unlock_irqrestore(&adapter->systim_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -170,9 +174,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
struct timespec64 ts;
+ u64 ns;
- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
+ /* Update the timecounter */
+ ns = timecounter_read(&adapter->tc);
+ ts = ns_to_timespec64(ns);
e_dbg("SYSTIM overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);
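
The split matters because timecounter_read() both reads and advances the timecounter state, while timecounter_cyc2time() is read-only. gettime can now observe a non-monotonic SYSTIM value without folding it into the counter; the overflow worker remains the only writer:

	/* read-only conversion in .gettime64: */
	cycles = adapter->cc.read(&adapter->cc);
	ns = timecounter_cyc2time(&adapter->tc, cycles);

	/* periodic worker is the sole caller that advances the state: */
	ns = timecounter_read(&adapter->tc);
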
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 93543e176829..8f40e121f7d4 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -324,6 +324,8 @@ enum cfg_version {
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
+ { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
+ { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 111d907e0c11..79cede19e0c4 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2825,6 +2825,12 @@ static int hso_get_config_data(struct usb_interface *interface)
return -EIO;
}
+ /* check if we have a valid interface */
+ if (if_num > 16) {
+ kfree(config_data);
+ return -EINVAL;
+ }
+
switch (config_data[if_num]) {
case 0x0:
result = 0;
@@ -2895,10 +2901,18 @@ static int hso_probe(struct usb_interface *interface,
/* Get the interface/port specification from either driver_info or from
* the device itself */
- if (id->driver_info)
+ if (id->driver_info) {
+ /* if_num is controlled by the device; driver_info is a 0-terminated
+ * array. Make sure the access is in bounds! */
+ for (i = 0; i <= if_num; ++i)
+ if (((u32 *)(id->driver_info))[i] == 0)
+ goto exit;
port_spec = ((u32 *)(id->driver_info))[if_num];
- else
+ } else {
port_spec = hso_get_config_data(interface);
+ if (port_spec < 0)
+ goto exit;
+ }
/* Check if we need to switch to alt interfaces prior to port
* configuration */
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index ec2b9c577b90..3644c9edaf81 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -616,7 +616,7 @@ struct b43_c32 b43_cordic(int theta)
u8 i;
s32 tmp;
s8 signx = 1;
- u32 angle = 0;
+ s32 angle = 0;
struct b43_c32 ret = { .i = 39797, .q = 0, };
while (theta > (180 << 16))
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 0a4bd73caae5..6f55ab4f7959 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -889,7 +889,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
- BUG_ON(pull_to <= skb_headlen(skb));
+ BUG_ON(pull_to < skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
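
pull_to == skb_headlen(skb) is a legal no-op (nothing left to pull), so only the strictly-smaller case is a bug:

	BUG_ON(pull_to < skb_headlen(skb));		/* == is fine: */
	__pskb_pull_tail(skb, pull_to - skb_headlen(skb));	/* pulls 0 bytes */
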
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index 99da549d5d06..0118287a8a10 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -40,8 +40,10 @@
#define P2A_INT_ENABLE 0x3070
#define P2A_INT_ENA_ALL 0xf
#define RP_LTSSM 0x3c64
+#define RP_LTSSM_MASK 0x1f
#define LTSSM_L0 0xf
+#define PCIE_CAP_OFFSET 0x80
/* TLP configuration type 0 and 1 */
#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */
@@ -60,6 +62,9 @@
#define TLP_LOOP 500
#define RP_DEVFN 0
+#define LINK_UP_TIMEOUT HZ
+#define LINK_RETRAIN_TIMEOUT HZ
+
#define INTX_NUM 4
#define DWORD_MASK 3
@@ -80,25 +85,21 @@ struct tlp_rp_regpair_t {
u32 reg1;
};
-static void altera_pcie_retrain(struct pci_dev *dev)
+static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
{
- u16 linkcap, linkstat;
-
- /*
- * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
- * current speed is 2.5 GB/s.
- */
- pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap);
+ writel_relaxed(value, pcie->cra_base + reg);
+}
- if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
- return;
+static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->cra_base + reg);
+}
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
- if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB)
- pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_RL);
+static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
+{
+ return (cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0;
}
-DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
@@ -119,17 +120,6 @@ static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
return false;
}
-static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
- const u32 reg)
-{
- writel_relaxed(value, pcie->cra_base + reg);
-}
-
-static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
-{
- return readl_relaxed(pcie->cra_base + reg);
-}
-
static void tlp_write_tx(struct altera_pcie *pcie,
struct tlp_rp_regpair_t *tlp_rp_regdata)
{
@@ -138,11 +128,6 @@ static void tlp_write_tx(struct altera_pcie *pcie,
cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}
-static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
-{
- return !!(cra_readl(pcie, RP_LTSSM) & LTSSM_L0);
-}
-
static bool altera_pcie_valid_config(struct altera_pcie *pcie,
struct pci_bus *bus, int dev)
{
@@ -286,22 +271,14 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
}
-static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *value)
+static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size,
+ u32 *value)
{
- struct altera_pcie *pcie = bus->sysdata;
int ret;
u32 data;
u8 byte_en;
- if (altera_pcie_hide_rc_bar(bus, devfn, where))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
- *value = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -314,7 +291,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
break;
}
- ret = tlp_cfg_dword_read(pcie, bus->number, devfn,
+ ret = tlp_cfg_dword_read(pcie, busno, devfn,
(where & ~DWORD_MASK), byte_en, &data);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
@@ -334,20 +311,14 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
-static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
+static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size,
+ u32 value)
{
- struct altera_pcie *pcie = bus->sysdata;
u32 data32;
u32 shift = 8 * (where & 3);
u8 byte_en;
- if (altera_pcie_hide_rc_bar(bus, devfn, where))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -363,8 +334,40 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
break;
}
- return tlp_cfg_dword_write(pcie, bus->number, devfn,
- (where & ~DWORD_MASK), byte_en, data32);
+ return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK),
+ byte_en, data32);
+}
+
+static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
+ *value = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
+ value);
+}
+
+static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
+ value);
}
static struct pci_ops altera_pcie_ops = {
@@ -372,6 +375,90 @@ static struct pci_ops altera_pcie_ops = {
.write = altera_pcie_cfg_write,
};
+static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int offset, u16 *value)
+{
+ u32 data;
+ int ret;
+
+ ret = _altera_pcie_cfg_read(pcie, busno, devfn,
+ PCIE_CAP_OFFSET + offset, sizeof(*value),
+ &data);
+ *value = data;
+ return ret;
+}
+
+static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int offset, u16 value)
+{
+ return _altera_pcie_cfg_write(pcie, busno, devfn,
+ PCIE_CAP_OFFSET + offset, sizeof(value),
+ value);
+}
+
+static void altera_wait_link_retrain(struct altera_pcie *pcie)
+{
+ u16 reg16;
+ unsigned long start_jiffies;
+
+ /* Wait for link training to end. */
+ start_jiffies = jiffies;
+ for (;;) {
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+
+ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
+ dev_err(&pcie->pdev->dev, "link retrain timeout\n");
+ break;
+ }
+ udelay(100);
+ }
+
+ /* Wait for the link to come up */
+ start_jiffies = jiffies;
+ for (;;) {
+ if (altera_pcie_link_is_up(pcie))
+ break;
+
+ if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
+ dev_err(&pcie->pdev->dev, "link up timeout\n");
+ break;
+ }
+ udelay(100);
+ }
+}
+
+static void altera_pcie_retrain(struct altera_pcie *pcie)
+{
+ u16 linkcap, linkstat, linkctl;
+
+ if (!altera_pcie_link_is_up(pcie))
+ return;
+
+ /*
+ * Set the Retrain Link bit if the PCIe root port supports > 2.5 GB/s
+ * but the current link speed is 2.5 GB/s.
+ */
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP,
+ &linkcap);
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return;
+
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA,
+ &linkstat);
+ if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKCTL, &linkctl);
+ linkctl |= PCI_EXP_LNKCTL_RL;
+ altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKCTL, linkctl);
+
+ altera_wait_link_retrain(pcie);
+ }
+}
+
static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -506,6 +593,11 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
return 0;
}
+static void altera_pcie_host_init(struct altera_pcie *pcie)
+{
+ altera_pcie_retrain(pcie);
+}
+
static int altera_pcie_probe(struct platform_device *pdev)
{
struct altera_pcie *pcie;
@@ -543,6 +635,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
/* enable all interrupts */
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
pcie, &pcie->resources);
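Both new wait loops above share one shape: poll a status register, give up after a jiffies-based deadline, and pace the reads with a 100 us delay. The following is a minimal, self-contained userspace sketch of that poll-with-timeout pattern, not the driver itself: clock_gettime() stands in for jiffies, and the check_done() callback is a hypothetical stand-in for the PCI_EXP_LNKSTA / RP_LTSSM reads.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000LL;
}

/* Poll check_done() until it reports success or timeout_ms elapses. */
static bool poll_until(bool (*check_done)(void), long long timeout_ms)
{
	long long start = now_ms();

	for (;;) {
		if (check_done())
			return true;
		if (now_ms() - start > timeout_ms) {
			fprintf(stderr, "poll timeout\n");
			return false;
		}
		usleep(100);	/* mirrors the driver's udelay(100) pacing */
	}
}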
diff --git a/drivers/perf/perf_event_armv8.c b/drivers/perf/perf_event_armv8.c
index 443538a16aea..6b722b7a95d2 100644
--- a/drivers/perf/perf_event_armv8.c
+++ b/drivers/perf/perf_event_armv8.c
@@ -750,6 +750,7 @@ static struct platform_driver armv8_pmu_driver = {
.driver = {
.name = "armv8-pmu",
.of_match_table = armv8_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
},
.probe = armv8_pmu_device_probe,
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index f96f7b865267..7c1defaef3f5 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2084,7 +2084,8 @@ static int asus_wmi_add(struct platform_device *pdev)
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
goto fail_backlight;
- }
+ } else
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
status = wmi_install_notify_handler(asus->driver->event_guid,
asus_wmi_notify, asus);
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 9e29b1321648..15783869e1a0 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -427,14 +427,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
+ val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
break;
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
+ val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
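For context on the factor change above: the power-supply core reports temperatures in tenths of a degree Celsius, and the surrounding code suggests the OLPC EC returns an 8.8 fixed-point value in whole degrees, so converting to tenths is value * 10 / 256 rather than * 100 / 256. A one-line sketch of the corrected conversion; the 8.8 interpretation is an assumption, not something this hunk states.

#include <stdint.h>

/* Assumes the EC word is 8.8 fixed-point degrees Celsius; returns
 * tenths of a degree, the unit the power-supply core expects.
 */
static int ec_word_to_tenths_celsius(int16_t ec_word)
{
	return (int)ec_word * 10 / 256;
}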
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index e1a10232a943..e44872fb9e5e 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -147,7 +147,7 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char *names[])
+ const char * const names[])
{
struct rproc *rproc = vdev_to_rproc(vdev);
int i, ret;
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 73354ee27877..1fcd27c1f183 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -945,7 +945,7 @@ static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
static int rpmsg_probe(struct virtio_device *vdev)
{
vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
- const char *names[] = { "input", "output" };
+ static const char * const names[] = { "input", "output" };
struct virtqueue *vqs[2];
struct virtproc_info *vrp;
void *bufs_va;
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 38c8e308d4c8..a96c98e3fc73 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -275,16 +275,16 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
*/
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
- while (atomic_read(&adapter->stat_miss) > 0)
+ while (atomic_add_unless(&adapter->stat_miss, -1, 0))
if (zfcp_fsf_status_read(adapter->qdio)) {
+ atomic_inc(&adapter->stat_miss); /* undo add -1 */
if (atomic_read(&adapter->stat_miss) >=
adapter->stat_read_buf_num) {
zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
return 1;
}
break;
- } else
- atomic_dec(&adapter->stat_miss);
+ }
return 0;
}
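The zfcp change replaces a read-then-decrement pair with atomic_add_unless(&stat_miss, -1, 0), which decrements only while the counter is still positive and reports whether it did, closing the window in which stat_miss could race below zero. A self-contained C11 sketch of that decrement-unless-zero primitive, built on a compare-and-swap loop:

#include <stdatomic.h>
#include <stdbool.h>

/* Decrement *v unless it is already zero; returns true if the
 * decrement happened. Equivalent in spirit to the kernel's
 * atomic_add_unless(v, -1, 0) used above.
 */
static bool dec_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;
		/* failed CAS reloaded old; loop re-checks for zero */
	}
	return false;
}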
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b..1d060fd293a3 100644
--- a/drivers/s390/virtio/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
@@ -255,7 +255,7 @@ static void kvm_del_vqs(struct virtio_device *vdev)
static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char *names[])
+ const char * const names[])
{
struct kvm_device *kdev = to_kvmdev(vdev);
int i;
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index ff06bdfd2b20..9e685246b98d 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -639,7 +639,7 @@ out:
static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char *names[])
+ const char * const names[])
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
unsigned long *indicatorp = NULL;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index d0b227ffbd5f..573aeec7a02b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2279,7 +2279,7 @@ static int _bnx2fc_create(struct net_device *netdev,
if (!interface) {
printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
rc = -ENOMEM;
- goto ifput_err;
+ goto netdev_err;
}
if (netdev->priv_flags & IFF_802_1Q_VLAN) {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 741509b35617..14f32c114c55 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1273,7 +1273,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, drv_map);
- if (ld >= MAX_LOGICAL_DRIVES_EXT) {
+ if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
lbInfo[ldCount].loadBalanceFlag = 0;
continue;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 213944ed64d9..3d3bfa814093 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1758,7 +1758,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
device_id < instance->fw_supported_vd_count)) {
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
- if (ld >= instance->fw_supported_vd_count)
+ if (ld >= instance->fw_supported_vd_count - 1)
fp_possible = 0;
raid = MR_LdRaidGet(ld, local_map_ptr);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c83dadfff171..bd904d42176d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -207,6 +207,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
sp = buffer_data[0] & 0x80 ? 1 : 0;
buffer_data[0] &= ~0x80;
+ /*
+ * Ensure WP, DPOFUA, and RESERVED fields are cleared in
+ * the received mode parameter buffer before issuing MODE SELECT.
+ */
+ data.device_specific = 0;
+
if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
SD_MAX_RETRIES, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
diff --git a/drivers/soc/qcom/early_dom.c b/drivers/soc/qcom/early_dom.c
index fe54820002dd..5a1cd9ef455d 100644
--- a/drivers/soc/qcom/early_dom.c
+++ b/drivers/soc/qcom/early_dom.c
@@ -154,6 +154,7 @@ static void early_domain_work(struct work_struct *work)
msleep(delay);
delay = (delay > max_delay ? max_delay : delay + 20);
}
+ early_domain_enabled = false;
free_reserved_lk_mem(core_data->lk_pool_paddr, core_data->lk_pool_size);
free_reserved_lk_mem(core_data->early_domain_shm,
core_data->early_domain_shm_size);
@@ -176,10 +177,11 @@ static int early_domain_cpu_notifier(struct notifier_block *self,
cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- if (cpumask_test_cpu(cpu, &core_data->cpumask))
+ if (cpumask_test_cpu(cpu, &core_data->cpumask)) {
pr_err("Early domain services are running on cpu%d\n"
, cpu);
break;
+ }
default:
notifier = NOTIFY_OK;
break;
@@ -195,7 +197,7 @@ static int init_early_domain_data(struct early_domain_core *core_data)
cpumask_clear(&core_data->cpumask);
cpumask = (unsigned long)core_data->pdata->cpumask;
- for_each_set_bit(cpu, &cpumask, sizeof(cpumask_t))
+ for_each_set_bit(cpu, &cpumask, sizeof(cpumask))
cpumask_set_cpu(cpu, &core_data->cpumask);
memset(&core_data->ed_qos_request, 0,
@@ -325,6 +327,7 @@ static int early_domain_remove(struct platform_device *pdev)
pm_qos_remove_request(&core_data->ed_qos_request);
__pm_relax(&core_data->ed_wake_lock);
unregister_cpu_notifier(&core_data->ed_notifier);
+ early_domain_enabled = false;
kfree(core_data);
return 0;
}
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index cf04960cc3e6..1a1368f5863c 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -88,7 +88,7 @@ struct bcm2835_spi {
u8 *rx_buf;
int tx_len;
int rx_len;
- bool dma_pending;
+ unsigned int dma_pending;
};
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
@@ -155,8 +155,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
/* Write as many bytes as possible to FIFO */
bcm2835_wr_fifo(bs);
- /* based on flags decide if we can finish the transfer */
- if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+ if (!bs->rx_len) {
/* Transfer complete - reset SPI HW */
bcm2835_spi_reset_hw(master);
/* wake up the framework */
@@ -233,10 +232,9 @@ static void bcm2835_spi_dma_done(void *data)
* is called the tx-dma must have finished - can't get to this
* situation otherwise...
*/
- dmaengine_terminate_all(master->dma_tx);
-
- /* mark as no longer pending */
- bs->dma_pending = 0;
+ if (cmpxchg(&bs->dma_pending, true, false)) {
+ dmaengine_terminate_all(master->dma_tx);
+ }
/* and mark as completed */;
complete(&master->xfer_completion);
@@ -342,6 +340,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
if (ret) {
/* need to reset on errors */
dmaengine_terminate_all(master->dma_tx);
+ bs->dma_pending = false;
bcm2835_spi_reset_hw(master);
return ret;
}
@@ -617,10 +616,9 @@ static void bcm2835_spi_handle_err(struct spi_master *master,
struct bcm2835_spi *bs = spi_master_get_devdata(master);
/* if an error occurred and we have an active dma, then terminate */
- if (bs->dma_pending) {
+ if (cmpxchg(&bs->dma_pending, true, false)) {
dmaengine_terminate_all(master->dma_tx);
dmaengine_terminate_all(master->dma_rx);
- bs->dma_pending = 0;
}
/* and reset */
bcm2835_spi_reset_hw(master);
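Turning dma_pending from a bool into a word operated on with cmpxchg() makes the clear a test-and-clear: of the completion path and the error path, only the caller that actually flips the flag from true to false runs dmaengine_terminate_all(), so a racing pair cannot tear down the same transfer twice. A minimal C11 sketch of the same guard, with terminate_dma() as a hypothetical callback:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool dma_pending;

/* Only the caller that wins the true->false transition performs the
 * teardown; every other concurrent caller sees false and returns
 * without side effects.
 */
static void finish_dma_once(void (*terminate_dma)(void))
{
	bool expected = true;

	if (atomic_compare_exchange_strong(&dma_pending, &expected, false))
		terminate_dma();
}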
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 5238d67490ce..39b99740a6d8 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -451,6 +451,8 @@ static bool android_fence_signaled(struct fence *fence)
int ret;
ret = parent->ops->has_signaled(pt);
+ if (!ret && parent->destroyed)
+ ret = -ENOENT;
if (ret < 0)
fence->status = ret;
return ret;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 9413e1a949e5..5af4d6a03d6e 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -108,12 +108,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
buf[7] = 0x2; /* CmdQue=1 */
- memcpy(&buf[8], "LIO-ORG ", 8);
- memset(&buf[16], 0x20, 16);
+ /*
+ * ASCII data fields described as being left-aligned shall have any
+ * unused bytes at the end of the field (i.e., highest offset) and the
+ * unused bytes shall be filled with ASCII space characters (20h).
+ */
+ memset(&buf[8], 0x20, 8 + 16 + 4);
+ memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
memcpy(&buf[16], dev->t10_wwn.model,
- min_t(size_t, strlen(dev->t10_wwn.model), 16));
+ strnlen(dev->t10_wwn.model, 16));
memcpy(&buf[32], dev->t10_wwn.revision,
- min_t(size_t, strlen(dev->t10_wwn.revision), 4));
+ strnlen(dev->t10_wwn.revision, 4));
buf[4] = 31; /* Set additional length to 31 */
return 0;
@@ -251,7 +256,9 @@ check_t10_vend_desc:
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
buf[off+2] = 0x0;
- memcpy(&buf[off+4], "LIO-ORG", 8);
+ /* left align Vendor ID and pad with spaces */
+ memset(&buf[off+4], 0x20, 8);
+ memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
/* Extra Byte for NULL Terminator */
id_len++;
/* Identifier Length */
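Both INQUIRY hunks above converge on the same idiom for fixed-width, left-aligned ASCII fields: space-fill the whole field first, then copy the identifier without its NUL terminator, bounded by strnlen() so an over-long string is truncated rather than overrun. A small self-contained sketch of that fill, with the field widths the hunks use (8 for the vendor, 16 for the model, 4 for the revision):

#include <string.h>

/* Space-pad a fixed-width field, then left-align the identifier
 * into it; no NUL byte is copied and over-long ids are truncated.
 */
static void fill_ascii_field(unsigned char *field, size_t width,
			     const char *id)
{
	memset(field, 0x20, width);		/* ASCII spaces */
	memcpy(field, id, strnlen(id, width));
}

Called as fill_ascii_field(&buf[8], 8, "LIO-ORG"), this reproduces the memset()+memcpy() pair in the first hunk.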
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index ad7eba5ca380..34234c233851 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -307,6 +307,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
if (!locked)
ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
list_del(&waiter.list);
+
+ /*
+ * On timeout, wake up every reader that yielded to the writer, so
+ * the readers are not split into two groups: one holding the
+ * semaphore and another sleeping (in the case of no contention
+ * with a writer).
+ */
+ if (!locked && list_empty(&sem->write_wait))
+ __ldsem_wake_readers(sem);
+
raw_spin_unlock_irq(&sem->wait_lock);
__set_task_state(tsk, TASK_RUNNING);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0a8e5ac891d4..736de1021d8b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -507,6 +507,13 @@ static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
if (retval)
goto error_init_termios;
+ /*
+ * Suppress initial echoing for some devices that might send data
+ * immediately after the acm driver has been installed.
+ */
+ if (acm->quirks & DISABLE_ECHO)
+ tty->termios.c_lflag &= ~ECHO;
+
tty->driver_data = acm;
return 0;
@@ -1677,6 +1684,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
+ .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
+ },
{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
@@ -1875,6 +1885,13 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = IGNORE_DEVICE,
},
+ { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+ { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index b30ac5fcde68..1ad9ff9f493d 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -134,3 +134,4 @@ struct acm {
#define QUIRK_CONTROL_LINE_STATE BIT(6)
#define CLEAR_HALT_CONDITIONS BIT(7)
#define SEND_ZERO_PACKET BIT(8)
+#define DISABLE_ECHO BIT(9)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index cf378b1ed373..733479ddf8a7 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -240,7 +240,8 @@ static const struct usb_device_id usb_quirk_list[] = {
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
/* Corsair K70 RGB */
- { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
/* Corsair Strafe */
{ USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index 6f188fd8633f..3317d3222184 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -20,7 +20,7 @@
#define UAC1_DEF_PCHMASK 0x3
#define UAC1_DEF_PSRATE 48000
#define UAC1_DEF_PSSIZE 2
-#define UAC1_DEF_REQ_NUM 2
+#define UAC1_DEF_REQ_NUM 8
struct f_uac1_opts {
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index a11c2c8bda53..a217f71b45c6 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1990,6 +1990,8 @@ static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *hep)
+__acquires(r8a66597->lock)
+__releases(r8a66597->lock)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
@@ -2002,13 +2004,14 @@ static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
return;
pipenum = pipe->info.pipenum;
+ spin_lock_irqsave(&r8a66597->lock, flags);
if (pipenum == 0) {
kfree(hep->hcpriv);
hep->hcpriv = NULL;
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
return;
}
- spin_lock_irqsave(&r8a66597->lock, flags);
pipe_stop(r8a66597, pipe);
pipe_irq_disable(r8a66597, pipenum);
disable_irq_empty(r8a66597, pipenum);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 61da1a22b98a..5293596577bf 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1504,7 +1504,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
portsc_buf[port_index] = 0;
/* Bail out if a USB3 port has a new device in link training */
- if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+ if ((hcd->speed >= HCD_USB3) &&
+ (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
diff --git a/drivers/usb/misc/diag_ipc_bridge.c b/drivers/usb/misc/diag_ipc_bridge.c
index a487f7c458ed..f369f69da6a1 100644
--- a/drivers/usb/misc/diag_ipc_bridge.c
+++ b/drivers/usb/misc/diag_ipc_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2015, 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,9 +10,6 @@
* GNU General Public License for more details.
*/
-/* add additional information to our printk's */
-#define pr_fmt(fmt) "%s: " fmt "\n", __func__
-
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -81,31 +78,32 @@ int diag_bridge_open(int id, struct diag_bridge_ops *ops)
struct diag_bridge *dev;
if (id < 0 || id >= MAX_BRIDGE_DEVS) {
- pr_err("Invalid device ID");
+ pr_err("%s: Invalid device ID\n", __func__);
return -ENODEV;
}
dev = __dev[id];
if (!dev) {
- pr_err("dev is null");
+ pr_err("%s: dev is null\n", __func__);
return -ENODEV;
}
if (dev->ops) {
- pr_err("bridge already opened");
+ pr_err("%s: bridge already opened\n", __func__);
return -EALREADY;
}
mutex_lock(&dev->ifc_mutex);
if (dev->opened) {
mutex_unlock(&dev->ifc_mutex);
- pr_err("Bridge already opened");
+ pr_err("%s: Bridge already opened\n", __func__);
return -EBUSY;
}
dev->opened = true;
mutex_unlock(&dev->ifc_mutex);
+ dev_dbg(&dev->ifc->dev, "%s\n", __func__);
dev->ops = ops;
dev->err = 0;
@@ -133,8 +131,12 @@ static int ipc_bridge_open(struct platform_device *pdev)
static void diag_bridge_delete(struct kref *kref)
{
struct diag_bridge *dev = container_of(kref, struct diag_bridge, kref);
+ struct usb_interface *ifc = dev->ifc;
int id = dev->id;
+ dev_dbg(&dev->ifc->dev, "%s\n", __func__);
+ usb_set_intfdata(ifc, NULL);
+ usb_put_intf(ifc);
usb_put_dev(dev->udev);
__dev[id] = 0;
kfree(dev);
@@ -145,32 +147,32 @@ void diag_bridge_close(int id)
struct diag_bridge *dev;
if (id < 0 || id >= MAX_BRIDGE_DEVS) {
- pr_err("Invalid device ID");
+ pr_err("%s: Invalid device ID\n", __func__);
return;
}
dev = __dev[id];
if (!dev) {
- pr_err("dev is null");
+ pr_err("%s: dev is null\n", __func__);
return;
}
if (id == DIAG_BRIDGE && !dev->ops) {
- pr_err("can't close bridge that was not open");
+ pr_err("%s: can't close bridge that was not open\n", __func__);
return;
}
mutex_lock(&dev->ifc_mutex);
if (!dev->opened) {
mutex_unlock(&dev->ifc_mutex);
- pr_err("Bridge not opened");
+ pr_err("%s: Bridge not opened\n", __func__);
return;
}
dev->opened = false;
mutex_unlock(&dev->ifc_mutex);
- dev_dbg(&dev->ifc->dev, "%s:\n", __func__);
+ dev_dbg(&dev->ifc->dev, "%s\n", __func__);
usb_kill_anchored_urbs(&dev->submitted);
dev->ops = 0;
@@ -230,32 +232,33 @@ int diag_bridge_read(int id, char *data, int size)
int ret;
if (id < 0 || id >= MAX_BRIDGE_DEVS) {
- pr_err("Invalid device ID");
+ pr_err("%s: Invalid device ID\n", __func__);
return -ENODEV;
}
- pr_debug("reading %d bytes", size);
+ pr_debug("%s: reading %d bytes\n", __func__, size);
dev = __dev[id];
if (!dev) {
- pr_err("device is disconnected");
+ pr_err("%s: device is disconnected\n", __func__);
return -ENODEV;
}
mutex_lock(&dev->read_mutex);
if (!dev->ifc) {
+ pr_err("%s: device is disconnected\n", __func__);
ret = -ENODEV;
goto error;
}
if (id == DIAG_BRIDGE && !dev->ops) {
- pr_err("bridge is not open");
+ pr_err("%s: bridge is not open\n", __func__);
ret = -ENODEV;
goto error;
}
if (!size) {
- dev_dbg(&dev->ifc->dev, "invalid size:%d\n", size);
+ dev_err(&dev->ifc->dev, "invalid size:%d\n", size);
dev->drop_count++;
ret = -EINVAL;
goto error;
@@ -263,6 +266,8 @@ int diag_bridge_read(int id, char *data, int size)
/* if there was a previous unrecoverable error, just quit */
if (id == DIAG_BRIDGE && dev->err) {
+ pr_err("%s: EPROTO error occurred, or device disconnected\n",
+ __func__);
ret = -ENODEV;
goto error;
}
@@ -271,14 +276,15 @@ int diag_bridge_read(int id, char *data, int size)
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- dev_dbg(&dev->ifc->dev, "unable to allocate urb\n");
+ dev_err(&dev->ifc->dev, "unable to allocate urb\n");
ret = -ENOMEM;
goto put_error;
}
ret = usb_autopm_get_interface(dev->ifc);
if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
- pr_err_ratelimited("read: autopm_get failed:%d", ret);
+ pr_err_ratelimited("%s: read: autopm_get failed:%d\n",
+ __func__, ret);
goto free_error;
}
@@ -297,13 +303,16 @@ int diag_bridge_read(int id, char *data, int size)
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
- pr_err_ratelimited("submitting urb failed err:%d", ret);
+ pr_err_ratelimited("%s: submitting urb failed err:%d\n",
+ __func__, ret);
dev->pending_reads--;
usb_unanchor_urb(urb);
usb_autopm_put_interface(dev->ifc);
goto free_error;
}
+ usb_autopm_put_interface(dev->ifc);
+
if (id == IPC_BRIDGE) {
wait_for_completion(&dev->read_done);
ret = dev->read_result;
@@ -344,7 +353,7 @@ static void diag_bridge_write_cb(struct urb *urb)
struct diag_bridge *dev = urb->context;
struct diag_bridge_ops *cbs = dev->ops;
- dev_dbg(&dev->ifc->dev, "%s:\n", __func__);
+ dev_dbg(&dev->ifc->dev, "%s\n", __func__);
usb_autopm_put_interface_async(dev->ifc);
@@ -381,26 +390,27 @@ int diag_bridge_write(int id, char *data, int size)
int ret;
if (id < 0 || id >= MAX_BRIDGE_DEVS) {
- pr_err("Invalid device ID");
+ pr_err("%s: Invalid device ID\n", __func__);
return -ENODEV;
}
- pr_debug("writing %d bytes", size);
+ pr_debug("%s: writing %d bytes\n", __func__, size);
dev = __dev[id];
if (!dev) {
- pr_err("device is disconnected");
+ pr_err("%s: device is disconnected\n", __func__);
return -ENODEV;
}
mutex_lock(&dev->write_mutex);
if (!dev->ifc) {
+ pr_err("%s: device is disconnected\n", __func__);
ret = -ENODEV;
goto error;
}
if (id == DIAG_BRIDGE && !dev->ops) {
- pr_err("bridge is not open");
+ pr_err("%s: bridge is not open\n", __func__);
ret = -ENODEV;
goto error;
}
@@ -413,6 +423,8 @@ int diag_bridge_write(int id, char *data, int size)
/* if there was a previous unrecoverable error, just quit */
if (id == DIAG_BRIDGE && dev->err) {
+ pr_err("%s: EPROTO error occurred, or device disconnected\n",
+ __func__);
ret = -ENODEV;
goto error;
}
@@ -428,7 +440,8 @@ int diag_bridge_write(int id, char *data, int size)
ret = usb_autopm_get_interface(dev->ifc);
if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
- pr_err_ratelimited("write: autopm_get failed:%d", ret);
+ pr_err_ratelimited("%s: write: autopm_get failed:%d\n",
+ __func__, ret);
goto free_error;
}
@@ -441,7 +454,8 @@ int diag_bridge_write(int id, char *data, int size)
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
- pr_err_ratelimited("submitting urb failed err:%d", ret);
+ pr_err_ratelimited("%s: submitting urb failed err:%d\n",
+ __func__, ret);
dev->pending_writes--;
usb_unanchor_urb(urb);
usb_autopm_put_interface(dev->ifc);
@@ -584,15 +598,17 @@ diag_bridge_probe(struct usb_interface *ifc, const struct usb_device_id *id)
struct usb_endpoint_descriptor *ep_desc;
int i, devid, ret = -ENOMEM;
- pr_debug("id:%lu", id->driver_info);
+ pr_debug("%s: id:%lu\n", __func__, id->driver_info);
devid = id->driver_info & 0xFF;
- if (devid < 0 || devid >= MAX_BRIDGE_DEVS)
+ if (devid < 0 || devid >= MAX_BRIDGE_DEVS) {
+ pr_err("%s: Invalid device ID\n", __func__);
return -ENODEV;
+ }
/* already probed? */
if (__dev[devid]) {
- pr_err("Diag device already probed");
+ pr_err("%s: Diag device already probed\n", __func__);
return -ENODEV;
}
@@ -604,7 +620,7 @@ diag_bridge_probe(struct usb_interface *ifc, const struct usb_device_id *id)
dev->id = devid;
dev->udev = usb_get_dev(interface_to_usbdev(ifc));
- dev->ifc = ifc;
+ dev->ifc = usb_get_intf(ifc);
kref_init(&dev->kref);
mutex_init(&dev->ifc_mutex);
mutex_init(&dev->read_mutex);
@@ -629,7 +645,8 @@ diag_bridge_probe(struct usb_interface *ifc, const struct usb_device_id *id)
}
if (!(dev->in_epAddr && dev->out_epAddr)) {
- pr_err("could not find bulk in and bulk out endpoints");
+ pr_err("%s: could not find bulk in and bulk out endpoints\n",
+ __func__);
ret = -ENODEV;
goto error;
}
@@ -640,14 +657,16 @@ diag_bridge_probe(struct usb_interface *ifc, const struct usb_device_id *id)
dev->pdev = platform_device_register_simple("diag_bridge",
devid, NULL, 0);
if (IS_ERR(dev->pdev)) {
- pr_err("unable to allocate platform device");
+ pr_err("%s: unable to allocate platform device\n",
+ __func__);
ret = PTR_ERR(dev->pdev);
goto error;
}
} else {
dev->pdev = platform_device_alloc("ipc_bridge", -1);
if (!dev->pdev) {
- pr_err("unable to allocate platform device");
+ pr_err("%s: unable to allocate platform device\n",
+ __func__);
ret = -ENOMEM;
goto error;
}
@@ -655,13 +674,13 @@ diag_bridge_probe(struct usb_interface *ifc, const struct usb_device_id *id)
ret = platform_device_add_data(dev->pdev, &ipc_bridge_pdata,
sizeof(struct ipc_bridge_platform_data));
if (ret) {
- pr_err("fail to add pdata");
+ pr_err("%s: fail to add pdata\n", __func__);
goto put_pdev;
}
ret = platform_device_add(dev->pdev);
if (ret) {
- pr_err("fail to add pdev");
+ pr_err("%s: fail to add pdev\n", __func__);
goto put_pdev;
}
}
@@ -687,17 +706,11 @@ static void diag_bridge_disconnect(struct usb_interface *ifc)
{
struct diag_bridge *dev = usb_get_intfdata(ifc);
- dev_dbg(&dev->ifc->dev, "%s:\n", __func__);
+ dev_dbg(&dev->ifc->dev, "%s\n", __func__);
platform_device_unregister(dev->pdev);
diag_bridge_debugfs_cleanup();
- mutex_lock(&dev->ifc_mutex);
- dev->ifc = NULL;
- mutex_unlock(&dev->ifc_mutex);
- usb_set_intfdata(ifc, NULL);
- mutex_destroy(&dev->write_mutex);
- mutex_destroy(&dev->read_mutex);
- mutex_destroy(&dev->ifc_mutex);
+ dev->err = -ENODEV;
kref_put(&dev->kref, diag_bridge_delete);
}
@@ -815,7 +828,7 @@ static int __init diag_bridge_init(void)
ret = usb_register(&diag_bridge_driver);
if (ret) {
- pr_err("unable to register diag driver");
+ pr_err("%s: unable to register diag driver\n", __func__);
return ret;
}
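Much of this patch trades the file-wide pr_fmt() prefix (removed near the top) for an explicit "%s: ... \n", __func__ at every call site, which makes each format string self-describing at the cost of repetition. As a rough userspace analogue of the removed idiom, a single macro can centralize the prefix (GNU-style ##__VA_ARGS__ assumed):

#include <stdio.h>

/* One macro supplies the "%s: ...\n", __func__ prefix that the
 * patch instead spells out at each call site.
 */
#define pr_err(fmt, ...) \
	fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)

static void diag_open_demo(int id)
{
	if (id < 0)
		pr_err("Invalid device ID");
}

int main(void)
{
	diag_open_demo(-1);	/* prints "diag_open_demo: Invalid device ID" */
	return 0;
}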
diff --git a/drivers/usb/misc/ks_bridge.c b/drivers/usb/misc/ks_bridge.c
index 3497880dea09..c6fd30349ecc 100644
--- a/drivers/usb/misc/ks_bridge.c
+++ b/drivers/usb/misc/ks_bridge.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014, 2017-2018, Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2017-2019, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -47,7 +47,8 @@ static enum bus_id str_to_busid(const char *name)
return BUS_HSIC;
if (!strncasecmp("msm_ehci_host.0", name, BUSNAME_LEN))
return BUS_USB;
- if (!strncasecmp("xhci-hcd.0.auto", name, BUSNAME_LEN))
+ if (!strncasecmp("xhci-hcd.0.auto", name, BUSNAME_LEN) ||
+ !strncasecmp("xhci-hcd.1.auto", name, BUSNAME_LEN))
return BUS_USB;
return BUS_UNDEF;
@@ -699,6 +700,7 @@ ksb_usb_probe(struct usb_interface *ifc, const struct usb_device_id *id)
case 0x9025:
case 0x9091:
case 0x901D:
+ case 0x901F:
/* 1-1 mapping between ksb and udev port which starts with 1 */
ksb_port_num = udev->portnum - 1;
dev_dbg(&udev->dev, "ifc_count: %u, port_num:%u\n", ifc_count,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2b81939fecd7..7bc2c9fef605 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1163,6 +1163,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -1327,6 +1331,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
@@ -1530,6 +1535,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
.driver_info = RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1757,6 +1763,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
.driver_info = RSVD(5) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1941,7 +1948,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
+ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
+ .driver_info = RSVD(6) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 3da25ad267a2..4966768d3c98 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -86,9 +86,14 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
{ USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 123289085ee2..a84f0959ab34 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -123,10 +123,15 @@
/* Hewlett-Packard POS Pole Displays */
#define HP_VENDOR_ID 0x03f0
+#define HP_LM920_PRODUCT_ID 0x026b
+#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
#define HP_LCM220_PRODUCT_ID 0x3139
#define HP_LCM960_PRODUCT_ID 0x3239
#define HP_LD220_PRODUCT_ID 0x3524
+#define HP_LD220TA_PRODUCT_ID 0x4349
+#define HP_LD960TA_PRODUCT_ID 0x4439
+#define HP_LM940_PRODUCT_ID 0x5039
/* Cressi Edy (diving computer) PC interface */
#define CRESSI_VENDOR_ID 0x04b8
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 6c186b4df94a..b3344a77dcce 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -223,8 +223,12 @@ static int slave_configure(struct scsi_device *sdev)
if (!(us->fflags & US_FL_NEEDS_CAP16))
sdev->try_rc_10_first = 1;
- /* assume SPC3 or latter devices support sense size > 18 */
- if (sdev->scsi_level > SCSI_SPC_2)
+ /*
+ * assume SPC3 or later devices support sense size > 18
+ * unless US_FL_BAD_SENSE quirk is specified.
+ */
+ if (sdev->scsi_level > SCSI_SPC_2 &&
+ !(us->fflags & US_FL_BAD_SENSE))
us->fflags |= US_FL_SANE_SENSE;
/* USB-IDE bridges tend to report SK = 0x04 (Non-recoverable
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 898215cad351..d92b974f0635 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1393,6 +1393,18 @@ UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
US_FL_SANE_SENSE),
/*
+ * Reported by Icenowy Zheng <icenowy@aosc.io>
+ * The SMI SM3350 USB-UFS bridge controller will enter a wrong state,
+ * in which it does not process read/write commands, if a long sense
+ * is requested, so force the use of 18-byte sense data.
+ */
+UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff,
+ "SMI",
+ "SM3350 UFS-to-USB-Mass-Storage bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+
+/*
* Pete Zaitcev <zaitcev@yahoo.com>, bz#164688.
* The device blatantly ignores LUN and returns 1 in GetMaxLUN.
*/
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 533eaf04f12f..40764ecad9ce 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -2,7 +2,6 @@ config VHOST_NET
tristate "Host kernel accelerator for virtio net"
depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
select VHOST
- select VHOST_RING
---help---
This kernel module can be loaded in host kernel to accelerate
guest networking with virtio_net. Not to be confused with virtio_net
@@ -15,17 +14,24 @@ config VHOST_SCSI
tristate "VHOST_SCSI TCM fabric driver"
depends on TARGET_CORE && EVENTFD && m
select VHOST
- select VHOST_RING
default n
---help---
Say M here to enable the vhost_scsi TCM fabric module
for use with virtio-scsi guests
-config VHOST_RING
- tristate
+config VHOST_VSOCK
+ tristate "vhost virtio-vsock driver"
+ depends on VSOCKETS && EVENTFD
+ select VIRTIO_VSOCKETS_COMMON
+ select VHOST
+ default n
---help---
- This option is selected by any driver which needs to access
- the host side of a virtio ring.
+ This kernel module can be loaded in the host kernel to provide AF_VSOCK
+ sockets for communicating with guests. The guests must have the
+ virtio_transport.ko driver loaded to use the virtio-vsock device.
+
+ To compile this driver as a module, choose M here: the module will be called
+ vhost_vsock.
config VHOST
tristate
diff --git a/drivers/vhost/Kconfig.vringh b/drivers/vhost/Kconfig.vringh
new file mode 100644
index 000000000000..6a4490c09d7f
--- /dev/null
+++ b/drivers/vhost/Kconfig.vringh
@@ -0,0 +1,5 @@
+config VHOST_RING
+ tristate
+ ---help---
+ This option is selected by any driver which needs to access
+ the host side of a virtio ring.
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index e0441c34db1c..6b012b986b57 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -4,5 +4,9 @@ vhost_net-y := net.o
obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
vhost_scsi-y := scsi.o
+obj-$(CONFIG_VHOST_VSOCK) += vhost_vsock.o
+vhost_vsock-y := vsock.o
+
obj-$(CONFIG_VHOST_RING) += vringh.o
+
obj-$(CONFIG_VHOST) += vhost.o
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 645b2197930e..53cf130922f3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
enum {
VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1ULL << VIRTIO_NET_F_MRG_RXBUF)
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM)
};
enum {
@@ -287,6 +288,69 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
rcu_read_unlock_bh();
}
+static inline unsigned long busy_clock(void)
+{
+ return local_clock() >> 10;
+}
+
+static bool vhost_can_busy_poll(struct vhost_dev *dev,
+ unsigned long endtime)
+{
+ return likely(!need_resched()) &&
+ likely(!time_after(busy_clock(), endtime)) &&
+ likely(!signal_pending(current)) &&
+ !vhost_has_work(dev);
+}
+
+static void vhost_net_disable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
+ if (!vq->private_data)
+ return;
+ vhost_poll_stop(poll);
+}
+
+static int vhost_net_enable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
+ struct socket *sock;
+
+ sock = vq->private_data;
+ if (!sock)
+ return 0;
+
+ return vhost_poll_start(poll, sock->file);
+}
+
+static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
+ struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num)
+{
+ unsigned long uninitialized_var(endtime);
+ int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ out_num, in_num, NULL, NULL);
+
+ if (r == vq->num && vq->busyloop_timeout) {
+ preempt_disable();
+ endtime = busy_clock() + vq->busyloop_timeout;
+ while (vhost_can_busy_poll(vq->dev, endtime) &&
+ vhost_vq_avail_empty(vq->dev, vq))
+ cpu_relax_lowlatency();
+ preempt_enable();
+ r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ out_num, in_num, NULL, NULL);
+ }
+
+ return r;
+}
+
/* Expects to be always run from workqueue - which acts as
* read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
@@ -314,6 +378,9 @@ static void handle_tx(struct vhost_net *net)
if (!sock)
goto out;
+ if (!vq_iotlb_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&net->dev, vq);
hdr_size = nvq->vhost_hlen;
@@ -331,10 +398,9 @@ static void handle_tx(struct vhost_net *net)
% UIO_MAXIOV == nvq->done_idx))
break;
- head = vhost_get_vq_desc(vq, vq->iov,
- ARRAY_SIZE(vq->iov),
- &out, &in,
- NULL, NULL);
+ head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
+ ARRAY_SIZE(vq->iov),
+ &out, &in);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
@@ -435,6 +501,43 @@ static int peek_head_len(struct sock *sk)
return len;
}
+static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ unsigned long uninitialized_var(endtime);
+ int len = peek_head_len(sk);
+
+ if (!len && vq->busyloop_timeout) {
+ /* Both tx vq and rx socket were polled here */
+ mutex_lock_nested(&vq->mutex, 1);
+ vhost_disable_notify(&net->dev, vq);
+
+ preempt_disable();
+ endtime = busy_clock() + vq->busyloop_timeout;
+
+ while (vhost_can_busy_poll(&net->dev, endtime) &&
+ skb_queue_empty(&sk->sk_receive_queue) &&
+ vhost_vq_avail_empty(&net->dev, vq))
+ cpu_relax_lowlatency();
+
+ preempt_enable();
+
+ if (!vhost_vq_avail_empty(&net->dev, vq))
+ vhost_poll_queue(&vq->poll);
+ else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ vhost_poll_queue(&vq->poll);
+ }
+
+ mutex_unlock(&vq->mutex);
+
+ len = peek_head_len(sk);
+ }
+
+ return len;
+}
+
/* This is a multi-buffer version of vhost_get_desc, that works if
* vq has read descriptors only.
* @vq - the relevant virtqueue
@@ -540,11 +643,16 @@ static void handle_rx(struct vhost_net *net)
struct iov_iter fixup;
__virtio16 num_buffers;
- mutex_lock(&vq->mutex);
+ mutex_lock_nested(&vq->mutex, 0);
sock = vq->private_data;
if (!sock)
goto out;
+
+ if (!vq_iotlb_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&net->dev, vq);
+ vhost_net_disable_vq(net, vq);
vhost_hlen = nvq->vhost_hlen;
sock_hlen = nvq->sock_hlen;
@@ -553,7 +661,7 @@ static void handle_rx(struct vhost_net *net)
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
- while ((sock_len = peek_head_len(sock->sk))) {
+ while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads, vhost_len,
@@ -561,7 +669,7 @@ static void handle_rx(struct vhost_net *net)
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
- break;
+ goto out;
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
@@ -580,7 +688,7 @@ static void handle_rx(struct vhost_net *net)
}
/* Nothing new? Wait for eventfd to tell us
* they refilled. */
- break;
+ goto out;
}
/* We don't need to be notified again. */
iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
@@ -608,7 +716,7 @@ static void handle_rx(struct vhost_net *net)
&fixup) != sizeof(hdr)) {
vq_err(vq, "Unable to write vnet_hdr "
"at addr %p\n", vq->iov->iov_base);
- break;
+ goto out;
}
} else {
/* Header came from socket; we'll need to patch
@@ -624,7 +732,7 @@ static void handle_rx(struct vhost_net *net)
&fixup) != sizeof num_buffers) {
vq_err(vq, "Failed num_buffers write");
vhost_discard_vq_desc(vq, headcount);
- break;
+ goto out;
}
vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
headcount);
@@ -633,9 +741,10 @@ static void handle_rx(struct vhost_net *net)
total_len += vhost_len;
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
- break;
+ goto out;
}
}
+ vhost_net_enable_vq(net, vq);
out:
mutex_unlock(&vq->mutex);
}
@@ -714,32 +823,6 @@ static int vhost_net_open(struct inode *inode, struct file *f)
return 0;
}
-static void vhost_net_disable_vq(struct vhost_net *n,
- struct vhost_virtqueue *vq)
-{
- struct vhost_net_virtqueue *nvq =
- container_of(vq, struct vhost_net_virtqueue, vq);
- struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- if (!vq->private_data)
- return;
- vhost_poll_stop(poll);
-}
-
-static int vhost_net_enable_vq(struct vhost_net *n,
- struct vhost_virtqueue *vq)
-{
- struct vhost_net_virtqueue *nvq =
- container_of(vq, struct vhost_net_virtqueue, vq);
- struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- struct socket *sock;
-
- sock = vq->private_data;
- if (!sock)
- return 0;
-
- return vhost_poll_start(poll, sock->file);
-}
-
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
@@ -917,7 +1000,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
vhost_net_disable_vq(n, vq);
vq->private_data = sock;
- r = vhost_init_used(vq);
+ r = vhost_vq_init_access(vq);
if (r)
goto err_used;
r = vhost_net_enable_vq(n, vq);
@@ -969,21 +1052,21 @@ static long vhost_net_reset_owner(struct vhost_net *n)
struct socket *tx_sock = NULL;
struct socket *rx_sock = NULL;
long err;
- struct vhost_memory *memory;
+ struct vhost_umem *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
- memory = vhost_dev_reset_owner_prepare();
- if (!memory) {
+ umem = vhost_dev_reset_owner_prepare();
+ if (!umem) {
err = -ENOMEM;
goto done;
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
vhost_dev_stop(&n->dev);
- vhost_dev_reset_owner(&n->dev, memory);
+ vhost_dev_reset_owner(&n->dev, umem);
vhost_net_vq_reset(n);
done:
mutex_unlock(&n->dev.mutex);
@@ -1014,10 +1097,14 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
}
mutex_lock(&n->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
- !vhost_log_access_ok(&n->dev)) {
- mutex_unlock(&n->dev.mutex);
- return -EFAULT;
+ !vhost_log_access_ok(&n->dev))
+ goto out_unlock;
+
+ if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
+ if (vhost_init_device_iotlb(&n->dev, true))
+ goto out_unlock;
}
+
for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
mutex_lock(&n->vqs[i].vq.mutex);
n->vqs[i].vq.acked_features = features;
@@ -1027,6 +1114,10 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
}
mutex_unlock(&n->dev.mutex);
return 0;
+
+out_unlock:
+ mutex_unlock(&n->dev.mutex);
+ return -EFAULT;
}
static long vhost_net_set_owner(struct vhost_net *n)
@@ -1100,9 +1191,40 @@ static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
}
#endif
+static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
static const struct file_operations vhost_net_fops = {
.owner = THIS_MODULE,
.release = vhost_net_release,
+ .read_iter = vhost_net_chr_read_iter,
+ .write_iter = vhost_net_chr_write_iter,
+ .poll = vhost_net_chr_poll,
.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vhost_net_compat_ioctl,
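The busy-poll additions in this file share one structure: try to fetch a descriptor once, and if the ring is empty and a busyloop_timeout is configured, spin (with cpu_relax_lowlatency() and the vhost_can_busy_poll() escape hatches) until either work appears or the deadline passes, then try once more before giving up and sleeping. A simplified, self-contained userspace sketch of that try/spin/retry shape; try_get(), have_work() and the microsecond clock are hypothetical stand-ins for vhost_get_vq_desc(), vhost_vq_avail_empty() and busy_clock():

#include <sched.h>
#include <stdbool.h>
#include <time.h>

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000LL;
}

/* try_get() returns <0 when nothing is available (the driver
 * instead compares the result against vq->num).
 */
static int poll_then_retry(int (*try_get)(void), bool (*have_work)(void),
			   long long busyloop_timeout_us)
{
	int r = try_get();

	if (r < 0 && busyloop_timeout_us) {
		long long end = now_us() + busyloop_timeout_us;

		while (now_us() < end && !have_work())
			sched_yield();	/* stands in for cpu_relax() */
		r = try_get();
	}
	return r;
}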
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 8fc62a03637a..009315f006bf 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1277,7 +1277,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->private_data = vs_tpg;
- vhost_init_used(vq);
+ vhost_vq_init_access(vq);
mutex_unlock(&vq->mutex);
}
ret = 0;
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index f2882ac98726..388eec4e1a90 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -196,7 +196,7 @@ static long vhost_test_run(struct vhost_test *n, int test)
oldpriv = vq->private_data;
vq->private_data = priv;
- r = vhost_init_used(&n->vqs[index]);
+ r = vhost_vq_init_access(&n->vqs[index]);
mutex_unlock(&vq->mutex);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c54d388310f0..53b1b3cfce84 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -27,6 +27,7 @@
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
+#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include "vhost.h"
@@ -35,6 +36,10 @@ static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
"Maximum number of memory regions in memory map. (default: 64)");
+static int max_iotlb_entries = 2048;
+module_param(max_iotlb_entries, int, 0444);
+MODULE_PARM_DESC(max_iotlb_entries,
+ "Maximum number of iotlb entries. (default: 2048)");
enum {
VHOST_MEMORY_F_LOG = 0x1,
@@ -43,12 +48,26 @@ enum {
#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
+INTERVAL_TREE_DEFINE(struct vhost_umem_node,
+ rb, __u64, __subtree_last,
+ START, LAST, , vhost_umem_interval_tree);
+
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
-static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
vq->user_be = !virtio_legacy_is_little_endian();
}
+static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
+{
+ vq->user_be = true;
+}
+
+static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
+{
+ vq->user_be = false;
+}
+
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
struct vhost_vring_state s;
@@ -63,7 +82,10 @@ static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
s.num != VHOST_VRING_BIG_ENDIAN)
return -EINVAL;
- vq->user_be = s.num;
+ if (s.num == VHOST_VRING_BIG_ENDIAN)
+ vhost_enable_cross_endian_big(vq);
+ else
+ vhost_enable_cross_endian_little(vq);
return 0;
}
@@ -92,7 +114,7 @@ static void vhost_init_is_le(struct vhost_virtqueue *vq)
vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
-static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}
@@ -109,11 +131,29 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
- if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
- vq->is_le = true;
+ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+ || virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
+static void vhost_reset_is_le(struct vhost_virtqueue *vq)
+{
+ vhost_init_is_le(vq);
+}
+
+struct vhost_flush_struct {
+ struct vhost_work work;
+ struct completion wait_event;
+};
+
+static void vhost_flush_work(struct vhost_work *work)
+{
+ struct vhost_flush_struct *s;
+
+ s = container_of(work, struct vhost_flush_struct, work);
+ complete(&s->wait_event);
+}
+
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -138,11 +178,9 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
- INIT_LIST_HEAD(&work->node);
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
work->fn = fn;
init_waitqueue_head(&work->done);
- work->flushing = 0;
- work->queue_seq = work->done_seq = 0;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
@@ -193,31 +231,17 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
- unsigned seq)
-{
- int left;
-
- spin_lock_irq(&dev->work_lock);
- left = seq - work->done_seq;
- spin_unlock_irq(&dev->work_lock);
- return left <= 0;
-}
-
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- unsigned seq;
- int flushing;
+ struct vhost_flush_struct flush;
+
+ if (dev->worker) {
+ init_completion(&flush.wait_event);
+ vhost_work_init(&flush.work, vhost_flush_work);
- spin_lock_irq(&dev->work_lock);
- seq = work->queue_seq;
- work->flushing++;
- spin_unlock_irq(&dev->work_lock);
- wait_event(work->done, vhost_work_seq_done(dev, work, seq));
- spin_lock_irq(&dev->work_lock);
- flushing = --work->flushing;
- spin_unlock_irq(&dev->work_lock);
- BUG_ON(flushing < 0);
+ vhost_work_queue(dev, &flush.work);
+ wait_for_completion(&flush.wait_event);
+ }
}
EXPORT_SYMBOL_GPL(vhost_work_flush);
@@ -231,20 +255,27 @@ EXPORT_SYMBOL_GPL(vhost_poll_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
- unsigned long flags;
+ if (!dev->worker)
+ return;
- spin_lock_irqsave(&dev->work_lock, flags);
- if (list_empty(&work->node)) {
- list_add_tail(&work->node, &dev->work_list);
- work->queue_seq++;
- spin_unlock_irqrestore(&dev->work_lock, flags);
+ if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
+ /* We can only add the work to the list after we're
+ * sure it was not in the list.
+ */
+ smp_mb();
+ llist_add(&work->node, &dev->work_list);
wake_up_process(dev->worker);
- } else {
- spin_unlock_irqrestore(&dev->work_lock, flags);
}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
+/* A lockless hint for busy polling code to exit the loop */
+bool vhost_has_work(struct vhost_dev *dev)
+{
+ return !llist_empty(&dev->work_list);
+}
+EXPORT_SYMBOL_GPL(vhost_has_work);
+
void vhost_poll_queue(struct vhost_poll *poll)
{
vhost_work_queue(poll->dev, &poll->work);
@@ -275,16 +306,18 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->call_ctx = NULL;
vq->call = NULL;
vq->log_ctx = NULL;
- vq->memory = NULL;
- vq->is_le = virtio_legacy_is_little_endian();
- vhost_vq_reset_user_be(vq);
+ vhost_reset_is_le(vq);
+ vhost_disable_cross_endian(vq);
+ vq->busyloop_timeout = 0;
+ vq->umem = NULL;
+ vq->iotlb = NULL;
}
static int vhost_worker(void *data)
{
struct vhost_dev *dev = data;
- struct vhost_work *work = NULL;
- unsigned uninitialized_var(seq);
+ struct vhost_work *work, *work_next;
+ struct llist_node *node;
mm_segment_t oldfs = get_fs();
set_fs(USER_DS);
@@ -294,35 +327,25 @@ static int vhost_worker(void *data)
/* mb paired w/ kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irq(&dev->work_lock);
- if (work) {
- work->done_seq = seq;
- if (work->flushing)
- wake_up_all(&work->done);
- }
-
if (kthread_should_stop()) {
- spin_unlock_irq(&dev->work_lock);
__set_current_state(TASK_RUNNING);
break;
}
- if (!list_empty(&dev->work_list)) {
- work = list_first_entry(&dev->work_list,
- struct vhost_work, node);
- list_del_init(&work->node);
- seq = work->queue_seq;
- } else
- work = NULL;
- spin_unlock_irq(&dev->work_lock);
- if (work) {
+ node = llist_del_all(&dev->work_list);
+ if (!node)
+ schedule();
+
+ node = llist_reverse_order(node);
+ /* make sure flag is seen after deletion */
+ smp_wmb();
+ llist_for_each_entry_safe(work, work_next, node, node) {
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
__set_current_state(TASK_RUNNING);
work->fn(work);
if (need_resched())
schedule();
- } else
- schedule();
-
+ }
}
unuse_mm(dev->mm);
set_fs(oldfs);
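Two llist properties make the loop above safe: llist_del_all() detaches the entire list with a single atomic xchg but hands nodes back in LIFO order (hence llist_reverse_order() to restore submission order), and llist_reverse_order(NULL) returns NULL, so falling through after schedule() with an empty batch is harmless. Reduced consumer sketch:

    struct llist_node *batch;
    struct vhost_work *work, *work_next;

    batch = llist_del_all(&dev->work_list);   /* one atomic xchg */
    batch = llist_reverse_order(batch);       /* NULL in, NULL out */
    llist_for_each_entry_safe(work, work_next, batch, node)
            work->fn(work);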
@@ -381,11 +404,16 @@ void vhost_dev_init(struct vhost_dev *dev,
mutex_init(&dev->mutex);
dev->log_ctx = NULL;
dev->log_file = NULL;
- dev->memory = NULL;
+ dev->umem = NULL;
+ dev->iotlb = NULL;
dev->mm = NULL;
- spin_lock_init(&dev->work_lock);
- INIT_LIST_HEAD(&dev->work_list);
dev->worker = NULL;
+ init_llist_head(&dev->work_list);
+ init_waitqueue_head(&dev->wait);
+ INIT_LIST_HEAD(&dev->read_list);
+ INIT_LIST_HEAD(&dev->pending_list);
+ spin_lock_init(&dev->iotlb_lock);
+
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
@@ -486,27 +514,36 @@ err_mm:
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
-struct vhost_memory *vhost_dev_reset_owner_prepare(void)
+static void *vhost_kvzalloc(unsigned long size)
+{
+ void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+ if (!n)
+ n = vzalloc(size);
+ return n;
+}
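This open-codes what later kernels provide as kvzalloc(): attempt kzalloc() quietly (__GFP_NOWARN suppresses the warning if the allocation fails), fall back to vzalloc(), and rely on kvfree() to release either kind of pointer. Usage sketch:

    struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem));

    if (!umem)
            return -ENOMEM;
    /* ... use umem ... */
    kvfree(umem);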
+
+struct vhost_umem *vhost_dev_reset_owner_prepare(void)
{
- return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
+ return vhost_kvzalloc(sizeof(struct vhost_umem));
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
-void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
int i;
vhost_dev_cleanup(dev, true);
/* Restore memory to default empty mapping. */
- memory->nregions = 0;
- dev->memory = memory;
+ INIT_LIST_HEAD(&umem->umem_list);
+ dev->umem = umem;
/* We don't need VQ locks below since vhost_dev_cleanup makes sure
* VQs aren't running.
*/
for (i = 0; i < dev->nvqs; ++i)
- dev->vqs[i]->memory = memory;
+ dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
@@ -523,6 +560,47 @@ void vhost_dev_stop(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
+static void vhost_umem_free(struct vhost_umem *umem,
+ struct vhost_umem_node *node)
+{
+ vhost_umem_interval_tree_remove(node, &umem->umem_tree);
+ list_del(&node->link);
+ kfree(node);
+ umem->numem--;
+}
+
+static void vhost_umem_clean(struct vhost_umem *umem)
+{
+ struct vhost_umem_node *node, *tmp;
+
+ if (!umem)
+ return;
+
+ list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
+ vhost_umem_free(umem, node);
+
+ kvfree(umem);
+}
+
+static void vhost_clear_msg(struct vhost_dev *dev)
+{
+ struct vhost_msg_node *node, *n;
+
+ spin_lock(&dev->iotlb_lock);
+
+ list_for_each_entry_safe(node, n, &dev->read_list, node) {
+ list_del(&node->node);
+ kfree(node);
+ }
+
+ list_for_each_entry_safe(node, n, &dev->pending_list, node) {
+ list_del(&node->node);
+ kfree(node);
+ }
+
+ spin_unlock(&dev->iotlb_lock);
+}
+
/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
@@ -549,9 +627,13 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
- kvfree(dev->memory);
- dev->memory = NULL;
- WARN_ON(!list_empty(&dev->work_list));
+ vhost_umem_clean(dev->umem);
+ dev->umem = NULL;
+ vhost_umem_clean(dev->iotlb);
+ dev->iotlb = NULL;
+ vhost_clear_msg(dev);
+ wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
+ WARN_ON(!llist_empty(&dev->work_list));
if (dev->worker) {
kthread_stop(dev->worker);
dev->worker = NULL;
@@ -575,26 +657,34 @@ static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
+static bool vhost_overflow(u64 uaddr, u64 size)
+{
+ /* Make sure 64 bit math will not overflow. */
+ return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
+}
+
/* Caller should have vq mutex and device mutex. */
-static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
+static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
int log_all)
{
- int i;
+ struct vhost_umem_node *node;
- if (!mem)
+ if (!umem)
return 0;
- for (i = 0; i < mem->nregions; ++i) {
- struct vhost_memory_region *m = mem->regions + i;
- unsigned long a = m->userspace_addr;
- if (m->memory_size > ULONG_MAX)
+ list_for_each_entry(node, &umem->umem_list, link) {
+ unsigned long a = node->userspace_addr;
+
+ if (vhost_overflow(node->userspace_addr, node->size))
return 0;
- else if (!access_ok(VERIFY_WRITE, (void __user *)a,
- m->memory_size))
+
+ if (!access_ok(VERIFY_WRITE, (void __user *)a,
+ node->size))
return 0;
else if (log_all && !log_access_ok(log_base,
- m->guest_phys_addr,
- m->memory_size))
+ node->start,
+ node->size))
return 0;
}
return 1;
@@ -602,7 +692,7 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
-static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
+static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
int log_all)
{
int i;
@@ -615,7 +705,8 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
/* If ring is inactive, will check when it's enabled. */
if (d->vqs[i]->private_data)
- ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
+ ok = vq_memory_access_ok(d->vqs[i]->log_base,
+ umem, log);
else
ok = 1;
mutex_unlock(&d->vqs[i]->mutex);
@@ -625,12 +716,388 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
return 1;
}
+static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ struct iovec iov[], int iov_size, int access);
+
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
+ const void *from, unsigned size)
+{
+ int ret;
+
+ if (!vq->iotlb)
+ return __copy_to_user(to, from, size);
+ else {
+ /* This function should be called after iotlb
+ * prefetch, which means we're sure that all vq
+ * memory could be accessed through the iotlb, so
+ * -EAGAIN should not happen in this case.
+ */
+ /* TODO: add a faster path */
+ struct iov_iter t;
+ ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_WO);
+ if (ret < 0)
+ goto out;
+ iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
+ ret = copy_to_iter(from, size, &t);
+ if (ret == size)
+ ret = 0;
+ }
+out:
+ return ret;
+}
+
+static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
+ void *from, unsigned size)
+{
+ int ret;
+
+ if (!vq->iotlb)
+ return __copy_from_user(to, from, size);
+ else {
+ /* This function should be called after iotlb
+ * prefetch, which means we're sure that the vq
+ * memory could be accessed through the iotlb, so
+ * -EAGAIN should not happen in this case.
+ */
+ /* TODO: add a faster path */
+ struct iov_iter f;
+ ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_RO);
+ if (ret < 0) {
+ vq_err(vq, "IOTLB translation failure: uaddr "
+ "%p size 0x%llx\n", from,
+ (unsigned long long) size);
+ goto out;
+ }
+ iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
+ ret = copy_from_iter(to, size, &f);
+ if (ret == size)
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
+ void *addr, unsigned size)
+{
+ int ret;
+
+ /* This function should be called after iotlb
+ * prefetch, which means we're sure that the vq
+ * memory could be accessed through the iotlb, so
+ * -EAGAIN should not happen in this case.
+ */
+ /* TODO: add a faster path */
+ ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_RO);
+ if (ret < 0) {
+ vq_err(vq, "IOTLB translation failure: uaddr "
+ "%p size 0x%llx\n", addr,
+ (unsigned long long) size);
+ return NULL;
+ }
+
+ if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
+ vq_err(vq, "Non atomic userspace memory access: uaddr "
+ "%p size 0x%llx\n", addr,
+ (unsigned long long) size);
+ return NULL;
+ }
+
+ return vq->iotlb_iov[0].iov_base;
+}
+
+#define vhost_put_user(vq, x, ptr) \
+({ \
+ int ret = -EFAULT; \
+ if (!vq->iotlb) { \
+ ret = __put_user(x, ptr); \
+ } else { \
+ __typeof__(ptr) to = \
+ (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
+ if (to != NULL) \
+ ret = __put_user(x, to); \
+ else \
+ ret = -EFAULT; \
+ } \
+ ret; \
+})
+
+#define vhost_get_user(vq, x, ptr) \
+({ \
+ int ret; \
+ if (!vq->iotlb) { \
+ ret = __get_user(x, ptr); \
+ } else { \
+ __typeof__(ptr) from = \
+ (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
+ if (from != NULL) \
+ ret = __get_user(x, from); \
+ else \
+ ret = -EFAULT; \
+ } \
+ ret; \
+})
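Both macros keep the direct __get_user()/__put_user() fast path when no IOTLB is attached; with an IOTLB they translate the ring pointer first and return -EFAULT if the translation is missing or spans more than one mapping. Usage sketch, mirroring the avail-index read later in this patch:

    __virtio16 avail_idx;

    if (vhost_get_user(vq, avail_idx, &vq->avail->idx))
            vq_err(vq, "Failed to access avail idx at %p\n",
                   &vq->avail->idx);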
+
+static void vhost_dev_lock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_lock_nested(&d->vqs[i]->mutex, i);
+}
+
+static void vhost_dev_unlock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_unlock(&d->vqs[i]->mutex);
+}
+
+static int vhost_new_umem_range(struct vhost_umem *umem,
+ u64 start, u64 size, u64 end,
+ u64 userspace_addr, int perm)
+{
+ struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+
+ if (!node)
+ return -ENOMEM;
+
+ if (umem->numem == max_iotlb_entries) {
+ tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
+ vhost_umem_free(umem, tmp);
+ }
+
+ node->start = start;
+ node->size = size;
+ node->last = end;
+ node->userspace_addr = userspace_addr;
+ node->perm = perm;
+ INIT_LIST_HEAD(&node->link);
+ list_add_tail(&node->link, &umem->umem_list);
+ vhost_umem_interval_tree_insert(node, &umem->umem_tree);
+ umem->numem++;
+
+ return 0;
+}
+
+static void vhost_del_umem_range(struct vhost_umem *umem,
+ u64 start, u64 end)
+{
+ struct vhost_umem_node *node;
+
+ while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+ start, end)))
+ vhost_umem_free(umem, node);
+}
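The vhost_umem_interval_tree_iter_first()/_insert()/_remove() helpers are generated by the interval-tree template (see the note beside struct vhost_umem in vhost.h below). A point-lookup sketch using a hypothetical wrapper:

    /* Hypothetical: find the mapping that covers a single address. */
    static struct vhost_umem_node *vhost_umem_find(struct vhost_umem *umem,
                                                   u64 addr)
    {
            return vhost_umem_interval_tree_iter_first(&umem->umem_tree,
                                                       addr, addr);
    }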
+
+static void vhost_iotlb_notify_vq(struct vhost_dev *d,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_msg_node *node, *n;
+
+ spin_lock(&d->iotlb_lock);
+
+ list_for_each_entry_safe(node, n, &d->pending_list, node) {
+ struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
+ if (msg->iova <= vq_msg->iova &&
+ msg->iova + msg->size - 1 >= vq_msg->iova &&
+ vq_msg->type == VHOST_IOTLB_MISS) {
+ vhost_poll_queue(&node->vq->poll);
+ list_del(&node->node);
+ kfree(node);
+ }
+ }
+
+ spin_unlock(&d->iotlb_lock);
+}
+
+static int umem_access_ok(u64 uaddr, u64 size, int access)
+{
+ unsigned long a = uaddr;
+
+ /* Make sure 64 bit math will not overflow. */
+ if (vhost_overflow(uaddr, size))
+ return -EFAULT;
+
+ if ((access & VHOST_ACCESS_RO) &&
+ !access_ok(VERIFY_READ, (void __user *)a, size))
+ return -EFAULT;
+ if ((access & VHOST_ACCESS_WO) &&
+ !access_ok(VERIFY_WRITE, (void __user *)a, size))
+ return -EFAULT;
+ return 0;
+}
+
+int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg)
+{
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+ vhost_dev_lock_vqs(dev);
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+ if (!dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
+ msg->iova + msg->size - 1,
+ msg->uaddr, msg->perm)) {
+ ret = -ENOMEM;
+ break;
+ }
+ vhost_iotlb_notify_vq(dev, msg);
+ break;
+ case VHOST_IOTLB_INVALIDATE:
+ vhost_del_umem_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ vhost_dev_unlock_vqs(dev);
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
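Userspace drives this through the vhost character device. A write-side sketch, assuming the struct vhost_msg/struct vhost_iotlb_msg layout this series exports in <linux/vhost.h> (iova, size, uaddr and vhost_fd stand in for the caller's state):

    struct vhost_msg msg = {
            .type = VHOST_IOTLB_MSG,
            .iotlb = {
                    .iova  = iova,
                    .size  = size,
                    .uaddr = (__u64)(uintptr_t)uaddr,
                    .perm  = VHOST_ACCESS_RW,
                    .type  = VHOST_IOTLB_UPDATE,
            },
    };

    if (write(vhost_fd, &msg, sizeof(msg)) != sizeof(msg))
            /* the update was rejected */;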
+ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ struct iov_iter *from)
+{
+ struct vhost_msg_node node;
+ unsigned size = sizeof(struct vhost_msg);
+ size_t ret;
+ int err;
+
+ if (iov_iter_count(from) < size)
+ return 0;
+ ret = copy_from_iter(&node.msg, size, from);
+ if (ret != size)
+ goto done;
+
+ switch (node.msg.type) {
+ case VHOST_IOTLB_MSG:
+ err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
+ if (err)
+ ret = err;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ return ret;
+}
+EXPORT_SYMBOL(vhost_chr_write_iter);
+
+unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+
+ poll_wait(file, &dev->wait, wait);
+
+ if (!list_empty(&dev->read_list))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+EXPORT_SYMBOL(vhost_chr_poll);
+
+ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
+ int noblock)
+{
+ DEFINE_WAIT(wait);
+ struct vhost_msg_node *node;
+ ssize_t ret = 0;
+ unsigned size = sizeof(struct vhost_msg);
+
+ if (iov_iter_count(to) < size)
+ return 0;
+
+ while (1) {
+ if (!noblock)
+ prepare_to_wait(&dev->wait, &wait,
+ TASK_INTERRUPTIBLE);
+
+ node = vhost_dequeue_msg(dev, &dev->read_list);
+ if (node)
+ break;
+ if (noblock) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ if (!dev->iotlb) {
+ ret = -EBADFD;
+ break;
+ }
+
+ schedule();
+ }
+
+ if (!noblock)
+ finish_wait(&dev->wait, &wait);
+
+ if (node) {
+ ret = copy_to_iter(&node->msg, size, to);
+
+ if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
+ kfree(node);
+ return ret;
+ }
+
+ vhost_enqueue_msg(dev, &dev->pending_list, node);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
+
+static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
+{
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_msg_node *node;
+ struct vhost_iotlb_msg *msg;
+
+ node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
+ if (!node)
+ return -ENOMEM;
+
+ msg = &node->msg.iotlb;
+ msg->type = VHOST_IOTLB_MISS;
+ msg->iova = iova;
+ msg->perm = access;
+
+ vhost_enqueue_msg(dev, &dev->read_list, node);
+
+ return 0;
+}
+
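This completes the round trip: a miss queues a VHOST_IOTLB_MISS node on read_list and wakes readers; userspace read()s it, resolves the address, and write()s a VHOST_IOTLB_UPDATE back, which vhost_iotlb_notify_vq() matches against pending_list to re-poll the stalled virtqueue. Read-side sketch (handle_miss() is hypothetical):

    struct vhost_msg msg;

    while (read(vhost_fd, &msg, sizeof(msg)) == (ssize_t)sizeof(msg)) {
            if (msg.type == VHOST_IOTLB_MSG &&
                msg.iotlb.type == VHOST_IOTLB_MISS)
                    handle_miss(vhost_fd, msg.iotlb.iova, msg.iotlb.perm);
    }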
static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
struct vring_used __user *used)
{
size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
access_ok(VERIFY_READ, avail,
sizeof *avail + num * sizeof *avail->ring + s) &&
@@ -638,11 +1105,59 @@ static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
sizeof *used + num * sizeof *used->ring + s);
}
+static int iotlb_access_ok(struct vhost_virtqueue *vq,
+ int access, u64 addr, u64 len)
+{
+ const struct vhost_umem_node *node;
+ struct vhost_umem *umem = vq->iotlb;
+ u64 s = 0, size;
+
+ while (len > s) {
+ node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+ addr,
+ addr + len - 1);
+ if (node == NULL || node->start > addr) {
+ vhost_iotlb_miss(vq, addr, access);
+ return false;
+ } else if (!(node->perm & access)) {
+ /* Report the possible access violation by
+ * requesting another translation from userspace.
+ */
+ return false;
+ }
+
+ size = node->size - addr + node->start;
+ s += size;
+ addr += size;
+ }
+
+ return true;
+}
+
+int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
+{
+ size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+ unsigned int num = vq->num;
+
+ if (!vq->iotlb)
+ return 1;
+
+ return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
+ num * sizeof *vq->desc) &&
+ iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
+ sizeof *vq->avail +
+ num * sizeof *vq->avail->ring + s) &&
+ iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
+ sizeof *vq->used +
+ num * sizeof *vq->used->ring + s);
+}
+EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
+
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
- return memory_access_ok(dev, dev->memory, 1);
+ return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
@@ -653,7 +1168,7 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
{
size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
- return vq_memory_access_ok(log_base, vq->memory,
+ return vq_memory_access_ok(log_base, vq->umem,
vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
(!vq->log_used || log_access_ok(log_base, vq->log_addr,
sizeof *vq->used +
@@ -664,33 +1179,36 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
- return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
- vq_log_access_ok(vq, vq->log_base);
-}
-EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
+ if (!vq_log_access_ok(vq, vq->log_base))
+ return 0;
-static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
-{
- const struct vhost_memory_region *r1 = p1, *r2 = p2;
- if (r1->guest_phys_addr < r2->guest_phys_addr)
+ /* Access validation occurs at prefetch time with IOTLB */
+ if (vq->iotlb)
return 1;
- if (r1->guest_phys_addr > r2->guest_phys_addr)
- return -1;
- return 0;
+
+ return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
+EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
-static void *vhost_kvzalloc(unsigned long size)
+static struct vhost_umem *vhost_umem_alloc(void)
{
- void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem));
- if (!n)
- n = vzalloc(size);
- return n;
+ if (!umem)
+ return NULL;
+
+ umem->umem_tree = RB_ROOT;
+ umem->numem = 0;
+ INIT_LIST_HEAD(&umem->umem_list);
+
+ return umem;
}
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
- struct vhost_memory mem, *newmem, *oldmem;
+ struct vhost_memory mem, *newmem;
+ struct vhost_memory_region *region;
+ struct vhost_umem *newumem, *oldumem;
unsigned long size = offsetof(struct vhost_memory, regions);
int i;
@@ -710,24 +1228,47 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
kvfree(newmem);
return -EFAULT;
}
- sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
- vhost_memory_reg_sort_cmp, NULL);
- if (!memory_access_ok(d, newmem, 0)) {
+ newumem = vhost_umem_alloc();
+ if (!newumem) {
kvfree(newmem);
- return -EFAULT;
+ return -ENOMEM;
+ }
+
+ for (region = newmem->regions;
+ region < newmem->regions + mem.nregions;
+ region++) {
+ if (vhost_new_umem_range(newumem,
+ region->guest_phys_addr,
+ region->memory_size,
+ region->guest_phys_addr +
+ region->memory_size - 1,
+ region->userspace_addr,
+ VHOST_ACCESS_RW))
+ goto err;
}
- oldmem = d->memory;
- d->memory = newmem;
+
+ if (!memory_access_ok(d, newumem, 0))
+ goto err;
+
+ oldumem = d->umem;
+ d->umem = newumem;
/* All memory accesses are done under some VQ mutex. */
for (i = 0; i < d->nvqs; ++i) {
mutex_lock(&d->vqs[i]->mutex);
- d->vqs[i]->memory = newmem;
+ d->vqs[i]->umem = newumem;
mutex_unlock(&d->vqs[i]->mutex);
}
- kvfree(oldmem);
+
+ kvfree(newmem);
+ vhost_umem_clean(oldumem);
return 0;
+
+err:
+ vhost_umem_clean(newumem);
+ kvfree(newmem);
+ return -EFAULT;
}
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
@@ -913,6 +1454,19 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
case VHOST_GET_VRING_ENDIAN:
r = vhost_get_vring_endian(vq, idx, argp);
break;
+ case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
+ if (copy_from_user(&s, argp, sizeof(s))) {
+ r = -EFAULT;
+ break;
+ }
+ vq->busyloop_timeout = s.num;
+ break;
+ case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
+ s.index = idx;
+ s.num = vq->busyloop_timeout;
+ if (copy_to_user(argp, &s, sizeof(s)))
+ r = -EFAULT;
+ break;
default:
r = -ENOIOCTLCMD;
}
@@ -936,6 +1490,30 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
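The two new ioctls read and write vq->busyloop_timeout through the existing struct vhost_vring_state; vhost_net treats the value as roughly microseconds of busy polling. Userspace sketch:

    struct vhost_vring_state s = { .index = 0, .num = 50 };

    if (ioctl(vhost_fd, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, &s) < 0)
            perror("VHOST_SET_VRING_BUSYLOOP_TIMEOUT");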
+int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
+{
+ struct vhost_umem *niotlb, *oiotlb;
+ int i;
+
+ niotlb = vhost_umem_alloc();
+ if (!niotlb)
+ return -ENOMEM;
+
+ oiotlb = d->iotlb;
+ d->iotlb = niotlb;
+
+ for (i = 0; i < d->nvqs; ++i) {
+ mutex_lock(&d->vqs[i]->mutex);
+ d->vqs[i]->iotlb = niotlb;
+ mutex_unlock(&d->vqs[i]->mutex);
+ }
+
+ vhost_umem_clean(oiotlb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
+
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
@@ -1018,28 +1596,6 @@ done:
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
-static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
- __u64 addr, __u32 len)
-{
- const struct vhost_memory_region *reg;
- int start = 0, end = mem->nregions;
-
- while (start < end) {
- int slot = start + (end - start) / 2;
- reg = mem->regions + slot;
- if (addr >= reg->guest_phys_addr)
- end = slot;
- else
- start = slot + 1;
- }
-
- reg = mem->regions + start;
- if (addr >= reg->guest_phys_addr &&
- reg->guest_phys_addr + reg->memory_size > addr)
- return reg;
- return NULL;
-}
-
/* TODO: This is really inefficient. We need something like get_user()
* (instruction directly accesses the data, with an exception table entry
* returning -EFAULT). See Documentation/x86/exception-tables.txt.
@@ -1118,7 +1674,8 @@ EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
void __user *used;
- if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
+ if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
+ &vq->used->flags) < 0)
return -EFAULT;
if (unlikely(vq->log_used)) {
/* Make sure the flag is seen before log. */
@@ -1136,7 +1693,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
- if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
+ if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
+ vhost_avail_event(vq)))
return -EFAULT;
if (unlikely(vq->log_used)) {
void __user *used;
@@ -1153,62 +1711,84 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
return 0;
}
-int vhost_init_used(struct vhost_virtqueue *vq)
+int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
__virtio16 last_used_idx;
int r;
- if (!vq->private_data) {
- vq->is_le = virtio_legacy_is_little_endian();
+ bool is_le = vq->is_le;
+
+ if (!vq->private_data)
return 0;
- }
vhost_init_is_le(vq);
r = vhost_update_used_flags(vq);
if (r)
- return r;
+ goto err;
vq->signalled_used_valid = false;
- if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx))
- return -EFAULT;
- r = __get_user(last_used_idx, &vq->used->idx);
- if (r)
- return r;
+ if (!vq->iotlb &&
+ !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
+ r = -EFAULT;
+ goto err;
+ }
+ r = vhost_get_user(vq, last_used_idx, &vq->used->idx);
+ if (r) {
+ vq_err(vq, "Can't access used idx at %p\n",
+ &vq->used->idx);
+ goto err;
+ }
vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
return 0;
+
+err:
+ vq->is_le = is_le;
+ return r;
}
-EXPORT_SYMBOL_GPL(vhost_init_used);
+EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
- struct iovec iov[], int iov_size)
+ struct iovec iov[], int iov_size, int access)
{
- const struct vhost_memory_region *reg;
- struct vhost_memory *mem;
+ const struct vhost_umem_node *node;
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
struct iovec *_iov;
u64 s = 0;
int ret = 0;
- mem = vq->memory;
while ((u64)len > s) {
u64 size;
if (unlikely(ret >= iov_size)) {
ret = -ENOBUFS;
break;
}
- reg = find_region(mem, addr, len);
- if (unlikely(!reg)) {
- ret = -EFAULT;
+
+ node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+ addr, addr + len - 1);
+ if (node == NULL || node->start > addr) {
+ if (umem != dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = -EAGAIN;
+ break;
+ } else if (!(node->perm & access)) {
+ ret = -EPERM;
break;
}
+
_iov = iov + ret;
- size = reg->memory_size - addr + reg->guest_phys_addr;
+ size = node->size - addr + node->start;
_iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
- (reg->userspace_addr + addr - reg->guest_phys_addr);
+ (node->userspace_addr + addr - node->start);
s += size;
addr += size;
++ret;
}
+ if (ret == -EAGAIN)
+ vhost_iotlb_miss(vq, addr, access);
return ret;
}
@@ -1243,7 +1823,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
unsigned int i = 0, count, found = 0;
u32 len = vhost32_to_cpu(vq, indirect->len);
struct iov_iter from;
- int ret;
+ int ret, access;
/* Sanity check */
if (unlikely(len % sizeof desc)) {
@@ -1255,9 +1835,10 @@ static int get_indirect(struct vhost_virtqueue *vq,
}
ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
- UIO_MAXIOV);
+ UIO_MAXIOV, VHOST_ACCESS_RO);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d in indirect.\n", ret);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d in indirect.\n", ret);
return ret;
}
iov_iter_init(&from, READ, vq->indirect, ret, len);
@@ -1295,16 +1876,22 @@ static int get_indirect(struct vhost_virtqueue *vq,
return -EINVAL;
}
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
+
ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
vhost32_to_cpu(vq, desc.len), iov + iov_count,
- iov_size - iov_count);
+ iov_size - iov_count, access);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d indirect idx %d\n",
- ret, i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d indirect idx %d\n",
+ ret, i);
return ret;
}
/* If this is an input descriptor, increment that count. */
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
+ if (access == VHOST_ACCESS_WO) {
*in_num += ret;
if (unlikely(log)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
@@ -1343,11 +1930,11 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
u16 last_avail_idx;
__virtio16 avail_idx;
__virtio16 ring_head;
- int ret;
+ int ret, access;
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vq->last_avail_idx;
- if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
+ if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) {
vq_err(vq, "Failed to access avail idx at %p\n",
&vq->avail->idx);
return -EFAULT;
@@ -1369,8 +1956,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
- if (unlikely(__get_user(ring_head,
- &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
+ if (unlikely(vhost_get_user(vq, ring_head,
+ &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
vq_err(vq, "Failed to read head: idx %d address %p\n",
last_avail_idx,
&vq->avail->ring[last_avail_idx % vq->num]);
@@ -1405,7 +1992,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
i, vq->num, head);
return -EINVAL;
}
- ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
+ ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
+ sizeof desc);
if (unlikely(ret)) {
vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
i, vq->desc + i);
@@ -1416,22 +2004,28 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
out_num, in_num,
log, log_num, &desc);
if (unlikely(ret < 0)) {
- vq_err(vq, "Failure detected "
- "in indirect descriptor at idx %d\n", i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Failure detected "
+ "in indirect descriptor at idx %d\n", i);
return ret;
}
continue;
}
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
vhost32_to_cpu(vq, desc.len), iov + iov_count,
- iov_size - iov_count);
+ iov_size - iov_count, access);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d descriptor idx %d\n",
- ret, i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d descriptor idx %d\n",
+ ret, i);
return ret;
}
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
+ if (access == VHOST_ACCESS_WO) {
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
@@ -1493,15 +2087,15 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
start = vq->last_used_idx & (vq->num - 1);
used = vq->used->ring + start;
if (count == 1) {
- if (__put_user(heads[0].id, &used->id)) {
+ if (vhost_put_user(vq, heads[0].id, &used->id)) {
vq_err(vq, "Failed to write used id");
return -EFAULT;
}
- if (__put_user(heads[0].len, &used->len)) {
+ if (vhost_put_user(vq, heads[0].len, &used->len)) {
vq_err(vq, "Failed to write used len");
return -EFAULT;
}
- } else if (__copy_to_user(used, heads, count * sizeof *used)) {
+ } else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
vq_err(vq, "Failed to write used");
return -EFAULT;
}
@@ -1545,11 +2139,14 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
/* Make sure buffer is written before we update index. */
smp_wmb();
- if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
+ if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
+ &vq->used->idx)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
if (unlikely(vq->log_used)) {
+ /* Make sure used idx is seen before log. */
+ smp_wmb();
/* Log used index update. */
log_write(vq->log_base,
vq->log_addr + offsetof(struct vring_used, idx),
@@ -1577,7 +2174,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags;
- if (__get_user(flags, &vq->avail->flags)) {
+ if (vhost_get_user(vq, flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
}
@@ -1591,7 +2188,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
- if (__get_user(event, vhost_used_event(vq))) {
+ if (vhost_get_user(vq, event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
@@ -1627,6 +2224,20 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
+/* Return true if we're sure that the available ring is empty */
+bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+ __virtio16 avail_idx;
+ int r;
+
+ r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
+ if (r)
+ return false;
+
+ return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
+}
+EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
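This is the consumer side of busyloop_timeout: a handler may spin briefly instead of sleeping, bailing out as soon as descriptors arrive or other work is queued. Reduced sketch of the vhost_net-style loop (busy_poll_expired() is a hypothetical stand-in for the caller's clock check):

    while (!busy_poll_expired(endtime)) {
            if (vhost_has_work(dev))
                    break;          /* yield to the worker thread */
            if (!vhost_vq_avail_empty(dev, vq))
                    break;          /* new descriptors are available */
            cpu_relax();
    }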
+
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
@@ -1654,7 +2265,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
smp_mb();
- r = __get_user(avail_idx, &vq->avail->idx);
+ r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
if (r) {
vq_err(vq, "Failed to check avail idx at %p: %d\n",
&vq->avail->idx, r);
@@ -1682,6 +2293,50 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
+/* Create a new message. */
+struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+{
+ struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ /* Make sure all padding within the structure is initialized. */
+ memset(&node->msg, 0, sizeof node->msg);
+ node->vq = vq;
+ node->msg.type = type;
+ return node;
+}
+EXPORT_SYMBOL_GPL(vhost_new_msg);
+
+void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
+ struct vhost_msg_node *node)
+{
+ spin_lock(&dev->iotlb_lock);
+ list_add_tail(&node->node, head);
+ spin_unlock(&dev->iotlb_lock);
+
+ wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
+}
+EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
+
+struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
+ struct list_head *head)
+{
+ struct vhost_msg_node *node = NULL;
+
+ spin_lock(&dev->iotlb_lock);
+ if (!list_empty(head)) {
+ node = list_first_entry(head, struct vhost_msg_node,
+ node);
+ list_del(&node->node);
+ }
+ spin_unlock(&dev->iotlb_lock);
+
+ return node;
+}
+EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
+
static int __init vhost_init(void)
{
return 0;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d3f767448a72..78f3c5fc02e4 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -15,13 +15,15 @@
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
+#define VHOST_WORK_QUEUED 1
struct vhost_work {
- struct list_head node;
+ struct llist_node node;
vhost_work_fn_t fn;
wait_queue_head_t done;
int flushing;
unsigned queue_seq;
unsigned done_seq;
+ unsigned long flags;
};
/* Poll a file (eventfd or socket) */
@@ -37,6 +39,7 @@ struct vhost_poll {
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
+bool vhost_has_work(struct vhost_dev *dev);
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
unsigned long mask, struct vhost_dev *dev);
@@ -52,6 +55,27 @@ struct vhost_log {
u64 len;
};
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+struct vhost_umem_node {
+ struct rb_node rb;
+ struct list_head link;
+ __u64 start;
+ __u64 last;
+ __u64 size;
+ __u64 userspace_addr;
+ __u32 perm;
+ __u32 flags_padding;
+ __u64 __subtree_last;
+};
+
+struct vhost_umem {
+ struct rb_root umem_tree;
+ struct list_head umem_list;
+ int numem;
+};
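The vhost_umem_interval_tree_*() functions used throughout vhost.c are generated rather than hand-written; the instantiation is not visible in this excerpt, but upstream vhost.c pairs the START/LAST accessors above with:

    #include <linux/interval_tree_generic.h>

    INTERVAL_TREE_DEFINE(struct vhost_umem_node, rb, __u64, __subtree_last,
                         START, LAST, static inline, vhost_umem_interval_tree);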
+
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -97,10 +121,12 @@ struct vhost_virtqueue {
u64 log_addr;
struct iovec iov[UIO_MAXIOV];
+ struct iovec iotlb_iov[64];
struct iovec *indirect;
struct vring_used_elem *heads;
/* Protected by virtqueue mutex. */
- struct vhost_memory *memory;
+ struct vhost_umem *umem;
+ struct vhost_umem *iotlb;
void *private_data;
u64 acked_features;
/* Log write descriptors */
@@ -114,27 +140,38 @@ struct vhost_virtqueue {
/* Ring endianness requested by userspace for cross-endian support. */
bool user_be;
#endif
+ u32 busyloop_timeout;
+};
+
+struct vhost_msg_node {
+ struct vhost_msg msg;
+ struct vhost_virtqueue *vq;
+ struct list_head node;
};
struct vhost_dev {
- struct vhost_memory *memory;
struct mm_struct *mm;
struct mutex mutex;
struct vhost_virtqueue **vqs;
int nvqs;
struct file *log_file;
struct eventfd_ctx *log_ctx;
- spinlock_t work_lock;
- struct list_head work_list;
+ struct llist_head work_list;
struct task_struct *worker;
+ struct vhost_umem *umem;
+ struct vhost_umem *iotlb;
+ spinlock_t iotlb_lock;
+ struct list_head read_list;
+ struct list_head pending_list;
+ wait_queue_head_t wait;
};
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
-struct vhost_memory *vhost_dev_reset_owner_prepare(void);
-void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
+struct vhost_umem *vhost_dev_reset_owner_prepare(void);
+void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
@@ -148,7 +185,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
-int vhost_init_used(struct vhost_virtqueue *);
+int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
unsigned count);
@@ -158,10 +195,26 @@ void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
+bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len);
+int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
+
+struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
+void vhost_enqueue_msg(struct vhost_dev *dev,
+ struct list_head *head,
+ struct vhost_msg_node *node);
+struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
+ struct list_head *head);
+unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+ poll_table *wait);
+ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
+ int noblock);
+ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ struct iov_iter *from);
+int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
#define vq_err(vq, fmt, ...) do { \
pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
new file mode 100644
index 000000000000..72e914de473e
--- /dev/null
+++ b/drivers/vhost/vsock.c
@@ -0,0 +1,797 @@
+/*
+ * vhost transport for vsock
+ *
+ * Copyright (C) 2013-2015 Red Hat, Inc.
+ * Author: Asias He <asias@redhat.com>
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+#include <linux/miscdevice.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <net/sock.h>
+#include <linux/virtio_vsock.h>
+#include <linux/vhost.h>
+#include <linux/hashtable.h>
+
+#include <net/af_vsock.h>
+#include "vhost.h"
+
+#define VHOST_VSOCK_DEFAULT_HOST_CID 2
+
+enum {
+ VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+};
+
+/* Used to track all the vhost_vsock instances on the system. */
+static DEFINE_SPINLOCK(vhost_vsock_lock);
+static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
+
+struct vhost_vsock {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[2];
+
+ /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
+ struct hlist_node hash;
+
+ struct vhost_work send_pkt_work;
+ spinlock_t send_pkt_list_lock;
+ struct list_head send_pkt_list; /* host->guest pending packets */
+
+ atomic_t queued_replies;
+
+ u32 guest_cid;
+};
+
+static u32 vhost_transport_get_local_cid(void)
+{
+ return VHOST_VSOCK_DEFAULT_HOST_CID;
+}
+
+/* Callers that dereference the return value must hold vhost_vsock_lock or the
+ * RCU read lock.
+ */
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+ struct vhost_vsock *vsock;
+
+ hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
+ u32 other_cid = vsock->guest_cid;
+
+ /* Skip instances that have no CID yet */
+ if (other_cid == 0)
+ continue;
+
+ if (other_cid == guest_cid) {
+ return vsock;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ bool added = false;
+ bool restart_tx = false;
+
+ mutex_lock(&vq->mutex);
+
+ if (!vq->private_data)
+ goto out;
+
+ /* Avoid further vmexits; we're already processing the virtqueue */
+ vhost_disable_notify(&vsock->dev, vq);
+
+ for (;;) {
+ struct virtio_vsock_pkt *pkt;
+ struct iov_iter iov_iter;
+ unsigned out, in;
+ size_t nbytes;
+ size_t len;
+ int head;
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ if (list_empty(&vsock->send_pkt_list)) {
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+ vhost_enable_notify(&vsock->dev, vq);
+ break;
+ }
+
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+ break;
+ }
+
+ if (head == vq->num) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ /* We cannot finish yet if more buffers snuck in while
+ * re-enabling notify.
+ */
+ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+ vhost_disable_notify(&vsock->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ if (out) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Expected 0 output buffers, got %u\n", out);
+ break;
+ }
+
+ len = iov_length(&vq->iov[out], in);
+ iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);
+
+ nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+ if (nbytes != sizeof(pkt->hdr)) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Faulted on copying pkt hdr\n");
+ break;
+ }
+
+ nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
+ if (nbytes != pkt->len) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Faulted on copying pkt buf\n");
+ break;
+ }
+
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
+ added = true;
+
+ if (pkt->reply) {
+ int val;
+
+ val = atomic_dec_return(&vsock->queued_replies);
+
+ /* Do we have resources to resume tx processing? */
+ if (val + 1 == tx_vq->num)
+ restart_tx = true;
+ }
+
+ virtio_transport_free_pkt(pkt);
+ }
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+
+out:
+ mutex_unlock(&vq->mutex);
+
+ if (restart_tx)
+ vhost_poll_queue(&tx_vq->poll);
+}
+
+static void vhost_transport_send_pkt_work(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq;
+ struct vhost_vsock *vsock;
+
+ vsock = container_of(work, struct vhost_vsock, send_pkt_work);
+ vq = &vsock->vqs[VSOCK_VQ_RX];
+
+ vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int
+vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+{
+ struct vhost_vsock *vsock;
+ struct vhost_virtqueue *vq;
+ int len = pkt->len;
+
+ rcu_read_lock();
+
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+ if (!vsock) {
+ rcu_read_unlock();
+ virtio_transport_free_pkt(pkt);
+ return -ENODEV;
+ }
+
+ vq = &vsock->vqs[VSOCK_VQ_RX];
+
+ if (pkt->reply)
+ atomic_inc(&vsock->queued_replies);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add_tail(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+ rcu_read_unlock();
+ return len;
+}
+
+static int
+vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+ struct vhost_vsock *vsock;
+ struct virtio_vsock_pkt *pkt, *n;
+ int cnt = 0;
+ int ret = -ENODEV;
+ LIST_HEAD(freeme);
+
+ rcu_read_lock();
+
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+ if (!vsock)
+ goto out;
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+ if (pkt->vsk != vsk)
+ continue;
+ list_move(&pkt->list, &freeme);
+ }
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ list_for_each_entry_safe(pkt, n, &freeme, list) {
+ if (pkt->reply)
+ cnt++;
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+
+ if (cnt) {
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ int new_cnt;
+
+ new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+ if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+ vhost_poll_queue(&tx_vq->poll);
+ }
+
+ ret = 0;
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static struct virtio_vsock_pkt *
+vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
+ unsigned int out, unsigned int in)
+{
+ struct virtio_vsock_pkt *pkt;
+ struct iov_iter iov_iter;
+ size_t nbytes;
+ size_t len;
+
+ if (in != 0) {
+ vq_err(vq, "Expected 0 input buffers, got %u\n", in);
+ return NULL;
+ }
+
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return NULL;
+
+ len = iov_length(vq->iov, out);
+ iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);
+
+ nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+ if (nbytes != sizeof(pkt->hdr)) {
+ vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
+ sizeof(pkt->hdr), nbytes);
+ kfree(pkt);
+ return NULL;
+ }
+
+ if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
+ pkt->len = le32_to_cpu(pkt->hdr.len);
+
+ /* No payload */
+ if (!pkt->len)
+ return pkt;
+
+ /* The pkt is too big */
+ if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
+ kfree(pkt);
+ return NULL;
+ }
+
+ pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return NULL;
+ }
+
+ nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
+ if (nbytes != pkt->len) {
+ vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
+ pkt->len, nbytes);
+ virtio_transport_free_pkt(pkt);
+ return NULL;
+ }
+
+ return pkt;
+}
+
+/* Is there space left for replies to rx packets? */
+static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
+{
+ struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
+ int val;
+
+ smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
+ val = atomic_read(&vsock->queued_replies);
+
+ return val < vq->num;
+}
+
+static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+ struct virtio_vsock_pkt *pkt;
+ int head;
+ unsigned int out, in;
+ bool added = false;
+
+ mutex_lock(&vq->mutex);
+
+ if (!vq->private_data)
+ goto out;
+
+ vhost_disable_notify(&vsock->dev, vq);
+ for (;;) {
+ u32 len;
+
+ if (!vhost_vsock_more_replies(vsock)) {
+ /* Stop tx until the device processes already
+ * pending replies. Leave tx virtqueue
+ * callbacks disabled.
+ */
+ goto no_more_replies;
+ }
+
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0)
+ break;
+
+ if (head == vq->num) {
+ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+ vhost_disable_notify(&vsock->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ pkt = vhost_vsock_alloc_pkt(vq, out, in);
+ if (!pkt) {
+ vq_err(vq, "Faulted on pkt\n");
+ continue;
+ }
+
+ len = pkt->len;
+
+ /* Only accept correctly addressed packets */
+ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
+ virtio_transport_recv_pkt(pkt);
+ else
+ virtio_transport_free_pkt(pkt);
+
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
+ added = true;
+ }
+
+no_more_replies:
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+
+out:
+ mutex_unlock(&vq->mutex);
+}
+
+static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+
+ vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int vhost_vsock_start(struct vhost_vsock *vsock)
+{
+ struct vhost_virtqueue *vq;
+ size_t i;
+ int ret;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+
+ if (!vhost_vq_access_ok(vq)) {
+ ret = -EFAULT;
+ goto err_vq;
+ }
+
+ if (!vq->private_data) {
+ vq->private_data = vsock;
+ ret = vhost_vq_init_access(vq);
+ if (ret)
+ goto err_vq;
+ }
+
+ mutex_unlock(&vq->mutex);
+ }
+
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+
+err_vq:
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+ }
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+static int vhost_vsock_stop(struct vhost_vsock *vsock)
+{
+ size_t i;
+ int ret;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+ }
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+static void vhost_vsock_free(struct vhost_vsock *vsock)
+{
+ kvfree(vsock);
+}
+
+static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+{
+ struct vhost_virtqueue **vqs;
+ struct vhost_vsock *vsock;
+ int ret;
+
+ /* This struct is large and allocation could fail; fall back to
+ * vmalloc if there is no other way.
+ */
+ vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!vsock) {
+ vsock = vmalloc(sizeof(*vsock));
+ if (!vsock)
+ return -ENOMEM;
+ }
+
+ vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vsock->guest_cid = 0; /* no CID assigned yet */
+
+ atomic_set(&vsock->queued_replies, 0);
+
+ vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
+ vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
+ vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
+ vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
+
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+
+ file->private_data = vsock;
+ spin_lock_init(&vsock->send_pkt_list_lock);
+ INIT_LIST_HEAD(&vsock->send_pkt_list);
+ vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+ return 0;
+
+out:
+ vhost_vsock_free(vsock);
+ return ret;
+}
+
+static void vhost_vsock_flush(struct vhost_vsock *vsock)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
+ if (vsock->vqs[i].handle_kick)
+ vhost_poll_flush(&vsock->vqs[i].poll);
+ vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
+}
+
+static void vhost_vsock_reset_orphans(struct sock *sk)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ /* vmci_transport.c doesn't take sk_lock here either. At least we're
+ * under vsock_table_lock so the sock cannot disappear while we're
+ * executing.
+ */
+
+ /* If the peer is still valid, no need to reset connection */
+ if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+ return;
+
+ /* If the close timeout is pending, let it expire. This avoids races
+ * with the timeout callback.
+ */
+ if (vsk->close_work_scheduled)
+ return;
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk->sk_error_report(sk);
+}
+
+static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+{
+ struct vhost_vsock *vsock = file->private_data;
+
+ spin_lock_bh(&vhost_vsock_lock);
+ if (vsock->guest_cid)
+ hash_del_rcu(&vsock->hash);
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ /* Wait for other CPUs to finish using vsock */
+ synchronize_rcu();
+
+ /* Iterating over all connections for all CIDs to find orphans is
+ * inefficient. Room for improvement here. */
+ vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+
+ vhost_vsock_stop(vsock);
+ vhost_vsock_flush(vsock);
+ vhost_dev_stop(&vsock->dev);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ while (!list_empty(&vsock->send_pkt_list)) {
+ struct virtio_vsock_pkt *pkt;
+
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ vhost_dev_cleanup(&vsock->dev, false);
+ kfree(vsock->dev.vqs);
+ vhost_vsock_free(vsock);
+ return 0;
+}
+
+static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
+{
+ struct vhost_vsock *other;
+
+ /* Refuse reserved CIDs */
+ if (guest_cid <= VMADDR_CID_HOST ||
+ guest_cid == U32_MAX)
+ return -EINVAL;
+
+ /* 64-bit CIDs are not yet supported */
+ if (guest_cid > U32_MAX)
+ return -EINVAL;
+
+ /* Refuse if CID is already in use */
+ spin_lock_bh(&vhost_vsock_lock);
+ other = vhost_vsock_get(guest_cid);
+ if (other && other != vsock) {
+ spin_unlock_bh(&vhost_vsock_lock);
+ return -EADDRINUSE;
+ }
+
+ if (vsock->guest_cid)
+ hash_del_rcu(&vsock->hash);
+
+ vsock->guest_cid = guest_cid;
+ hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ return 0;
+}
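From userspace, a VMM assigns the CID before flipping the device on; both ioctls take their argument by pointer. Sketch:

    __u64 cid = 3;          /* first CID outside the reserved range */
    int running = 1;

    if (ioctl(vsock_fd, VHOST_VSOCK_SET_GUEST_CID, &cid) < 0)
            perror("VHOST_VSOCK_SET_GUEST_CID");
    if (ioctl(vsock_fd, VHOST_VSOCK_SET_RUNNING, &running) < 0)
            perror("VHOST_VSOCK_SET_RUNNING");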
+
+static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ if (features & ~VHOST_VSOCK_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vsock->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vsock->dev)) {
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+}
+
+static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_vsock *vsock = f->private_data;
+ void __user *argp = (void __user *)arg;
+ u64 guest_cid;
+ u64 features;
+ int start;
+ int r;
+
+ switch (ioctl) {
+ case VHOST_VSOCK_SET_GUEST_CID:
+ if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
+ return -EFAULT;
+ return vhost_vsock_set_cid(vsock, guest_cid);
+ case VHOST_VSOCK_SET_RUNNING:
+ if (copy_from_user(&start, argp, sizeof(start)))
+ return -EFAULT;
+ if (start)
+ return vhost_vsock_start(vsock);
+ else
+ return vhost_vsock_stop(vsock);
+ case VHOST_GET_FEATURES:
+ features = VHOST_VSOCK_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ return vhost_vsock_set_features(vsock, features);
+ default:
+ mutex_lock(&vsock->dev.mutex);
+ r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
+ else
+ vhost_vsock_flush(vsock);
+ mutex_unlock(&vsock->dev.mutex);
+ return r;
+ }
+}
+
+static const struct file_operations vhost_vsock_fops = {
+ .owner = THIS_MODULE,
+ .open = vhost_vsock_dev_open,
+ .release = vhost_vsock_dev_release,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = vhost_vsock_dev_ioctl,
+};
+
+static struct miscdevice vhost_vsock_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vhost-vsock",
+ .fops = &vhost_vsock_fops,
+};
+
+static struct virtio_transport vhost_transport = {
+ .transport = {
+ .get_local_cid = vhost_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+ .cancel_pkt = vhost_transport_cancel_pkt,
+
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+
+ .set_buffer_size = virtio_transport_set_buffer_size,
+ .set_min_buffer_size = virtio_transport_set_min_buffer_size,
+ .set_max_buffer_size = virtio_transport_set_max_buffer_size,
+ .get_buffer_size = virtio_transport_get_buffer_size,
+ .get_min_buffer_size = virtio_transport_get_min_buffer_size,
+ .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+ },
+
+ .send_pkt = vhost_transport_send_pkt,
+};
+
+static int __init vhost_vsock_init(void)
+{
+ int ret;
+
+ ret = vsock_core_init(&vhost_transport.transport);
+ if (ret < 0)
+ return ret;
+ return misc_register(&vhost_vsock_misc);
+}
+
+static void __exit vhost_vsock_exit(void)
+{
+ misc_deregister(&vhost_vsock_misc);
+ vsock_core_exit();
+}
+
+module_init(vhost_vsock_init);
+module_exit(vhost_vsock_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Asias He");
+MODULE_DESCRIPTION("vhost transport for vsock");
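
For illustration only, not part of the patch: a minimal hypothetical userspace sketch of claiming a guest CID through the ioctl interface added above. It assumes a kernel with this series applied, so that <linux/vhost.h> exposes VHOST_VSOCK_SET_GUEST_CID, and reduces error handling to perror().

/* Hypothetical sketch: claim guest CID 3 on a fresh vhost-vsock device. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

int main(void)
{
	uint64_t cid = 3;	/* must fit in 32 bits, see vhost_vsock_set_cid() */
	int fd = open("/dev/vhost-vsock", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vhost-vsock");
		return 1;
	}
	/* Fails with EADDRINUSE if another device already owns this CID. */
	if (ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid) < 0) {
		perror("VHOST_VSOCK_SET_GUEST_CID");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}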
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 34ab4f950f0a..0c1c34ff40a9 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
int r = 0;
+ memset(&p, 0, sizeof(p));
+
switch (cmd) {
case OMAPFB_SYNC_GFX:
DBG("ioctl SYNC_GFX\n");
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index cab9f3f63a38..77590320d44c 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -60,7 +60,7 @@ config VIRTIO_INPUT
config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
select VIRTIO
---help---
This drivers provides support for memory mapped virtio
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 7d4c7f35e5cf..f77358f08930 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -401,7 +401,7 @@ static int init_vqs(struct virtio_balloon *vb)
{
struct virtqueue *vqs[3];
vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
- const char *names[] = { "inflate", "deflate", "stats" };
+ static const char * const names[] = { "inflate", "deflate", "stats" };
int err, nvqs;
/*
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index c96944b59856..350a2a5a49db 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -170,7 +170,7 @@ static int virtinput_init_vqs(struct virtio_input *vi)
struct virtqueue *vqs[2];
vq_callback_t *cbs[] = { virtinput_recv_events,
virtinput_recv_status };
- static const char *names[] = { "events", "status" };
+ static const char * const names[] = { "events", "status" };
int err;
err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index f499d9da7237..745c6ee1bb3e 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -482,7 +482,7 @@ error_available:
static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char *names[])
+ const char * const names[])
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 2046a68ad0ba..f6bed86c17f9 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -296,7 +296,7 @@ void vp_del_vqs(struct virtio_device *vdev)
static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char *names[],
+ const char * const names[],
bool use_msix,
bool per_vq_vectors)
{
@@ -376,7 +376,7 @@ error_find:
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char *names[])
+ const char * const names[])
{
int err;
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index b976d968e793..2cc252270b2d 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -139,7 +139,7 @@ void vp_del_vqs(struct virtio_device *vdev);
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char *names[]);
+ const char * const names[]);
const char *vp_bus_name(struct virtio_device *vdev);
/* Setup the affinity for a virtqueue:
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 4469202eaa8e..631021cfc740 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -423,7 +423,7 @@ err_new_queue:
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char *names[])
+ const char * const names[])
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index a01a41a41269..761f28ffd40e 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -24,6 +24,8 @@
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
+#include <linux/dma-mapping.h>
+#include <xen/xen.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
@@ -54,6 +56,11 @@
#define END_USE(vq)
#endif
+struct vring_desc_state {
+ void *data; /* Data for callback. */
+ struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
+};
+
struct vring_virtqueue {
struct virtqueue vq;
@@ -98,12 +105,131 @@ struct vring_virtqueue {
ktime_t last_add_time;
#endif
- /* Tokens for callbacks. */
- void *data[];
+ /* Per-descriptor state. */
+ struct vring_desc_state desc_state[];
};
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+/*
+ * Modern virtio devices have feature bits to specify whether they need a
+ * quirk and bypass the IOMMU. If not there, just use the DMA API.
+ *
+ * If there, the interaction between virtio and DMA API is messy.
+ *
+ * On most systems with virtio, physical addresses match bus addresses,
+ * and it doesn't particularly matter whether we use the DMA API.
+ *
+ * On some systems, including Xen and any system with a physical device
+ * that speaks virtio behind a physical IOMMU, we must use the DMA API
+ * for virtio DMA to work at all.
+ *
+ * On other systems, including SPARC and PPC64, virtio-pci devices are
+ * enumerated as though they are behind an IOMMU, but the virtio host
+ * ignores the IOMMU, so we must either pretend that the IOMMU isn't
+ * there or somehow map everything as the identity.
+ *
+ * For the time being, we preserve historic behavior and bypass the DMA
+ * API.
+ *
+ * TODO: install a per-device DMA ops structure that does the right thing
+ * taking into account all the above quirks, and use the DMA API
+ * unconditionally on data path.
+ */
+
+static bool vring_use_dma_api(struct virtio_device *vdev)
+{
+ if (!virtio_has_iommu_quirk(vdev))
+ return true;
+
+ /* Otherwise, we are left to guess. */
+ /*
+	 * In theory, it's possible to have a buggy QEMU-supplied
+ * emulated Q35 IOMMU and Xen enabled at the same time. On
+ * such a configuration, virtio has never worked and will
+ * not work without an even larger kludge. Instead, enable
+ * the DMA API if we're a Xen guest, which at least allows
+ * all of the sensible Xen configurations to work correctly.
+ */
+ if (xen_domain())
+ return true;
+
+ return false;
+}
+
+/*
+ * The DMA ops on various arches are rather gnarly right now, and
+ * making all of the arch DMA ops work on the vring device itself
+ * is a mess. For now, we use the parent device for DMA ops.
+ */
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+{
+ return vq->vq.vdev->dev.parent;
+}
+
+/* Map one sg entry. */
+static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
+ struct scatterlist *sg,
+ enum dma_data_direction direction)
+{
+ if (!vring_use_dma_api(vq->vq.vdev))
+ return (dma_addr_t)sg_phys(sg);
+
+ /*
+ * We can't use dma_map_sg, because we don't use scatterlists in
+ * the way it expects (we don't guarantee that the scatterlist
+ * will exist for the lifetime of the mapping).
+ */
+ return dma_map_page(vring_dma_dev(vq),
+ sg_page(sg), sg->offset, sg->length,
+ direction);
+}
+
+static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
+ void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ if (!vring_use_dma_api(vq->vq.vdev))
+ return (dma_addr_t)virt_to_phys(cpu_addr);
+
+ return dma_map_single(vring_dma_dev(vq),
+ cpu_addr, size, direction);
+}
+
+static void vring_unmap_one(const struct vring_virtqueue *vq,
+ struct vring_desc *desc)
+{
+ u16 flags;
+
+ if (!vring_use_dma_api(vq->vq.vdev))
+ return;
+
+ flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
+
+ if (flags & VRING_DESC_F_INDIRECT) {
+ dma_unmap_single(vring_dma_dev(vq),
+ virtio64_to_cpu(vq->vq.vdev, desc->addr),
+ virtio32_to_cpu(vq->vq.vdev, desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(vring_dma_dev(vq),
+ virtio64_to_cpu(vq->vq.vdev, desc->addr),
+ virtio32_to_cpu(vq->vq.vdev, desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static int vring_mapping_error(const struct vring_virtqueue *vq,
+ dma_addr_t addr)
+{
+ if (!vring_use_dma_api(vq->vq.vdev))
+ return 0;
+
+ return dma_mapping_error(vring_dma_dev(vq), addr);
+}
+
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
unsigned int total_sg, gfp_t gfp)
{
@@ -137,7 +263,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
struct vring_virtqueue *vq = to_vvq(_vq);
struct scatterlist *sg;
struct vring_desc *desc;
- unsigned int i, n, avail, descs_used, uninitialized_var(prev);
+ unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
int head;
bool indirect;
@@ -177,21 +303,15 @@ static inline int virtqueue_add(struct virtqueue *_vq,
if (desc) {
/* Use a single buffer which doesn't continue */
- vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
- vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
- /* avoid kmemleak false positive (hidden by virt_to_phys) */
- kmemleak_ignore(desc);
- vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
-
+ indirect = true;
/* Set up rest to use this indirect table. */
i = 0;
descs_used = 1;
- indirect = true;
} else {
+ indirect = false;
desc = vq->vring.desc;
i = head;
descs_used = total_sg;
- indirect = false;
}
if (vq->vq.num_free < descs_used) {
@@ -208,13 +328,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
return -ENOSPC;
}
- /* We're about to use some buffers from the free list. */
- vq->vq.num_free -= descs_used;
-
for (n = 0; n < out_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+ if (vring_mapping_error(vq, addr))
+ goto unmap_release;
+
desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
- desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
+ desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
prev = i;
i = virtio16_to_cpu(_vq->vdev, desc[i].next);
@@ -222,8 +343,12 @@ static inline int virtqueue_add(struct virtqueue *_vq,
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+ if (vring_mapping_error(vq, addr))
+ goto unmap_release;
+
desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
- desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
+ desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
prev = i;
i = virtio16_to_cpu(_vq->vdev, desc[i].next);
@@ -232,14 +357,33 @@ static inline int virtqueue_add(struct virtqueue *_vq,
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
+ if (indirect) {
+ /* Now that the indirect table is filled in, map it. */
+ dma_addr_t addr = vring_map_single(
+ vq, desc, total_sg * sizeof(struct vring_desc),
+ DMA_TO_DEVICE);
+ if (vring_mapping_error(vq, addr))
+ goto unmap_release;
+
+ vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
+ vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);
+
+ vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
+ }
+
+ /* We're using some buffers from the free list. */
+ vq->vq.num_free -= descs_used;
+
/* Update free pointer */
if (indirect)
vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
else
vq->free_head = i;
- /* Set token. */
- vq->data[head] = data;
+ /* Store token and indirect buffer state. */
+ vq->desc_state[head].data = data;
+ if (indirect)
+ vq->desc_state[head].indir_desc = desc;
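+	/*
+	 * The indirect table pointer is kept so that detach_buf() can
+	 * unmap its entries and kfree() it once the buffer is returned.
+	 */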
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
@@ -262,6 +406,24 @@ static inline int virtqueue_add(struct virtqueue *_vq,
virtqueue_kick(_vq);
return 0;
+
+unmap_release:
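+	/*
+	 * Unwind: walk the descriptors mapped so far, from head up to
+	 * the entry that failed, and undo their DMA mappings.
+	 */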
+ err_idx = i;
+ i = head;
+
+ for (n = 0; n < total_sg; n++) {
+ if (i == err_idx)
+ break;
+ vring_unmap_one(vq, &desc[i]);
+		/* Walk the same table that was mapped above; desc covers the indirect case */
+		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
+ }
+
+ vq->vq.num_free += total_sg;
+
+ if (indirect)
+ kfree(desc);
+
+ return -EIO;
}
/**
@@ -432,27 +594,43 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
- unsigned int i;
+ unsigned int i, j;
+ u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
/* Clear data ptr. */
- vq->data[head] = NULL;
+ vq->desc_state[head].data = NULL;
- /* Put back on free list: find end */
+ /* Put back on free list: unmap first-level descriptors and find end */
i = head;
- /* Free the indirect table */
- if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
- kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));
-
- while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
+ while (vq->vring.desc[i].flags & nextflag) {
+ vring_unmap_one(vq, &vq->vring.desc[i]);
i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
vq->vq.num_free++;
}
+ vring_unmap_one(vq, &vq->vring.desc[i]);
vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
vq->free_head = head;
+
/* Plus final descriptor */
vq->vq.num_free++;
+
+ /* Free the indirect table, if any, now that it's unmapped. */
+ if (vq->desc_state[head].indir_desc) {
+ struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
+ u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);
+
+ BUG_ON(!(vq->vring.desc[head].flags &
+ cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
+ BUG_ON(len == 0 || len % sizeof(struct vring_desc));
+
+ for (j = 0; j < len / sizeof(struct vring_desc); j++)
+ vring_unmap_one(vq, &indir_desc[j]);
+
+ kfree(vq->desc_state[head].indir_desc);
+ vq->desc_state[head].indir_desc = NULL;
+ }
}
static inline bool more_used(const struct vring_virtqueue *vq)
@@ -507,13 +685,13 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
BAD_RING(vq, "id %u out of range\n", i);
return NULL;
}
- if (unlikely(!vq->data[i])) {
+ if (unlikely(!vq->desc_state[i].data)) {
BAD_RING(vq, "id %u is not a head!\n", i);
return NULL;
}
/* detach_buf clears data, so grab it now. */
- ret = vq->data[i];
+ ret = vq->desc_state[i].data;
detach_buf(vq, i);
vq->last_used_idx++;
/* If we expect an interrupt for the next entry, tell host
@@ -687,10 +865,10 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
START_USE(vq);
for (i = 0; i < vq->vring.num; i++) {
- if (!vq->data[i])
+ if (!vq->desc_state[i].data)
continue;
/* detach_buf clears data, so grab it now. */
- buf = vq->data[i];
+ buf = vq->desc_state[i].data;
detach_buf(vq, i);
vq->avail_idx_shadow--;
vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
@@ -744,7 +922,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
return NULL;
}
- vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
+ vq = kmalloc(sizeof(*vq) + num * sizeof(struct vring_desc_state),
+ GFP_KERNEL);
if (!vq)
return NULL;
@@ -779,11 +958,9 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
/* Put everything in free lists. */
vq->free_head = 0;
- for (i = 0; i < num-1; i++) {
+ for (i = 0; i < num-1; i++)
vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
- vq->data[i] = NULL;
- }
- vq->data[i] = NULL;
+ memset(vq->desc_state, 0, num * sizeof(struct vring_desc_state));
return &vq->vq;
}
@@ -809,6 +986,8 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_F_VERSION_1:
break;
+ case VIRTIO_F_IOMMU_PLATFORM:
+ break;
default:
/* We don't understand this bit. */
__virtio_clear_bit(vdev, i);
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 6d1d0b93b1aa..c792df826e12 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -9,7 +9,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
- uuid-tree.o props.o hash.o
+ uuid-tree.o props.o hash.o tree-checker.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 38ee08675468..8f4baa3cb992 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1726,20 +1726,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
return err;
}
-/*
- * The leaf data grows from end-to-front in the node.
- * this returns the address of the start of the last item,
- * which is the stop of the leaf data stack
- */
-static inline unsigned int leaf_data_end(struct btrfs_root *root,
- struct extent_buffer *leaf)
-{
- u32 nr = btrfs_header_nritems(leaf);
- if (nr == 0)
- return BTRFS_LEAF_DATA_SIZE(root);
- return btrfs_item_offset_nr(leaf, nr - 1);
-}
-
/*
* search for key in the extent_buffer. The items start at offset p,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index e847573c6db0..4a91d3119e59 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -35,6 +35,7 @@
#include <linux/btrfs.h>
#include <linux/workqueue.h>
#include <linux/security.h>
+#include <linux/sizes.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
@@ -897,6 +898,7 @@ struct btrfs_balance_item {
#define BTRFS_FILE_EXTENT_INLINE 0
#define BTRFS_FILE_EXTENT_REG 1
#define BTRFS_FILE_EXTENT_PREALLOC 2
+#define BTRFS_FILE_EXTENT_TYPES 2
struct btrfs_file_extent_item {
/*
@@ -2283,7 +2285,7 @@ do { \
#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
struct btrfs_map_token {
- struct extent_buffer *eb;
+ const struct extent_buffer *eb;
char *kaddr;
unsigned long offset;
};
@@ -2314,18 +2316,19 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
sizeof(((type *)0)->member)))
#define DECLARE_BTRFS_SETGET_BITS(bits) \
-u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
- unsigned long off, \
- struct btrfs_map_token *token); \
-void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \
+u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
+ const void *ptr, unsigned long off, \
+ struct btrfs_map_token *token); \
+void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \
unsigned long off, u##bits val, \
struct btrfs_map_token *token); \
-static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \
+static inline u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
+ const void *ptr, \
unsigned long off) \
{ \
return btrfs_get_token_##bits(eb, ptr, off, NULL); \
} \
-static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
+static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr,\
unsigned long off, u##bits val) \
{ \
btrfs_set_token_##bits(eb, ptr, off, val, NULL); \
@@ -2337,7 +2340,8 @@ DECLARE_BTRFS_SETGET_BITS(32)
DECLARE_BTRFS_SETGET_BITS(64)
#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
-static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \
+static inline u##bits btrfs_##name(const struct extent_buffer *eb, \
+ const type *s) \
{ \
BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
return btrfs_get_##bits(eb, s, offsetof(type, member)); \
@@ -2348,7 +2352,8 @@ static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \
BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
btrfs_set_##bits(eb, s, offsetof(type, member), val); \
} \
-static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \
+static inline u##bits btrfs_token_##name(const struct extent_buffer *eb,\
+ const type *s, \
struct btrfs_map_token *token) \
{ \
BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
@@ -2363,9 +2368,9 @@ static inline void btrfs_set_token_##name(struct extent_buffer *eb, \
}
#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
-static inline u##bits btrfs_##name(struct extent_buffer *eb) \
+static inline u##bits btrfs_##name(const struct extent_buffer *eb) \
{ \
- type *p = page_address(eb->pages[0]); \
+ const type *p = page_address(eb->pages[0]); \
u##bits res = le##bits##_to_cpu(p->member); \
return res; \
} \
@@ -2377,7 +2382,7 @@ static inline void btrfs_set_##name(struct extent_buffer *eb, \
}
#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \
-static inline u##bits btrfs_##name(type *s) \
+static inline u##bits btrfs_##name(const type *s) \
{ \
return le##bits##_to_cpu(s->member); \
} \
@@ -2678,7 +2683,7 @@ static inline unsigned long btrfs_node_key_ptr_offset(int nr)
sizeof(struct btrfs_key_ptr) * nr;
}
-void btrfs_node_key(struct extent_buffer *eb,
+void btrfs_node_key(const struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr);
static inline void btrfs_set_node_key(struct extent_buffer *eb,
@@ -2707,28 +2712,28 @@ static inline struct btrfs_item *btrfs_item_nr(int nr)
return (struct btrfs_item *)btrfs_item_nr_offset(nr);
}
-static inline u32 btrfs_item_end(struct extent_buffer *eb,
+static inline u32 btrfs_item_end(const struct extent_buffer *eb,
struct btrfs_item *item)
{
return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item);
}
-static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr)
+static inline u32 btrfs_item_end_nr(const struct extent_buffer *eb, int nr)
{
return btrfs_item_end(eb, btrfs_item_nr(nr));
}
-static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr)
+static inline u32 btrfs_item_offset_nr(const struct extent_buffer *eb, int nr)
{
return btrfs_item_offset(eb, btrfs_item_nr(nr));
}
-static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr)
+static inline u32 btrfs_item_size_nr(const struct extent_buffer *eb, int nr)
{
return btrfs_item_size(eb, btrfs_item_nr(nr));
}
-static inline void btrfs_item_key(struct extent_buffer *eb,
+static inline void btrfs_item_key(const struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr)
{
struct btrfs_item *item = btrfs_item_nr(nr);
@@ -2764,8 +2769,8 @@ BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item,
BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item,
transid, 64);
-static inline void btrfs_dir_item_key(struct extent_buffer *eb,
- struct btrfs_dir_item *item,
+static inline void btrfs_dir_item_key(const struct extent_buffer *eb,
+ const struct btrfs_dir_item *item,
struct btrfs_disk_key *key)
{
read_eb_member(eb, item, struct btrfs_dir_item, location, key);
@@ -2773,7 +2778,7 @@ static inline void btrfs_dir_item_key(struct extent_buffer *eb,
static inline void btrfs_set_dir_item_key(struct extent_buffer *eb,
struct btrfs_dir_item *item,
- struct btrfs_disk_key *key)
+ const struct btrfs_disk_key *key)
{
write_eb_member(eb, item, struct btrfs_dir_item, location, key);
}
@@ -2785,8 +2790,8 @@ BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header,
BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header,
generation, 64);
-static inline void btrfs_free_space_key(struct extent_buffer *eb,
- struct btrfs_free_space_header *h,
+static inline void btrfs_free_space_key(const struct extent_buffer *eb,
+ const struct btrfs_free_space_header *h,
struct btrfs_disk_key *key)
{
read_eb_member(eb, h, struct btrfs_free_space_header, location, key);
@@ -2794,7 +2799,7 @@ static inline void btrfs_free_space_key(struct extent_buffer *eb,
static inline void btrfs_set_free_space_key(struct extent_buffer *eb,
struct btrfs_free_space_header *h,
- struct btrfs_disk_key *key)
+ const struct btrfs_disk_key *key)
{
write_eb_member(eb, h, struct btrfs_free_space_header, location, key);
}
@@ -2821,25 +2826,25 @@ static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk,
disk->objectid = cpu_to_le64(cpu->objectid);
}
-static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb,
- struct btrfs_key *key, int nr)
+static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb,
+ struct btrfs_key *key, int nr)
{
struct btrfs_disk_key disk_key;
btrfs_node_key(eb, &disk_key, nr);
btrfs_disk_key_to_cpu(key, &disk_key);
}
-static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb,
- struct btrfs_key *key, int nr)
+static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb,
+ struct btrfs_key *key, int nr)
{
struct btrfs_disk_key disk_key;
btrfs_item_key(eb, &disk_key, nr);
btrfs_disk_key_to_cpu(key, &disk_key);
}
-static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb,
- struct btrfs_dir_item *item,
- struct btrfs_key *key)
+static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb,
+ const struct btrfs_dir_item *item,
+ struct btrfs_key *key)
{
struct btrfs_disk_key disk_key;
btrfs_dir_item_key(eb, item, &disk_key);
@@ -2872,7 +2877,7 @@ BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header,
nritems, 32);
BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64);
-static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag)
+static inline int btrfs_header_flag(const struct extent_buffer *eb, u64 flag)
{
return (btrfs_header_flags(eb) & flag) == flag;
}
@@ -2891,7 +2896,7 @@ static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag)
return (flags & flag) == flag;
}
-static inline int btrfs_header_backref_rev(struct extent_buffer *eb)
+static inline int btrfs_header_backref_rev(const struct extent_buffer *eb)
{
u64 flags = btrfs_header_flags(eb);
return flags >> BTRFS_BACKREF_REV_SHIFT;
@@ -2911,12 +2916,12 @@ static inline unsigned long btrfs_header_fsid(void)
return offsetof(struct btrfs_header, fsid);
}
-static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
+static inline unsigned long btrfs_header_chunk_tree_uuid(const struct extent_buffer *eb)
{
return offsetof(struct btrfs_header, chunk_tree_uuid);
}
-static inline int btrfs_is_leaf(struct extent_buffer *eb)
+static inline int btrfs_is_leaf(const struct extent_buffer *eb)
{
return btrfs_header_level(eb) == 0;
}
@@ -2950,12 +2955,12 @@ BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item,
BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item,
rtransid, 64);
-static inline bool btrfs_root_readonly(struct btrfs_root *root)
+static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}
-static inline bool btrfs_root_dead(struct btrfs_root *root)
+static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}
@@ -3012,51 +3017,51 @@ BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup,
/* struct btrfs_balance_item */
BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64);
-static inline void btrfs_balance_data(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
+static inline void btrfs_balance_data(const struct extent_buffer *eb,
+ const struct btrfs_balance_item *bi,
struct btrfs_disk_balance_args *ba)
{
read_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
}
static inline void btrfs_set_balance_data(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
- struct btrfs_disk_balance_args *ba)
+ struct btrfs_balance_item *bi,
+ const struct btrfs_disk_balance_args *ba)
{
write_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
}
-static inline void btrfs_balance_meta(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
+static inline void btrfs_balance_meta(const struct extent_buffer *eb,
+ const struct btrfs_balance_item *bi,
struct btrfs_disk_balance_args *ba)
{
read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
}
static inline void btrfs_set_balance_meta(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
- struct btrfs_disk_balance_args *ba)
+ struct btrfs_balance_item *bi,
+ const struct btrfs_disk_balance_args *ba)
{
write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
}
-static inline void btrfs_balance_sys(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
+static inline void btrfs_balance_sys(const struct extent_buffer *eb,
+ const struct btrfs_balance_item *bi,
struct btrfs_disk_balance_args *ba)
{
read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
}
static inline void btrfs_set_balance_sys(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
- struct btrfs_disk_balance_args *ba)
+ struct btrfs_balance_item *bi,
+ const struct btrfs_disk_balance_args *ba)
{
write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
}
static inline void
btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
- struct btrfs_disk_balance_args *disk)
+ const struct btrfs_disk_balance_args *disk)
{
memset(cpu, 0, sizeof(*cpu));
@@ -3076,7 +3081,7 @@ btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
static inline void
btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
- struct btrfs_balance_args *cpu)
+ const struct btrfs_balance_args *cpu)
{
memset(disk, 0, sizeof(*disk));
@@ -3144,7 +3149,7 @@ BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64);
BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block,
uuid_tree_generation, 64);
-static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
+static inline int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
u16 t = btrfs_super_csum_type(s);
/*
@@ -3158,6 +3163,21 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
return offsetof(struct btrfs_leaf, items);
}
+/*
+ * The leaf data grows from end-to-front in the node.
+ * This returns the address of the start of the last item,
+ * which is the stop of the leaf data stack.
+ */
+static inline unsigned int leaf_data_end(const struct btrfs_root *root,
+ const struct extent_buffer *leaf)
+{
+ u32 nr = btrfs_header_nritems(leaf);
+
+ if (nr == 0)
+ return BTRFS_LEAF_DATA_SIZE(root);
+ return btrfs_item_offset_nr(leaf, nr - 1);
+}
+
/* struct btrfs_file_extent_item */
BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr,
@@ -3174,7 +3194,7 @@ BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression,
struct btrfs_file_extent_item, compression, 8);
static inline unsigned long
-btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e)
+btrfs_file_extent_inline_start(const struct btrfs_file_extent_item *e)
{
return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
@@ -3208,8 +3228,9 @@ BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
* size of any extent headers. If a file is compressed on disk, this is
* the compressed size
*/
-static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
- struct btrfs_item *e)
+static inline u32 btrfs_file_extent_inline_item_len(
+ const struct extent_buffer *eb,
+ struct btrfs_item *e)
{
return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
@@ -3217,9 +3238,9 @@ static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
/* this returns the number of file bytes represented by the inline item.
* If an item is compressed, this is the uncompressed size
*/
-static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
- int slot,
- struct btrfs_file_extent_item *fi)
+static inline u32 btrfs_file_extent_inline_len(const struct extent_buffer *eb,
+ int slot,
+ const struct btrfs_file_extent_item *fi)
{
struct btrfs_map_token token;
@@ -3241,8 +3262,8 @@ static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
/* btrfs_dev_stats_item */
-static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb,
- struct btrfs_dev_stats_item *ptr,
+static inline u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
+ const struct btrfs_dev_stats_item *ptr,
int index)
{
u64 val;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 176a27bc63aa..81e5bc62e8e3 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -620,7 +620,7 @@ static void btrfs_dev_replace_update_device_in_mapping_tree(
em = lookup_extent_mapping(em_tree, start, (u64)-1);
if (!em)
break;
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++)
if (srcdev == map->stripes[i].dev)
map->stripes[i].dev = tgtdev;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1f21c6c33228..78722aaffecd 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -49,6 +49,7 @@
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
+#include "tree-checker.h"
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
@@ -522,72 +523,6 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
return ret;
}
-#define CORRUPT(reason, eb, root, slot) \
- btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu," \
- "root=%llu, slot=%d", reason, \
- btrfs_header_bytenr(eb), root->objectid, slot)
-
-static noinline int check_leaf(struct btrfs_root *root,
- struct extent_buffer *leaf)
-{
- struct btrfs_key key;
- struct btrfs_key leaf_key;
- u32 nritems = btrfs_header_nritems(leaf);
- int slot;
-
- if (nritems == 0)
- return 0;
-
- /* Check the 0 item */
- if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
- BTRFS_LEAF_DATA_SIZE(root)) {
- CORRUPT("invalid item offset size pair", leaf, root, 0);
- return -EIO;
- }
-
- /*
- * Check to make sure each items keys are in the correct order and their
- * offsets make sense. We only have to loop through nritems-1 because
- * we check the current slot against the next slot, which verifies the
- * next slot's offset+size makes sense and that the current's slot
- * offset is correct.
- */
- for (slot = 0; slot < nritems - 1; slot++) {
- btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
- btrfs_item_key_to_cpu(leaf, &key, slot + 1);
-
- /* Make sure the keys are in the right order */
- if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
- CORRUPT("bad key order", leaf, root, slot);
- return -EIO;
- }
-
- /*
- * Make sure the offset and ends are right, remember that the
- * item data starts at the end of the leaf and grows towards the
- * front.
- */
- if (btrfs_item_offset_nr(leaf, slot) !=
- btrfs_item_end_nr(leaf, slot + 1)) {
- CORRUPT("slot offset bad", leaf, root, slot);
- return -EIO;
- }
-
- /*
- * Check to make sure that we don't point outside of the leaf,
- * just incase all the items are consistent to eachother, but
- * all point outside of the leaf.
- */
- if (btrfs_item_end_nr(leaf, slot) >
- BTRFS_LEAF_DATA_SIZE(root)) {
- CORRUPT("slot end outside of leaf", leaf, root, slot);
- return -EIO;
- }
- }
-
- return 0;
-}
-
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
u64 phy_offset, struct page *page,
u64 start, u64 end, int mirror)
@@ -654,11 +589,14 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
* that we don't try and read the other copies of this block, just
* return -EIO.
*/
- if (found_level == 0 && check_leaf(root, eb)) {
+ if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = -EIO;
}
+ if (found_level > 0 && btrfs_check_node(root, eb))
+ ret = -EIO;
+
if (!ret)
set_extent_buffer_uptodate(eb);
err:
@@ -3958,7 +3896,13 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
buf->len,
root->fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
+ /*
+	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
+	 * but the item data not yet updated, so only check item pointers
+	 * here, not item data.
+ */
+ if (btrfs_header_level(buf) == 0 &&
+ btrfs_check_leaf_relaxed(root, buf)) {
btrfs_print_leaf(root, buf);
ASSERT(0);
}
@@ -4167,6 +4111,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
spin_lock(&fs_info->ordered_root_lock);
}
spin_unlock(&fs_info->ordered_root_lock);
+
+ /*
+ * We need this here because if we've been flipped read-only we won't
+ * get sync() from the umount, so we need to make sure any ordered
+ * extents that haven't had their dirty pages IO start writeout yet
+ * actually get run and error out properly.
+ */
+ btrfs_wait_ordered_roots(fs_info, -1);
}
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 13ff0fdae03e..978bbfed5a2c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2342,7 +2342,13 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
ins.type = BTRFS_EXTENT_ITEM_KEY;
}
- BUG_ON(node->ref_mod != 1);
+ if (node->ref_mod != 1) {
+ btrfs_err(root->fs_info,
+ "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
+ node->bytenr, node->ref_mod, node->action, ref_root,
+ parent);
+ return -EIO;
+ }
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags);
ret = alloc_reserved_tree_block(trans, root,
@@ -9481,6 +9487,8 @@ static int find_first_block_group(struct btrfs_root *root,
int ret = 0;
struct btrfs_key found_key;
struct extent_buffer *leaf;
+ struct btrfs_block_group_item bg;
+ u64 flags;
int slot;
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
@@ -9502,7 +9510,47 @@ static int find_first_block_group(struct btrfs_root *root,
if (found_key.objectid >= key->objectid &&
found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
- ret = 0;
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+
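+			/*
+			 * Cross-check the block group item against its chunk
+			 * mapping: start, length and type flags must agree.
+			 */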
+ em_tree = &root->fs_info->mapping_tree.map_tree;
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, found_key.objectid,
+ found_key.offset);
+ read_unlock(&em_tree->lock);
+ if (!em) {
+ btrfs_err(root->fs_info,
+ "logical %llu len %llu found bg but no related chunk",
+ found_key.objectid, found_key.offset);
+ ret = -ENOENT;
+ } else if (em->start != found_key.objectid ||
+ em->len != found_key.offset) {
+ btrfs_err(root->fs_info,
+ "block group %llu len %llu mismatch with chunk %llu len %llu",
+ found_key.objectid, found_key.offset,
+ em->start, em->len);
+ ret = -EUCLEAN;
+ } else {
+ read_extent_buffer(leaf, &bg,
+ btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bg));
+ flags = btrfs_block_group_flags(&bg) &
+ BTRFS_BLOCK_GROUP_TYPE_MASK;
+
+ if (flags != (em->map_lookup->type &
+ BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ btrfs_err(root->fs_info,
+"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
+ found_key.objectid,
+ found_key.offset, flags,
+ (BTRFS_BLOCK_GROUP_TYPE_MASK &
+ em->map_lookup->type));
+ ret = -EUCLEAN;
+ } else {
+ ret = 0;
+ }
+ }
+ free_extent_map(em);
goto out;
}
path->slots[0]++;
@@ -9717,6 +9765,62 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
return cache;
}
+
+/*
+ * Iterate all chunks and verify that each of them has a corresponding
+ * block group
+ */
+static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+ struct extent_map *em;
+ struct btrfs_block_group_cache *bg;
+ u64 start = 0;
+ int ret = 0;
+
+ while (1) {
+ read_lock(&map_tree->map_tree.lock);
+ /*
+ * lookup_extent_mapping will return the first extent map
+ * intersecting the range, so setting @len to 1 is enough to
+ * get the first chunk.
+ */
+ em = lookup_extent_mapping(&map_tree->map_tree, start, 1);
+ read_unlock(&map_tree->map_tree.lock);
+ if (!em)
+ break;
+
+ bg = btrfs_lookup_block_group(fs_info, em->start);
+ if (!bg) {
+ btrfs_err(fs_info,
+ "chunk start=%llu len=%llu doesn't have corresponding block group",
+ em->start, em->len);
+ ret = -EUCLEAN;
+ free_extent_map(em);
+ break;
+ }
+ if (bg->key.objectid != em->start ||
+ bg->key.offset != em->len ||
+ (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
+ (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ btrfs_err(fs_info,
+"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
+ em->start, em->len,
+ em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
+ bg->key.objectid, bg->key.offset,
+ bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
+ ret = -EUCLEAN;
+ free_extent_map(em);
+ btrfs_put_block_group(bg);
+ break;
+ }
+ start = em->start + em->len;
+ free_extent_map(em);
+ btrfs_put_block_group(bg);
+ }
+ return ret;
+}
+
int btrfs_read_block_groups(struct btrfs_root *root)
{
struct btrfs_path *path;
@@ -9903,7 +10007,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
}
init_global_block_rsv(info);
- ret = 0;
+ ret = check_chunk_block_group_mappings(info);
error:
btrfs_free_path(path);
return ret;
@@ -10388,7 +10492,7 @@ btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
* more device items and remove one chunk item), but this is done at
* btrfs_remove_chunk() through a call to check_system_chunk().
*/
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
num_items = 3 + map->num_stripes;
free_extent_map(em);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7de8d545f4d6..573c9d62cfc9 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3847,8 +3847,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct block_device *bdev = fs_info->fs_devices->latest_bdev;
struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
u64 offset = eb->start;
+ u32 nritems;
unsigned long i, num_pages;
unsigned long bio_flags = 0;
+ unsigned long start, end;
int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
int ret = 0;
@@ -3858,6 +3860,23 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
bio_flags = EXTENT_BIO_TREE_LOG;
+	/* Set btree blocks beyond nritems to 0 to avoid stale content. */
+ nritems = btrfs_header_nritems(eb);
+ if (btrfs_header_level(eb) > 0) {
+ end = btrfs_node_key_ptr_offset(nritems);
+
+ memset_extent_buffer(eb, 0, end, eb->len - end);
+ } else {
+ /*
+ * leaf:
+ * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
+ */
+ start = btrfs_item_nr_offset(nritems);
+ end = btrfs_leaf_data(eb) +
+ leaf_data_end(fs_info->tree_root, eb);
+ memset_extent_buffer(eb, 0, start, end - start);
+ }
+
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
@@ -5351,9 +5370,8 @@ unlock_exit:
return ret;
}
-void read_extent_buffer(struct extent_buffer *eb, void *dstv,
- unsigned long start,
- unsigned long len)
+void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
+ unsigned long start, unsigned long len)
{
size_t cur;
size_t offset;
@@ -5382,9 +5400,9 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
}
}
-int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
- unsigned long start,
- unsigned long len)
+int read_extent_buffer_to_user(const struct extent_buffer *eb,
+ void __user *dstv,
+ unsigned long start, unsigned long len)
{
size_t cur;
size_t offset;
@@ -5419,10 +5437,10 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
return ret;
}
-int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
- unsigned long min_len, char **map,
- unsigned long *map_start,
- unsigned long *map_len)
+int map_private_extent_buffer(const struct extent_buffer *eb,
+ unsigned long start, unsigned long min_len,
+ char **map, unsigned long *map_start,
+ unsigned long *map_len)
{
size_t offset = start & (PAGE_CACHE_SIZE - 1);
char *kaddr;
@@ -5457,9 +5475,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
return 0;
}
-int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
- unsigned long start,
- unsigned long len)
+int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
+ unsigned long start, unsigned long len)
{
size_t cur;
size_t offset;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index f4c1ae11855f..751435967724 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -308,14 +308,13 @@ static inline void extent_buffer_get(struct extent_buffer *eb)
atomic_inc(&eb->refs);
}
-int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
- unsigned long start,
- unsigned long len);
-void read_extent_buffer(struct extent_buffer *eb, void *dst,
+int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
+ unsigned long start, unsigned long len);
+void read_extent_buffer(const struct extent_buffer *eb, void *dst,
unsigned long start,
unsigned long len);
-int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
- unsigned long start,
+int read_extent_buffer_to_user(const struct extent_buffer *eb,
+ void __user *dst, unsigned long start,
unsigned long len);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
unsigned long start, unsigned long len);
@@ -334,10 +333,10 @@ int set_extent_buffer_uptodate(struct extent_buffer *eb);
int clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
-int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
- unsigned long min_len, char **map,
- unsigned long *map_start,
- unsigned long *map_len);
+int map_private_extent_buffer(const struct extent_buffer *eb,
+ unsigned long offset, unsigned long min_len,
+ char **map, unsigned long *map_start,
+ unsigned long *map_len);
int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 6a98bddd8f33..84fb56d5c018 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -76,7 +76,7 @@ void free_extent_map(struct extent_map *em)
WARN_ON(extent_map_in_tree(em));
WARN_ON(!list_empty(&em->list));
if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
- kfree(em->bdev);
+ kfree(em->map_lookup);
kmem_cache_free(extent_map_cache, em);
}
}
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index b2991fd8583e..eb8b8fae036b 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -32,7 +32,15 @@ struct extent_map {
u64 block_len;
u64 generation;
unsigned long flags;
- struct block_device *bdev;
+ union {
+ struct block_device *bdev;
+
+ /*
+ * used for chunk mappings
+ * flags & EXTENT_FLAG_FS_MAPPING must be set
+ */
+ struct map_lookup *map_lookup;
+ };
atomic_t refs;
unsigned int compress_type;
struct list_head list;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6dca9f937bf6..cc9ccc42f469 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3460,7 +3460,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
return ret;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
if (em->start != chunk_offset)
goto out;
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b976597b0721..63ffd213b0b7 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -50,8 +50,8 @@ static inline void put_unaligned_le8(u8 val, void *p)
*/
#define DEFINE_BTRFS_SETGET_BITS(bits) \
-u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
- unsigned long off, \
+u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
+ const void *ptr, unsigned long off, \
struct btrfs_map_token *token) \
{ \
unsigned long part_offset = (unsigned long)ptr; \
@@ -90,7 +90,8 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
return res; \
} \
void btrfs_set_token_##bits(struct extent_buffer *eb, \
- void *ptr, unsigned long off, u##bits val, \
+ const void *ptr, unsigned long off, \
+ u##bits val, \
struct btrfs_map_token *token) \
{ \
unsigned long part_offset = (unsigned long)ptr; \
@@ -133,7 +134,7 @@ DEFINE_BTRFS_SETGET_BITS(16)
DEFINE_BTRFS_SETGET_BITS(32)
DEFINE_BTRFS_SETGET_BITS(64)
-void btrfs_node_key(struct extent_buffer *eb,
+void btrfs_node_key(const struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr)
{
unsigned long ptr = btrfs_node_key_ptr_offset(nr);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
new file mode 100644
index 000000000000..5b98f3c76ce4
--- /dev/null
+++ b/fs/btrfs/tree-checker.c
@@ -0,0 +1,649 @@
+/*
+ * Copyright (C) Qu Wenruo 2017. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program.
+ */
+
+/*
+ * The module is used to catch unexpected/corrupted tree block data.
+ * Such behavior can be caused either by a fuzzed image or bugs.
+ *
+ * The objective is to do leaf/node validation checks when a tree block is
+ * read from disk, and to check *every* possible member, so other code
+ * won't need to check them again.
+ *
+ * Due to the potential for unwanted damage, every checker needs to be
+ * carefully reviewed, otherwise it may prevent valid images from mounting.
+ */
+
+#include "ctree.h"
+#include "tree-checker.h"
+#include "disk-io.h"
+#include "compression.h"
+#include "hash.h"
+#include "volumes.h"
+
+#define CORRUPT(reason, eb, root, slot) \
+ btrfs_crit(root->fs_info, \
+ "corrupt %s, %s: block=%llu, root=%llu, slot=%d", \
+ btrfs_header_level(eb) == 0 ? "leaf" : "node", \
+ reason, btrfs_header_bytenr(eb), root->objectid, slot)
+
+/*
+ * Error message should follow the following format:
+ * corrupt <type>: <identifier>, <reason>[, <bad_value>]
+ *
+ * @type: leaf or node
+ * @identifier: the necessary info to locate the leaf/node.
+ *              It's recommended to decode key.objectid/offset if it's
+ *              meaningful.
+ * @reason:     describe the error
+ * @bad_value:  optional, it's recommended to output the bad value and its
+ *              expected value (range).
+ *
+ * Since comma is used to separate the components, only space is allowed
+ * inside each component.
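+ *
+ * Example with hypothetical values:
+ *   corrupt leaf: root=5 block=29360128 slot=1, bad key order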
+ */
+
+/*
+ * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
+ * Allows callers to customize the output.
+ */
+__printf(4, 5)
+static void generic_err(const struct btrfs_root *root,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(root->fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ root->objectid, btrfs_header_bytenr(eb), slot, &vaf);
+ va_end(args);
+}
+
+static int check_extent_data_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_file_extent_item *fi;
+ u32 sectorsize = root->sectorsize;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+
+ if (!IS_ALIGNED(key->offset, sectorsize)) {
+ CORRUPT("unaligned key offset for file extent",
+ leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
+ CORRUPT("invalid file extent type", leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ /*
+	 * Support for new compression/encryption must introduce an incompat flag,
+ * and must be caught in open_ctree().
+ */
+ if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
+ CORRUPT("invalid file extent compression", leaf, root, slot);
+ return -EUCLEAN;
+ }
+ if (btrfs_file_extent_encryption(leaf, fi)) {
+ CORRUPT("invalid file extent encryption", leaf, root, slot);
+ return -EUCLEAN;
+ }
+ if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
+ /* Inline extent must have 0 as key offset */
+ if (key->offset) {
+ CORRUPT("inline extent has non-zero key offset",
+ leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ /* Compressed inline extent has no on-disk size, skip it */
+ if (btrfs_file_extent_compression(leaf, fi) !=
+ BTRFS_COMPRESS_NONE)
+ return 0;
+
+ /* Uncompressed inline extent size must match item size */
+ if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
+ btrfs_file_extent_ram_bytes(leaf, fi)) {
+ CORRUPT("plaintext inline extent has invalid size",
+ leaf, root, slot);
+ return -EUCLEAN;
+ }
+ return 0;
+ }
+
+ /* Regular or preallocated extent has fixed item size */
+ if (item_size != sizeof(*fi)) {
+		CORRUPT(
+		"regular or preallocated extent data item size is invalid",
+ leaf, root, slot);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(btrfs_file_extent_ram_bytes(leaf, fi), sectorsize) ||
+ !IS_ALIGNED(btrfs_file_extent_disk_bytenr(leaf, fi), sectorsize) ||
+ !IS_ALIGNED(btrfs_file_extent_disk_num_bytes(leaf, fi), sectorsize) ||
+ !IS_ALIGNED(btrfs_file_extent_offset(leaf, fi), sectorsize) ||
+ !IS_ALIGNED(btrfs_file_extent_num_bytes(leaf, fi), sectorsize)) {
+ CORRUPT(
+ "regular or preallocated extent data item has unaligned value",
+ leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ return 0;
+}
+
+static int check_csum_item(struct btrfs_root *root, struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ u32 sectorsize = root->sectorsize;
+ u32 csumsize = btrfs_super_csum_size(root->fs_info->super_copy);
+
+ if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
+ CORRUPT("invalid objectid for csum item", leaf, root, slot);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(key->offset, sectorsize)) {
+ CORRUPT("unaligned key offset for csum item", leaf, root, slot);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
+ CORRUPT("unaligned csum item size", leaf, root, slot);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
+/*
+ * Customized reported for dir_item, only important new info is key->objectid,
+ * which represents inode number
+ */
+__printf(4, 5)
+static void dir_item_err(const struct btrfs_root *root,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(root->fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node", root->objectid,
+ btrfs_header_bytenr(eb), slot, key.objectid, &vaf);
+ va_end(args);
+}
+
+static int check_dir_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_dir_item *di;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u32 cur = 0;
+
+ di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+ while (cur < item_size) {
+ u32 name_len;
+ u32 data_len;
+ u32 max_name_len;
+ u32 total_size;
+ u32 name_hash;
+ u8 dir_type;
+
+ /* header itself should not cross item boundary */
+ if (cur + sizeof(*di) > item_size) {
+ dir_item_err(root, leaf, slot,
+ "dir item header crosses item boundary, have %zu boundary %u",
+ cur + sizeof(*di), item_size);
+ return -EUCLEAN;
+ }
+
+ /* dir type check */
+ dir_type = btrfs_dir_type(leaf, di);
+ if (dir_type >= BTRFS_FT_MAX) {
+ dir_item_err(root, leaf, slot,
+ "invalid dir item type, have %u expect [0, %u)",
+ dir_type, BTRFS_FT_MAX);
+ return -EUCLEAN;
+ }
+
+ if (key->type == BTRFS_XATTR_ITEM_KEY &&
+ dir_type != BTRFS_FT_XATTR) {
+ dir_item_err(root, leaf, slot,
+ "invalid dir item type for XATTR key, have %u expect %u",
+ dir_type, BTRFS_FT_XATTR);
+ return -EUCLEAN;
+ }
+ if (dir_type == BTRFS_FT_XATTR &&
+ key->type != BTRFS_XATTR_ITEM_KEY) {
+ dir_item_err(root, leaf, slot,
+ "xattr dir type found for non-XATTR key");
+ return -EUCLEAN;
+ }
+ if (dir_type == BTRFS_FT_XATTR)
+ max_name_len = XATTR_NAME_MAX;
+ else
+ max_name_len = BTRFS_NAME_LEN;
+
+ /* Name/data length check */
+ name_len = btrfs_dir_name_len(leaf, di);
+ data_len = btrfs_dir_data_len(leaf, di);
+ if (name_len > max_name_len) {
+ dir_item_err(root, leaf, slot,
+ "dir item name len too long, have %u max %u",
+ name_len, max_name_len);
+ return -EUCLEAN;
+ }
+ if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
+ dir_item_err(root, leaf, slot,
+ "dir item name and data len too long, have %u max %zu",
+ name_len + data_len,
+ BTRFS_MAX_XATTR_SIZE(root));
+ return -EUCLEAN;
+ }
+
+ if (data_len && dir_type != BTRFS_FT_XATTR) {
+ dir_item_err(root, leaf, slot,
+ "dir item with invalid data len, have %u expect 0",
+ data_len);
+ return -EUCLEAN;
+ }
+
+ total_size = sizeof(*di) + name_len + data_len;
+
+ /* header and name/data should not cross item boundary */
+ if (cur + total_size > item_size) {
+ dir_item_err(root, leaf, slot,
+ "dir item data crosses item boundary, have %u boundary %u",
+ cur + total_size, item_size);
+ return -EUCLEAN;
+ }
+
+ /*
+ * Special check for XATTR/DIR_ITEM, as key->offset is name
+ * hash, should match its name
+ */
+ if (key->type == BTRFS_DIR_ITEM_KEY ||
+ key->type == BTRFS_XATTR_ITEM_KEY) {
+ char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
+
+ read_extent_buffer(leaf, namebuf,
+ (unsigned long)(di + 1), name_len);
+ name_hash = btrfs_name_hash(namebuf, name_len);
+ if (key->offset != name_hash) {
+ dir_item_err(root, leaf, slot,
+ "name hash mismatch with key, have 0x%016x expect 0x%016llx",
+ name_hash, key->offset);
+ return -EUCLEAN;
+ }
+ }
+ cur += total_size;
+ di = (struct btrfs_dir_item *)((void *)di + total_size);
+ }
+ return 0;
+}
+
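The hash comparison at the end of check_dir_item() mirrors how the key offset
was produced at insert time: btrfs_name_hash() is a crc32c over the name. A
stand-alone sketch of the validation flow (the bitwise crc32c below is a
stand-in; the kernel's exact seeding and finalization may differ, so treat
concrete hash values as illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)   /* reflected poly 0x82F63B78 */
                        crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
        }
        return crc;
}

static uint64_t name_hash(const char *name, int len)
{
        return crc32c(~1U, name, len);        /* seed used by btrfs */
}

/* a DIR_ITEM/XATTR_ITEM is valid only if key->offset equals the name hash */
static bool dir_key_matches(uint64_t key_offset, const char *name)
{
        return key_offset == name_hash(name, strlen(name));
}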
+__printf(4, 5)
+__cold
+static void block_group_err(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
+ key.objectid, key.offset, &vaf);
+ va_end(args);
+}
+
+static int check_block_group_item(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_block_group_item bgi;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u64 flags;
+ u64 type;
+
+ /*
+ * Here we don't really care about alignment since extent allocator can
+ * handle it. We care more about the size: if one block group is
+ * larger than the maximum size, it must be some obvious corruption.
+ */
+ if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group size, have %llu expect (0, %llu]",
+ key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
+ return -EUCLEAN;
+ }
+
+ if (item_size != sizeof(bgi)) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid item size, have %u expect %zu",
+ item_size, sizeof(bgi));
+ return -EUCLEAN;
+ }
+
+ read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bgi));
+ if (btrfs_block_group_chunk_objectid(&bgi) !=
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group chunk objectid, have %llu expect %llu",
+ btrfs_block_group_chunk_objectid(&bgi),
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ return -EUCLEAN;
+ }
+
+ if (btrfs_block_group_used(&bgi) > key->offset) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group used, have %llu expect [0, %llu)",
+ btrfs_block_group_used(&bgi), key->offset);
+ return -EUCLEAN;
+ }
+
+ flags = btrfs_block_group_flags(&bgi);
+ if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
+ block_group_err(fs_info, leaf, slot,
+"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
+ flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
+ hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
+ return -EUCLEAN;
+ }
+
+ type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
+ if (type != BTRFS_BLOCK_GROUP_DATA &&
+ type != BTRFS_BLOCK_GROUP_METADATA &&
+ type != BTRFS_BLOCK_GROUP_SYSTEM &&
+ type != (BTRFS_BLOCK_GROUP_METADATA |
+ BTRFS_BLOCK_GROUP_DATA)) {
+ block_group_err(fs_info, leaf, slot,
+"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
+ type, hweight64(type),
+ BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
+ BTRFS_BLOCK_GROUP_SYSTEM,
+ BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
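The hweight64() test above encodes the rule that a block group carries at most
one RAID profile bit. A self-contained model (bit positions here are
hypothetical; only the popcount rule matters):

#include <stdbool.h>
#include <stdint.h>

#define BG_RAID0 (1ULL << 3)   /* illustrative bit positions */
#define BG_RAID1 (1ULL << 4)
#define BG_DUP   (1ULL << 5)
#define BG_PROFILE_MASK (BG_RAID0 | BG_RAID1 | BG_DUP)

static int popcount64(uint64_t v)
{
        int n = 0;

        for (; v; v &= v - 1)   /* clear lowest set bit per iteration */
                n++;
        return n;
}

/* at most one profile bit may be set; zero set bits means SINGLE */
static bool profile_flags_ok(uint64_t flags)
{
        return popcount64(flags & BG_PROFILE_MASK) <= 1;
}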
+/*
+ * Common entry point to dispatch the item-specific validation.
+ */
+static int check_leaf_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ int ret = 0;
+
+ switch (key->type) {
+ case BTRFS_EXTENT_DATA_KEY:
+ ret = check_extent_data_item(root, leaf, key, slot);
+ break;
+ case BTRFS_EXTENT_CSUM_KEY:
+ ret = check_csum_item(root, leaf, key, slot);
+ break;
+ case BTRFS_DIR_ITEM_KEY:
+ case BTRFS_DIR_INDEX_KEY:
+ case BTRFS_XATTR_ITEM_KEY:
+ ret = check_dir_item(root, leaf, key, slot);
+ break;
+ case BTRFS_BLOCK_GROUP_ITEM_KEY:
+ ret = check_block_group_item(root->fs_info, leaf, key, slot);
+ break;
+ }
+ return ret;
+}
+
+static int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf,
+ bool check_item_data)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ /* No valid key type has value 0, so all keys must compare larger than this one */
+ struct btrfs_key prev_key = {0, 0, 0};
+ struct btrfs_key key;
+ u32 nritems = btrfs_header_nritems(leaf);
+ int slot;
+
+ if (btrfs_header_level(leaf) != 0) {
+ generic_err(root, leaf, 0,
+ "invalid level for leaf, have %d expect 0",
+ btrfs_header_level(leaf));
+ return -EUCLEAN;
+ }
+
+ /*
+ * Extent buffers from a relocation tree have an owner field that
+ * corresponds to the subvolume tree they are based on. So just from an
+ * extent buffer alone we cannot find out the id of the corresponding
+ * subvolume tree, and thus cannot tell whether the extent buffer is the
+ * root of the relocation tree or not. So skip this check for relocation
+ * trees.
+ */
+ if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
+ u64 owner = btrfs_header_owner(leaf);
+ struct btrfs_root *check_root;
+
+ /* These trees must never be empty */
+ if (owner == BTRFS_ROOT_TREE_OBJECTID ||
+ owner == BTRFS_CHUNK_TREE_OBJECTID ||
+ owner == BTRFS_EXTENT_TREE_OBJECTID ||
+ owner == BTRFS_DEV_TREE_OBJECTID ||
+ owner == BTRFS_FS_TREE_OBJECTID ||
+ owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ generic_err(root, leaf, 0,
+ "invalid root, root %llu must never be empty",
+ owner);
+ return -EUCLEAN;
+ }
+ key.objectid = owner;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+
+ check_root = btrfs_get_fs_root(fs_info, &key, false);
+ /*
+ * The only reason we also check NULL here is that during
+ * open_ctree() some roots have not yet been set up.
+ */
+ if (!IS_ERR_OR_NULL(check_root)) {
+ struct extent_buffer *eb;
+
+ eb = btrfs_root_node(check_root);
+ /* if leaf is the root, then it's fine */
+ if (leaf != eb) {
+ CORRUPT("non-root leaf's nritems is 0",
+ leaf, check_root, 0);
+ free_extent_buffer(eb);
+ return -EUCLEAN;
+ }
+ free_extent_buffer(eb);
+ }
+ return 0;
+ }
+
+ if (nritems == 0)
+ return 0;
+
+ /*
+ * Check the following things to make sure this is a good leaf, and
+ * leaf users won't need to bother with similar sanity checks:
+ *
+ * 1) key ordering
+ * 2) item offset and size
+ * No overlap, no hole, all inside the leaf.
+ * 3) item content
+ * If possible, do comprehensive sanity check.
+ * NOTE: All checks must only rely on the item data itself.
+ */
+ for (slot = 0; slot < nritems; slot++) {
+ u32 item_end_expected;
+ int ret;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+
+ /* Make sure the keys are in the right order */
+ if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
+ CORRUPT("bad key order", leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ /*
+ * Make sure the offsets and ends are right; remember that the
+ * item data starts at the end of the leaf and grows towards the
+ * front.
+ */
+ if (slot == 0)
+ item_end_expected = BTRFS_LEAF_DATA_SIZE(root);
+ else
+ item_end_expected = btrfs_item_offset_nr(leaf,
+ slot - 1);
+ if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
+ CORRUPT("slot offset bad", leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ /*
+ * Check to make sure that we don't point outside of the leaf,
+ * just in case all the items are consistent with each other, but
+ * all point outside of the leaf.
+ */
+ if (btrfs_item_end_nr(leaf, slot) >
+ BTRFS_LEAF_DATA_SIZE(root)) {
+ CORRUPT("slot end outside of leaf", leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ /* Also check that the item's data does not overlap its own btrfs_item header. */
+ if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
+ btrfs_item_ptr_offset(leaf, slot)) {
+ CORRUPT("slot overlap with its data", leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ if (check_item_data) {
+ /*
+ * Check if the item size and content meet other
+ * criteria
+ */
+ ret = check_leaf_item(root, leaf, &key, slot);
+ if (ret < 0)
+ return ret;
+ }
+
+ prev_key.objectid = key.objectid;
+ prev_key.type = key.type;
+ prev_key.offset = key.offset;
+ }
+
+ return 0;
+}
+
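The offset/end checks in the loop above enforce one invariant: item data is
packed back-to-front with no holes and no overlaps. The same invariant on a
toy layout (a sketch, not the on-disk format):

#include <stdbool.h>
#include <stdint.h>

struct toy_item {
        uint32_t offset;        /* start of this item's data in the leaf */
        uint32_t size;          /* length of this item's data */
};

/* Item 0's data must end at the end of the data area; every later item's
 * data must end exactly where the previous item's data begins. */
static bool toy_leaf_ok(const struct toy_item *items, int nritems,
                        uint32_t data_size)
{
        uint32_t end_expected = data_size;

        for (int i = 0; i < nritems; i++) {
                if (items[i].offset + items[i].size != end_expected)
                        return false;
                end_expected = items[i].offset;
        }
        return true;
}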
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf)
+{
+ return check_leaf(root, leaf, true);
+}
+
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+ struct extent_buffer *leaf)
+{
+ return check_leaf(root, leaf, false);
+}
+
+int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node)
+{
+ unsigned long nr = btrfs_header_nritems(node);
+ struct btrfs_key key, next_key;
+ int slot;
+ int level = btrfs_header_level(node);
+ u64 bytenr;
+ int ret = 0;
+
+ if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
+ generic_err(root, node, 0,
+ "invalid level for node, have %d expect [1, %d]",
+ level, BTRFS_MAX_LEVEL - 1);
+ return -EUCLEAN;
+ }
+ if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
+ btrfs_crit(root->fs_info,
+"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%zu]",
+ root->objectid, node->start,
+ nr == 0 ? "small" : "large", nr,
+ BTRFS_NODEPTRS_PER_BLOCK(root));
+ return -EUCLEAN;
+ }
+
+ for (slot = 0; slot < nr - 1; slot++) {
+ bytenr = btrfs_node_blockptr(node, slot);
+ btrfs_node_key_to_cpu(node, &key, slot);
+ btrfs_node_key_to_cpu(node, &next_key, slot + 1);
+
+ if (!bytenr) {
+ generic_err(root, node, slot,
+ "invalid NULL node pointer");
+ ret = -EUCLEAN;
+ goto out;
+ }
+ if (!IS_ALIGNED(bytenr, root->sectorsize)) {
+ generic_err(root, node, slot,
+ "unaligned pointer, have %llu should be aligned to %u",
+ bytenr, root->sectorsize);
+ ret = -EUCLEAN;
+ goto out;
+ }
+
+ if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
+ generic_err(root, node, slot,
+ "bad key order, current (%llu %u %llu) next (%llu %u %llu)",
+ key.objectid, key.type, key.offset,
+ next_key.objectid, next_key.type,
+ next_key.offset);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
new file mode 100644
index 000000000000..3d53e8d6fda0
--- /dev/null
+++ b/fs/btrfs/tree-checker.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) Qu Wenruo 2017. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program.
+ */
+
+#ifndef __BTRFS_TREE_CHECKER__
+#define __BTRFS_TREE_CHECKER__
+
+#include "ctree.h"
+#include "extent_io.h"
+
+/*
+ * Comprehensive leaf checker.
+ * Will check not only the item pointers, but also every possible member
+ * in item data.
+ */
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf);
+
+/*
+ * Less strict leaf checker.
+ * Will only check item pointers, not reading item data.
+ */
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+ struct extent_buffer *leaf);
+int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node);
+
+#endif
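How the two entry points are meant to be split between the read and write
paths, per the header comments above (a sketch; the call sites are
illustrative, not verbatim kernel code):

/* Cheap structural validation on read; the full item-data pass is
 * reserved for write-out, where corruption must not reach disk. */
static int validate_eb(struct btrfs_root *root, struct extent_buffer *eb,
                       bool writing_out)
{
        if (btrfs_header_level(eb) != 0)
                return btrfs_check_node(root, eb);
        if (writing_out)
                return btrfs_check_leaf_full(root, eb);
        return btrfs_check_leaf_relaxed(root, eb);
}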
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b4d63a9842fa..5e8fe8f3942d 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1184,7 +1184,7 @@ again:
struct map_lookup *map;
int i;
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++) {
u64 end;
@@ -2757,7 +2757,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
free_extent_map(em);
return -EINVAL;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
lock_chunks(root->fs_info->chunk_root);
check_system_chunk(trans, extent_root, map->type);
unlock_chunks(root->fs_info->chunk_root);
@@ -4540,7 +4540,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (type & BTRFS_BLOCK_GROUP_DATA) {
max_stripe_size = 1024 * 1024 * 1024;
- max_chunk_size = 10 * max_stripe_size;
+ max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
if (!devs_max)
devs_max = BTRFS_MAX_DEVS(info->chunk_root);
} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
@@ -4731,7 +4731,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
goto error;
}
set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
- em->bdev = (struct block_device *)map;
+ em->map_lookup = map;
em->start = start;
em->len = num_bytes;
em->block_start = 0;
@@ -4826,7 +4826,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
return -EINVAL;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
item_size = btrfs_chunk_item_size(map->num_stripes);
stripe_size = em->orig_block_len;
@@ -4968,7 +4968,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
if (!em)
return 1;
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++) {
if (map->stripes[i].dev->missing) {
miss_ndevs++;
@@ -5048,7 +5048,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
return 1;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
ret = map->num_stripes;
else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
@@ -5091,7 +5091,7 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
BUG_ON(!em);
BUG_ON(em->start > logical || em->start + em->len < logical);
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
len = map->stripe_len * nr_data_stripes(map);
free_extent_map(em);
@@ -5112,7 +5112,7 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
BUG_ON(!em);
BUG_ON(em->start > logical || em->start + em->len < logical);
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
ret = 1;
free_extent_map(em);
@@ -5271,7 +5271,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
return -EINVAL;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
offset = logical - em->start;
stripe_len = map->stripe_len;
@@ -5813,7 +5813,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
free_extent_map(em);
return -EIO;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
length = em->len;
rmap_len = map->stripe_len;
@@ -6208,6 +6208,101 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
return dev;
}
+/* Return -EIO on any error, otherwise return 0. */
+static int btrfs_check_chunk_valid(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_chunk *chunk, u64 logical)
+{
+ u64 length;
+ u64 stripe_len;
+ u16 num_stripes;
+ u16 sub_stripes;
+ u64 type;
+ u64 features;
+ bool mixed = false;
+
+ length = btrfs_chunk_length(leaf, chunk);
+ stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+ sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ type = btrfs_chunk_type(leaf, chunk);
+
+ if (!num_stripes) {
+ btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
+ num_stripes);
+ return -EIO;
+ }
+ if (!IS_ALIGNED(logical, root->sectorsize)) {
+ btrfs_err(root->fs_info,
+ "invalid chunk logical %llu", logical);
+ return -EIO;
+ }
+ if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
+ btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
+ btrfs_chunk_sector_size(leaf, chunk));
+ return -EIO;
+ }
+ if (!length || !IS_ALIGNED(length, root->sectorsize)) {
+ btrfs_err(root->fs_info,
+ "invalid chunk length %llu", length);
+ return -EIO;
+ }
+ if (!is_power_of_2(stripe_len)) {
+ btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
+ stripe_len);
+ return -EIO;
+ }
+ if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
+ type) {
+ btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
+ ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
+ BTRFS_BLOCK_GROUP_PROFILE_MASK) &
+ btrfs_chunk_type(leaf, chunk));
+ return -EIO;
+ }
+
+ if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
+ btrfs_err(root->fs_info, "missing chunk type flag: 0x%llx", type);
+ return -EIO;
+ }
+
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
+ (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
+ btrfs_err(root->fs_info,
+ "system chunk with data or metadata type: 0x%llx", type);
+ return -EIO;
+ }
+
+ features = btrfs_super_incompat_flags(root->fs_info->super_copy);
+ if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+ mixed = true;
+
+ if (!mixed) {
+ if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
+ (type & BTRFS_BLOCK_GROUP_DATA)) {
+ btrfs_err(root->fs_info,
+ "mixed chunk type in non-mixed mode: 0x%llx", type);
+ return -EIO;
+ }
+ }
+
+ if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
+ (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
+ (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
+ ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
+ num_stripes != 1)) {
+ btrfs_err(root->fs_info,
+ "invalid num_stripes:sub_stripes %u:%u for profile %llu",
+ num_stripes, sub_stripes,
+ type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+ return -EIO;
+ }
+
+ return 0;
+}
+
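The combined num_stripes/sub_stripes condition above unpacks into a
per-profile rule table; restated as data for readability (a sketch that only
restates the bounds enforced above, nothing new):

#include <stdint.h>

struct profile_rule {
        const char *name;
        uint16_t min_stripes;   /* 0: not checked */
        uint16_t max_stripes;   /* 0: not checked */
        int16_t  sub_stripes;   /* -1: not checked; else required value */
};

static const struct profile_rule rules[] = {
        { "RAID10", 0, 0,  2 },  /* only sub_stripes == 2 is checked */
        { "RAID1",  1, 0, -1 },
        { "RAID5",  2, 0, -1 },
        { "RAID6",  3, 0, -1 },
        { "DUP",    0, 2, -1 },  /* num_stripes may not exceed 2 */
        { "SINGLE", 1, 1, -1 },  /* no profile bit: exactly one stripe */
};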
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
@@ -6217,6 +6312,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
struct extent_map *em;
u64 logical;
u64 length;
+ u64 stripe_len;
u64 devid;
u8 uuid[BTRFS_UUID_SIZE];
int num_stripes;
@@ -6225,6 +6321,12 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
+ stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+
+ ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
+ if (ret)
+ return ret;
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
@@ -6241,7 +6343,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
em = alloc_extent_map();
if (!em)
return -ENOMEM;
- num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
if (!map) {
free_extent_map(em);
@@ -6249,7 +6350,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
}
set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
- em->bdev = (struct block_device *)map;
+ em->map_lookup = map;
em->start = logical;
em->len = length;
em->orig_start = 0;
@@ -6473,6 +6574,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
u32 array_size;
u32 len = 0;
u32 cur_offset;
+ u64 type;
struct btrfs_key key;
ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
@@ -6539,6 +6641,15 @@ int btrfs_read_sys_array(struct btrfs_root *root)
break;
}
+ type = btrfs_chunk_type(sb, chunk);
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
+ btrfs_err(root->fs_info,
+ "invalid chunk type %llu in sys_array at offset %u",
+ type, cur_offset);
+ ret = -EIO;
+ break;
+ }
+
len = btrfs_chunk_item_size(num_stripes);
if (cur_offset + len > array_size)
goto out_short_read;
@@ -6948,7 +7059,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
/* In order to kick the device replace finish process */
lock_chunks(root);
list_for_each_entry(em, &transaction->pending_chunks, list) {
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++) {
dev = map->stripes[i].dev;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index d5c84f6b1353..3c651df420be 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -24,6 +24,8 @@
#include <linux/btrfs.h>
#include "async-thread.h"
+#define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
+
extern struct mutex uuid_mutex;
#define BTRFS_STRIPE_LEN (64 * 1024)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 0e3de1bb6500..e7b54514d99a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3243,7 +3243,6 @@ retry:
tcap->cap_id = t_cap_id;
tcap->seq = t_seq - 1;
tcap->issue_seq = t_seq - 1;
- tcap->mseq = t_mseq;
tcap->issued |= issued;
tcap->implemented |= issued;
if (cap == ci->i_auth_cap)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 79a1bad88931..7b00727e1c11 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1073,10 +1073,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf) {
+ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
free_xid(xid);
return -EINVAL;
}
@@ -1404,10 +1404,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf)
+ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
return -EINVAL;
max_num = (max_buf - sizeof(struct smb_hdr)) /
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index b2aff0c6f22c..b7885dc0d9bb 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -123,10 +123,10 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf)
+ if (max_buf < sizeof(struct smb2_lock_element))
return -EINVAL;
max_num = max_buf / sizeof(struct smb2_lock_element);
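The stricter lower bound matters because the old "!max_buf" test let small
nonzero values through; in the cifs_push_mandatory_locks() variant the
subsequent unsigned subtraction then wraps. A short illustration with
stand-in sizes:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        unsigned int max_buf = 8;              /* small but nonzero */
        size_t hdr = 32, range = 12;           /* stand-in struct sizes */

        /* passes the old "!max_buf" check, then wraps to a huge count */
        size_t max_num = (max_buf - hdr) / range;
        printf("max_num = %zu\n", max_num);
        return 0;
}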
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 8257a5a97cc0..98c25b969ab8 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -377,8 +377,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
{STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
{STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
- {STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
- {STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
+ {STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
+ {STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
{STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
{STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
"STATUS_CTL_FILE_NOT_SUPPORTED"},
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 54af10204e83..1cf0a336ec06 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -360,7 +360,7 @@ uncork:
if (rc < 0 && rc != -EINTR)
cifs_dbg(VFS, "Error %d sending data on socket to server\n",
rc);
- else
+ else if (rc > 0)
rc = 0;
return rc;
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 35502d4046f5..3a7f401e943c 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1210,6 +1210,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
if (rv < 0) {
log_error(ls, "create_lkb idr error %d", rv);
+ dlm_free_lkb(lkb);
return rv;
}
@@ -4177,6 +4178,7 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
(unsigned long long)lkb->lkb_recover_seq,
ms->m_header.h_nodeid, ms->m_lkid);
error = -ENOENT;
+ dlm_put_lkb(lkb);
goto fail;
}
@@ -4230,6 +4232,7 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
lkb->lkb_id, lkb->lkb_remid,
ms->m_header.h_nodeid, ms->m_lkid);
error = -ENOENT;
+ dlm_put_lkb(lkb);
goto fail;
}
@@ -5792,20 +5795,20 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
goto out;
}
}
-
- /* After ua is attached to lkb it will be freed by dlm_free_lkb().
- When DLM_IFL_USER is set, the dlm knows that this is a userspace
- lock and that lkb_astparam is the dlm_user_args structure. */
-
error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
fake_astfn, ua, fake_bastfn, &args);
- lkb->lkb_flags |= DLM_IFL_USER;
-
if (error) {
+ kfree(ua->lksb.sb_lvbptr);
+ ua->lksb.sb_lvbptr = NULL;
+ kfree(ua);
__put_lkb(ls, lkb);
goto out;
}
+ /* After ua is attached to lkb it will be freed by dlm_free_lkb().
+ When DLM_IFL_USER is set, the dlm knows that this is a userspace
+ lock and that lkb_astparam is the dlm_user_args structure. */
+ lkb->lkb_flags |= DLM_IFL_USER;
error = request_lock(ls, lkb, name, namelen, &args);
switch (error) {
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index f3e72787e7f9..30e4e01db35a 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -673,11 +673,11 @@ static int new_lockspace(const char *name, const char *cluster,
kfree(ls->ls_recover_buf);
out_lkbidr:
idr_destroy(&ls->ls_lkbidr);
+ out_rsbtbl:
for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
if (ls->ls_remove_names[i])
kfree(ls->ls_remove_names[i]);
}
- out_rsbtbl:
vfree(ls->ls_rsbtbl);
out_lsfree:
if (do_unreg)
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index cab694540930..ec506c2733ee 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -715,8 +715,11 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
if (!PageUptodate(page)) {
ret = ext4_read_inline_page(inode, page);
- if (ret < 0)
+ if (ret < 0) {
+ unlock_page(page);
+ put_page(page);
goto out_up_read;
+ }
}
ret = 1;
@@ -1870,12 +1873,12 @@ int ext4_inline_data_fiemap(struct inode *inode,
physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
physical += offsetof(struct ext4_inode, i_block);
- if (physical)
- error = fiemap_fill_next_extent(fieinfo, start, physical,
- inline_len, flags);
brelse(iloc.bh);
out:
up_read(&EXT4_I(inode)->xattr_sem);
+ if (physical)
+ error = fiemap_fill_next_extent(fieinfo, start, physical,
+ inline_len, flags);
return (error < 0 ? error : 0);
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index bad13f049fb0..2fc1564f62dd 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1600,7 +1600,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
}
if (reserved_gdb || gdb_off == 0) {
- if (ext4_has_feature_resize_inode(sb) ||
+ if (!ext4_has_feature_resize_inode(sb) ||
!le16_to_cpu(es->s_reserved_gdt_blocks)) {
ext4_warning(sb,
"No reserved GDT blocks, can't resize");
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2cd426a4161d..0ec3689a8990 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1048,6 +1048,16 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
ext4_nfs_get_inode);
}
+static int ext4_nfs_commit_metadata(struct inode *inode)
+{
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL
+ };
+
+ trace_ext4_nfs_commit_metadata(inode);
+ return ext4_write_inode(inode, &wbc);
+}
+
/*
* Try to release metadata pages (indirect blocks, directories) which are
* mapped via the block device. Since these pages could have journal heads
@@ -1142,6 +1152,7 @@ static const struct export_operations ext4_export_ops = {
.fh_to_dentry = ext4_fh_to_dentry,
.fh_to_parent = ext4_fh_to_parent,
.get_parent = ext4_get_parent,
+ .commit_metadata = ext4_nfs_commit_metadata,
};
enum {
@@ -5187,9 +5198,9 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
qf_inode->i_flags |= S_NOQUOTA;
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
err = dquot_enable(qf_inode, type, format_id, flags);
- iput(qf_inode);
if (err)
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
+ iput(qf_inode);
return err;
}
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index f82f916e1f2c..7ebf7b958f9e 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -160,7 +160,7 @@ static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
return (void *)f2fs_acl;
fail:
- kfree(f2fs_acl);
+ kvfree(f2fs_acl);
return ERR_PTR(-EINVAL);
}
@@ -190,7 +190,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
acl = NULL;
else
acl = ERR_PTR(retval);
- kfree(value);
+ kvfree(value);
if (!IS_ERR(acl))
set_cached_acl(inode, type, acl);
@@ -243,7 +243,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0);
- kfree(value);
+ kvfree(value);
if (!error)
set_cached_acl(inode, type, acl);
@@ -355,12 +355,14 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
return PTR_ERR(p);
clone = f2fs_acl_clone(p, GFP_NOFS);
- if (!clone)
- goto no_mem;
+ if (!clone) {
+ ret = -ENOMEM;
+ goto release_acl;
+ }
ret = f2fs_acl_create_masq(clone, mode);
if (ret < 0)
- goto no_mem_clone;
+ goto release_clone;
if (ret == 0)
posix_acl_release(clone);
@@ -374,11 +376,11 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
return 0;
-no_mem_clone:
+release_clone:
posix_acl_release(clone);
-no_mem:
+release_acl:
posix_acl_release(p);
- return -ENOMEM;
+ return ret;
}
int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 4ed2d2a0bf02..940372ebfc60 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -44,7 +44,7 @@ repeat:
cond_resched();
goto repeat;
}
- f2fs_wait_on_page_writeback(page, META, true);
+ f2fs_wait_on_page_writeback(page, META, true, true);
if (!PageUptodate(page))
SetPageUptodate(page);
return page;
@@ -371,9 +371,8 @@ continue_unlock:
goto continue_unlock;
}
- f2fs_wait_on_page_writeback(page, META, true);
+ f2fs_wait_on_page_writeback(page, META, true, true);
- BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
@@ -912,7 +911,7 @@ free_fail_no_cp:
f2fs_put_page(cp1, 1);
f2fs_put_page(cp2, 1);
fail_no_cp:
- kfree(sbi->ckpt);
+ kvfree(sbi->ckpt);
return -EINVAL;
}
@@ -1291,11 +1290,11 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,
struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
int err;
+ f2fs_wait_on_page_writeback(page, META, true, true);
+
memcpy(page_address(page), src, PAGE_SIZE);
- set_page_dirty(page);
- f2fs_wait_on_page_writeback(page, META, true);
- f2fs_bug_on(sbi, PageWriteback(page));
+ set_page_dirty(page);
if (unlikely(!clear_page_dirty_for_io(page)))
f2fs_bug_on(sbi, 1);
@@ -1329,11 +1328,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
int err;
/* Flush all the NAT/SIT pages */
- while (get_pages(sbi, F2FS_DIRTY_META)) {
- f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
- if (unlikely(f2fs_cp_error(sbi)))
- break;
- }
+ f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+ f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
+ !f2fs_cp_error(sbi));
/*
* modify checkpoint
@@ -1406,14 +1403,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
for (i = 0; i < nm_i->nat_bits_blocks; i++)
f2fs_update_meta_page(sbi, nm_i->nat_bits +
(i << F2FS_BLKSIZE_BITS), blk + i);
-
- /* Flush all the NAT BITS pages */
- while (get_pages(sbi, F2FS_DIRTY_META)) {
- f2fs_sync_meta_pages(sbi, META, LONG_MAX,
- FS_CP_META_IO);
- if (unlikely(f2fs_cp_error(sbi)))
- break;
- }
}
/* write out checkpoint buffer at block 0 */
@@ -1449,6 +1438,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* Here, we have one bio having CP pack except cp pack 2 page */
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+ f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
+ !f2fs_cp_error(sbi));
/* wait for previous submitted meta pages writeback */
f2fs_wait_on_all_pages_writeback(sbi);
@@ -1466,7 +1457,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* invalidate intermediate page cache borrowed from meta inode
* which are used for migration of encrypted inode's blocks.
*/
- if (f2fs_sb_has_encrypt(sbi->sb))
+ if (f2fs_sb_has_encrypt(sbi))
invalidate_mapping_pages(META_MAPPING(sbi),
MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 8286f2114c37..84b3ee71d175 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -142,6 +142,8 @@ static bool f2fs_bio_post_read_required(struct bio *bio)
static void f2fs_read_end_io(struct bio *bio)
{
+ struct page *first_page = bio->bi_io_vec[0].bv_page;
+
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_READ_IO)) {
f2fs_show_injection_info(FAULT_READ_IO);
bio->bi_error = -EIO;
@@ -155,6 +157,13 @@ static void f2fs_read_end_io(struct bio *bio)
return;
}
+ if (first_page != NULL &&
+ __read_io_type(first_page) == F2FS_RD_DATA) {
+ trace_android_fs_dataread_end(first_page->mapping->host,
+ page_offset(first_page),
+ bio->bi_iter.bi_size);
+ }
+
__read_end_io(bio);
}
@@ -321,6 +330,32 @@ submit_io:
submit_bio(bio_op(bio), bio);
}
+static void __f2fs_submit_read_bio(struct f2fs_sb_info *sbi,
+ struct bio *bio, enum page_type type)
+{
+ if (trace_android_fs_dataread_start_enabled() && (type == DATA)) {
+ struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+ if (first_page != NULL &&
+ __read_io_type(first_page) == F2FS_RD_DATA) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ first_page->mapping->host);
+
+ trace_android_fs_dataread_start(
+ first_page->mapping->host,
+ page_offset(first_page),
+ bio->bi_iter.bi_size,
+ current->pid,
+ path,
+ current->comm);
+ }
+ }
+ __submit_bio(sbi, bio, type);
+}
+
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
@@ -370,29 +405,6 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
return false;
}
-static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
- struct page *page, nid_t ino,
- enum page_type type)
-{
- enum page_type btype = PAGE_TYPE_OF_BIO(type);
- enum temp_type temp;
- struct f2fs_bio_info *io;
- bool ret = false;
-
- for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
- io = sbi->write_io[btype] + temp;
-
- down_read(&io->io_rwsem);
- ret = __has_merged_page(io, inode, page, ino);
- up_read(&io->io_rwsem);
-
- /* TODO: use HOT temp only for meta pages now. */
- if (ret || btype == META)
- break;
- }
- return ret;
-}
-
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
enum page_type type, enum temp_type temp)
{
@@ -418,13 +430,19 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
nid_t ino, enum page_type type, bool force)
{
enum temp_type temp;
-
- if (!force && !has_merged_page(sbi, inode, page, ino, type))
- return;
+ bool ret = true;
for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
+ if (!force) {
+ enum page_type btype = PAGE_TYPE_OF_BIO(type);
+ struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
- __f2fs_submit_merged_write(sbi, type, temp);
+ down_read(&io->io_rwsem);
+ ret = __has_merged_page(io, inode, page, ino);
+ up_read(&io->io_rwsem);
+ }
+ if (ret)
+ __f2fs_submit_merged_write(sbi, type, temp);
/* TODO: use HOT temp only for meta pages now. */
if (type >= META)
@@ -485,7 +503,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
inc_page_count(fio->sbi, is_read_io(fio->op) ?
__read_io_type(page): WB_DATA_TYPE(fio->page));
- __submit_bio(fio->sbi, bio, fio->type);
+ __f2fs_submit_read_bio(fio->sbi, bio, fio->type);
return 0;
}
@@ -615,7 +633,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
}
ClearPageError(page);
inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
- __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ __f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
@@ -641,7 +659,7 @@ static void __set_data_blkaddr(struct dnode_of_data *dn)
*/
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
- f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+ f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
__set_data_blkaddr(dn);
if (set_page_dirty(dn->node_page))
dn->node_changed = true;
@@ -671,7 +689,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
dn->ofs_in_node, count);
- f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+ f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
for (; count > 0; dn->ofs_in_node++) {
block_t blkaddr = datablock_addr(dn->inode,
@@ -955,6 +973,9 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
return err;
}
+ if (direct_io && allow_outplace_dio(inode, iocb, from))
+ return 0;
+
if (is_inode_flag_set(inode, FI_NO_PREALLOC))
return 0;
@@ -968,6 +989,7 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
map.m_next_pgofs = NULL;
map.m_next_extent = NULL;
map.m_seg_type = NO_CHECK_TYPE;
+ map.m_may_create = true;
if (direct_io) {
map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
@@ -1026,7 +1048,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- int mode = create ? ALLOC_NODE : LOOKUP_NODE;
+ int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
pgoff_t pgofs, end_offset, end;
int err = 0, ofs = 1;
unsigned int ofs_in_node, last_ofs_in_node;
@@ -1046,6 +1068,10 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
end = pgofs + maxblocks;
if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+ if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
+ map->m_may_create)
+ goto next_dnode;
+
map->m_pblk = ei.blk + pgofs - ei.fofs;
map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
map->m_flags = F2FS_MAP_MAPPED;
@@ -1060,7 +1086,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
}
next_dnode:
- if (create)
+ if (map->m_may_create)
__do_map_lock(sbi, flag, true);
/* When reading holes, we need its node page */
@@ -1097,11 +1123,13 @@ next_block:
if (is_valid_data_blkaddr(sbi, blkaddr)) {
/* use out-of-place update for direct IO under LFS mode */
- if (test_opt(sbi, LFS) && create &&
- flag == F2FS_GET_BLOCK_DIO) {
+ if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
+ map->m_may_create) {
err = __allocate_data_block(&dn, map->m_seg_type);
- if (!err)
+ if (!err) {
+ blkaddr = dn.data_blkaddr;
set_inode_flag(inode, FI_APPEND_WRITE);
+ }
}
} else {
if (create) {
@@ -1207,7 +1235,7 @@ skip:
f2fs_put_dnode(&dn);
- if (create) {
+ if (map->m_may_create) {
__do_map_lock(sbi, flag, false);
f2fs_balance_fs(sbi, dn.node_changed);
}
@@ -1233,7 +1261,7 @@ sync_out:
}
f2fs_put_dnode(&dn);
unlock_out:
- if (create) {
+ if (map->m_may_create) {
__do_map_lock(sbi, flag, false);
f2fs_balance_fs(sbi, dn.node_changed);
}
@@ -1255,6 +1283,7 @@ bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
map.m_next_pgofs = NULL;
map.m_next_extent = NULL;
map.m_seg_type = NO_CHECK_TYPE;
+ map.m_may_create = false;
last_lblk = F2FS_BLK_ALIGN(pos + len);
while (map.m_lblk < last_lblk) {
@@ -1269,7 +1298,7 @@ bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
static int __get_data_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create, int flag,
- pgoff_t *next_pgofs, int seg_type)
+ pgoff_t *next_pgofs, int seg_type, bool may_write)
{
struct f2fs_map_blocks map;
int err;
@@ -1279,6 +1308,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
map.m_next_pgofs = next_pgofs;
map.m_next_extent = NULL;
map.m_seg_type = seg_type;
+ map.m_may_create = may_write;
err = f2fs_map_blocks(inode, &map, create, flag);
if (!err) {
@@ -1295,16 +1325,25 @@ static int get_data_block(struct inode *inode, sector_t iblock,
{
return __get_data_block(inode, iblock, bh_result, create,
flag, next_pgofs,
- NO_CHECK_TYPE);
+ NO_CHECK_TYPE, create);
+}
+
+static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ return __get_data_block(inode, iblock, bh_result, create,
+ F2FS_GET_BLOCK_DIO, NULL,
+ f2fs_rw_hint_to_seg_type(inode->i_write_hint),
+ true);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DIO, NULL,
- f2fs_rw_hint_to_seg_type(
- inode->i_write_hint));
+ F2FS_GET_BLOCK_DIO, NULL,
+ f2fs_rw_hint_to_seg_type(inode->i_write_hint),
+ false);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -1316,7 +1355,7 @@ static int get_data_block_bmap(struct inode *inode, sector_t iblock,
return __get_data_block(inode, iblock, bh_result, create,
F2FS_GET_BLOCK_BMAP, NULL,
- NO_CHECK_TYPE);
+ NO_CHECK_TYPE, create);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -1523,6 +1562,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
map.m_next_pgofs = NULL;
map.m_next_extent = NULL;
map.m_seg_type = NO_CHECK_TYPE;
+ map.m_may_create = false;
for (; nr_pages; nr_pages--) {
if (pages) {
@@ -1592,7 +1632,7 @@ got_it:
if (bio && (last_block_in_bio != block_nr - 1 ||
!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
- __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ __f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (bio == NULL) {
@@ -1624,7 +1664,7 @@ set_error_page:
goto next_page;
confused:
if (bio) {
- __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ __f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
unlock_page(page);
@@ -1634,7 +1674,7 @@ next_page:
}
BUG_ON(pages && !list_empty(pages));
if (bio)
- __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ __f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
@@ -1852,6 +1892,8 @@ got_it:
if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
err = f2fs_inplace_write_data(fio);
+ if (err && PageWriteback(page))
+ end_page_writeback(page);
trace_f2fs_do_write_data_page(fio->page, IPU);
set_inode_flag(inode, FI_UPDATE_WRITE);
return err;
@@ -2139,12 +2181,11 @@ continue_unlock:
if (PageWriteback(page)) {
if (wbc->sync_mode != WB_SYNC_NONE)
f2fs_wait_on_page_writeback(page,
- DATA, true);
+ DATA, true, true);
else
goto continue_unlock;
}
- BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
@@ -2321,6 +2362,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
bool locked = false;
struct extent_info ei = {0,0,0};
int err = 0;
+ int flag;
/*
* we already allocated all the blocks, so we don't need to get
@@ -2330,9 +2372,15 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
!is_inode_flag_set(inode, FI_NO_PREALLOC))
return 0;
+ /* f2fs_lock_op avoids race between write CP and convert_inline_page */
+ if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
+ flag = F2FS_GET_BLOCK_DEFAULT;
+ else
+ flag = F2FS_GET_BLOCK_PRE_AIO;
+
if (f2fs_has_inline_data(inode) ||
(pos & PAGE_MASK) >= i_size_read(inode)) {
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ __do_map_lock(sbi, flag, true);
locked = true;
}
restart:
@@ -2370,6 +2418,7 @@ restart:
f2fs_put_dnode(&dn);
__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
true);
+ WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
locked = true;
goto restart;
}
@@ -2383,7 +2432,7 @@ out:
f2fs_put_dnode(&dn);
unlock_out:
if (locked)
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ __do_map_lock(sbi, flag, false);
return err;
}
@@ -2464,7 +2513,7 @@ repeat:
}
}
- f2fs_wait_on_page_writeback(page, DATA, false);
+ f2fs_wait_on_page_writeback(page, DATA, false, true);
if (len == PAGE_SIZE || PageUptodate(page))
return 0;
@@ -2556,6 +2605,53 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
return 0;
}
+static void f2fs_dio_end_io(struct bio *bio)
+{
+ struct f2fs_private_dio *dio = bio->bi_private;
+
+ dec_page_count(F2FS_I_SB(dio->inode),
+ dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
+
+ bio->bi_private = dio->orig_private;
+ bio->bi_end_io = dio->orig_end_io;
+
+ kvfree(dio);
+
+ bio_endio(bio);
+}
+
+static void f2fs_dio_submit_bio(int rw, struct bio *bio, struct inode *inode,
+ loff_t file_offset)
+{
+ struct f2fs_private_dio *dio;
+ bool write = (rw == REQ_OP_WRITE);
+ int err;
+
+ dio = f2fs_kzalloc(F2FS_I_SB(inode),
+ sizeof(struct f2fs_private_dio), GFP_NOFS);
+ if (!dio) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dio->inode = inode;
+ dio->orig_end_io = bio->bi_end_io;
+ dio->orig_private = bio->bi_private;
+ dio->write = write;
+
+ bio->bi_end_io = f2fs_dio_end_io;
+ bio->bi_private = dio;
+
+ inc_page_count(F2FS_I_SB(inode),
+ write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
+
+ submit_bio(rw, bio);
+ return;
+out:
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+}
+
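f2fs_dio_submit_bio()/f2fs_dio_end_io() follow a common bi_end_io interposer
pattern: stash the original completion state, substitute our own handler, and
restore the original before forwarding the completion. The generic shape,
stripped of the f2fs accounting (a sketch; field and helper names follow the
kernel bio API of this era, not every version):

struct interposer {
        void *orig_private;
        bio_end_io_t *orig_end_io;
};

static void my_end_io(struct bio *bio)
{
        struct interposer *ip = bio->bi_private;

        /* ... account for the completed I/O here ... */

        /* restore the saved completion state, then re-fire it */
        bio->bi_private = ip->orig_private;
        bio->bi_end_io = ip->orig_end_io;
        kfree(ip);
        bio_endio(bio);
}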
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
{
@@ -2625,7 +2721,11 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
down_read(&fi->i_gc_rwsem[READ]);
}
- err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
+ err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+ iter, offset,
+ rw == WRITE ? get_data_block_dio_write :
+ get_data_block_dio, NULL, f2fs_dio_submit_bio,
+ DIO_LOCKING | DIO_SKIP_HOLES);
if (do_opu)
up_read(&fi->i_gc_rwsem[READ]);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 139b4d5c83d5..f05b37ef7182 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -53,6 +53,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->vw_cnt = atomic_read(&sbi->vw_cnt);
si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
+ si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ);
+ si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE);
si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA);
@@ -62,7 +64,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->nr_flushed =
atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
si->nr_flushing =
- atomic_read(&SM_I(sbi)->fcc_info->issing_flush);
+ atomic_read(&SM_I(sbi)->fcc_info->queued_flush);
si->flush_list_empty =
llist_empty(&SM_I(sbi)->fcc_info->issue_list);
}
@@ -70,7 +72,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->nr_discarded =
atomic_read(&SM_I(sbi)->dcc_info->issued_discard);
si->nr_discarding =
- atomic_read(&SM_I(sbi)->dcc_info->issing_discard);
+ atomic_read(&SM_I(sbi)->dcc_info->queued_discard);
si->nr_discard_cmd =
atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
@@ -94,8 +96,10 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->free_secs = free_sections(sbi);
si->prefree_count = prefree_segments(sbi);
si->dirty_count = dirty_segments(sbi);
- si->node_pages = NODE_MAPPING(sbi)->nrpages;
- si->meta_pages = META_MAPPING(sbi)->nrpages;
+ if (sbi->node_inode)
+ si->node_pages = NODE_MAPPING(sbi)->nrpages;
+ if (sbi->meta_inode)
+ si->meta_pages = META_MAPPING(sbi)->nrpages;
si->nats = NM_I(sbi)->nat_cnt;
si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
si->sits = MAIN_SEGS(sbi);
@@ -173,7 +177,6 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
static void update_mem_info(struct f2fs_sb_info *sbi)
{
struct f2fs_stat_info *si = F2FS_STAT(sbi);
- unsigned npages;
int i;
if (si->base_mem)
@@ -197,7 +200,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE;
- if (sbi->segs_per_sec > 1)
+ if (__is_large_section(sbi))
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
@@ -256,10 +259,14 @@ get_cache:
sizeof(struct extent_node);
si->page_mem = 0;
- npages = NODE_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
- npages = META_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+ if (sbi->node_inode) {
+ unsigned npages = NODE_MAPPING(sbi)->nrpages;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+ }
+ if (sbi->meta_inode) {
+ unsigned npages = META_MAPPING(sbi)->nrpages;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+ }
}
static int stat_show(struct seq_file *s, void *v)
@@ -374,6 +381,8 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
si->ext_tree, si->zombie_tree, si->ext_node);
seq_puts(s, "\nBalancing F2FS Async:\n");
+ seq_printf(s, " - DIO (R: %4d, W: %4d)\n",
+ si->nr_dio_read, si->nr_dio_write);
seq_printf(s, " - IO_R (Data: %4d, Node: %4d, Meta: %4d\n",
si->nr_rd_data, si->nr_rd_node, si->nr_rd_meta);
seq_printf(s, " - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
@@ -510,7 +519,7 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
list_del(&si->stat_list);
mutex_unlock(&f2fs_stat_mutex);
- kfree(si);
+ kvfree(si);
}
int __init f2fs_create_root_stats(void)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index eedc07b46a52..719ba4d92098 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -293,7 +293,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
{
enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
lock_page(page);
- f2fs_wait_on_page_writeback(page, type, true);
+ f2fs_wait_on_page_writeback(page, type, true, true);
de->ino = cpu_to_le32(inode->i_ino);
set_de_type(de, inode->i_mode);
set_page_dirty(page);
@@ -307,7 +307,7 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
{
struct f2fs_inode *ri;
- f2fs_wait_on_page_writeback(ipage, NODE, true);
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
/* copy name info. to this inode page */
ri = F2FS_INODE(ipage);
@@ -550,7 +550,7 @@ start:
++level;
goto start;
add_dentry:
- f2fs_wait_on_page_writeback(dentry_page, DATA, true);
+ f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
@@ -705,7 +705,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
return f2fs_delete_inline_entry(dentry, page, dir, inode);
lock_page(page);
- f2fs_wait_on_page_writeback(page, DATA, true);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
dentry_blk = page_address(page);
bit_pos = dentry - dentry_blk->dentry;
@@ -808,6 +808,17 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
de_name.name = d->filename[bit_pos];
de_name.len = le16_to_cpu(de->name_len);
+ /* check memory boundary before moving forward */
+ bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+ if (unlikely(bit_pos > d->max)) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: corrupted namelen=%d, run fsck to fix.",
+ __func__, le16_to_cpu(de->name_len));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ err = -EINVAL;
+ goto out;
+ }
+
if (f2fs_encrypted_inode(d->inode)) {
int save_len = fstr->len;
@@ -830,7 +841,6 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
if (readdir_ra)
f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
- bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
ctx->pos = start_pos + bit_pos;
}
out:
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 85542cba3fa2..b401578f8fb4 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -69,7 +69,7 @@ struct f2fs_fault_info {
unsigned int inject_type;
};
-extern char *f2fs_fault_name[FAULT_MAX];
+extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif
@@ -154,12 +154,13 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_VERITY 0x0400 /* reserved */
#define F2FS_FEATURE_SB_CHKSUM 0x0800
-#define F2FS_HAS_FEATURE(sb, mask) \
- ((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
-#define F2FS_SET_FEATURE(sb, mask) \
- (F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask))
-#define F2FS_CLEAR_FEATURE(sb, mask) \
- (F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask))
+#define __F2FS_HAS_FEATURE(raw_super, mask) \
+ ((raw_super->feature & cpu_to_le32(mask)) != 0)
+#define F2FS_HAS_FEATURE(sbi, mask) __F2FS_HAS_FEATURE(sbi->raw_super, mask)
+#define F2FS_SET_FEATURE(sbi, mask) \
+ (sbi->raw_super->feature |= cpu_to_le32(mask))
+#define F2FS_CLEAR_FEATURE(sbi, mask) \
+ (sbi->raw_super->feature &= ~cpu_to_le32(mask))
/* bio stuffs */
#define REQ_OP_READ READ
@@ -347,7 +348,7 @@ struct discard_cmd {
struct block_device *bdev; /* bdev */
unsigned short ref; /* reference count */
unsigned char state; /* state */
- unsigned char issuing; /* issuing discard */
+ unsigned char queued; /* queued discard */
int error; /* bio error */
spinlock_t lock; /* for state/bio_ref updating */
unsigned short bio_ref; /* bio reference count */
@@ -389,7 +390,7 @@ struct discard_cmd_control {
unsigned int undiscard_blks; /* # of undiscard blocks */
unsigned int next_pos; /* next discard position */
atomic_t issued_discard; /* # of issued discard */
- atomic_t issing_discard; /* # of issing discard */
+ atomic_t queued_discard; /* # of queued discard */
atomic_t discard_cmd_cnt; /* # of cached cmd count */
struct rb_root root; /* root of discard rb-tree */
bool rbtree_check; /* config for consistence check */
@@ -479,6 +480,7 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
#define F2FS_GOING_DOWN_METASYNC 0x1 /* going down with metadata */
#define F2FS_GOING_DOWN_NOSYNC 0x2 /* going down */
#define F2FS_GOING_DOWN_METAFLUSH 0x3 /* going down with meta flush */
+#define F2FS_GOING_DOWN_NEED_FSCK 0x4 /* going down to trigger fsck */
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
@@ -617,16 +619,8 @@ struct extent_info {
};
struct extent_node {
- struct rb_node rb_node;
- union {
- struct {
- unsigned int fofs;
- unsigned int len;
- u32 blk;
- };
- struct extent_info ei; /* extent info */
-
- };
+ struct rb_node rb_node; /* rb node located in rb-tree */
+ struct extent_info ei; /* extent info */
struct list_head list; /* node in global extent list of sbi */
struct extent_tree *et; /* extent tree pointer */
};
@@ -661,6 +655,7 @@ struct f2fs_map_blocks {
pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
pgoff_t *m_next_extent; /* point to next possible extent */
int m_seg_type;
+ bool m_may_create; /* indicate it is from write path */
};
/* for flag in get_data_block */
@@ -949,7 +944,7 @@ struct flush_cmd_control {
struct task_struct *f2fs_issue_flush; /* flush thread */
wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */
atomic_t issued_flush; /* # of issued flushes */
- atomic_t issing_flush; /* # of issing flushes */
+ atomic_t queued_flush; /* # of queued flushes */
struct llist_head issue_list; /* list for command issue */
struct llist_node *dispatch_list; /* list for command dispatch */
};
@@ -1016,6 +1011,8 @@ enum count_type {
F2FS_RD_DATA,
F2FS_RD_NODE,
F2FS_RD_META,
+ F2FS_DIO_WRITE,
+ F2FS_DIO_READ,
NR_COUNT_TYPE,
};
@@ -1230,8 +1227,6 @@ struct f2fs_sb_info {
/* for bio operations */
struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
- struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
- /* bio ordering for NODE/DATA */
/* keep migration IO order for LFS mode */
struct rw_semaphore io_order_lock;
mempool_t *write_io_dummy; /* Dummy pages */
@@ -1323,6 +1318,7 @@ struct f2fs_sb_info {
struct f2fs_gc_kthread *gc_thread; /* GC thread */
unsigned int cur_victim_sec; /* current victim section num */
unsigned int gc_mode; /* current GC state */
+ unsigned int next_victim_seg[2]; /* next segment in victim section */
/* for skip statistic */
unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
unsigned long long skipped_gc_rwsem; /* FG_GC only */
@@ -1332,6 +1328,8 @@ struct f2fs_sb_info {
/* maximum # of trials to find a victim segment for SSR and GC */
unsigned int max_victim_search;
+ /* migration granularity of garbage collection, unit: segment */
+ unsigned int migration_granularity;
/*
* for stat information.
@@ -1390,6 +1388,13 @@ struct f2fs_sb_info {
__u32 s_chksum_seed;
};
+struct f2fs_private_dio {
+ struct inode *inode;
+ void *orig_private;
+ bio_end_io_t *orig_end_io;
+ bool write;
+};
+
#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(type) \
printk_ratelimited("%sF2FS-fs : inject %s in %s of %pF\n", \
@@ -1668,12 +1673,16 @@ static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
{
unsigned long flags;
- set_sbi_flag(sbi, SBI_NEED_FSCK);
+ /*
+ * In order to re-enable nat_bits we need to call fsck.f2fs by
+ * set_sbi_flag(sbi, SBI_NEED_FSCK), but that may incur a huge cost,
+ * so let's rely on regular fsck or unclean shutdown.
+ */
if (lock)
spin_lock_irqsave(&sbi->cp_lock, flags);
__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
- kfree(NM_I(sbi)->nat_bits);
+ kvfree(NM_I(sbi)->nat_bits);
NM_I(sbi)->nat_bits = NULL;
if (lock)
spin_unlock_irqrestore(&sbi->cp_lock, flags);
@@ -2206,7 +2215,11 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
- get_pages(sbi, F2FS_WB_CP_DATA))
+ get_pages(sbi, F2FS_WB_CP_DATA) ||
+ get_pages(sbi, F2FS_DIO_READ) ||
+ get_pages(sbi, F2FS_DIO_WRITE) ||
+ atomic_read(&SM_I(sbi)->dcc_info->queued_discard) ||
+ atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
return false;
return f2fs_time_over(sbi, type);
}
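[Editor's note] With the new F2FS_DIO_READ/F2FS_DIO_WRITE page counts plus the queued_discard/queued_flush counters, is_idle() only reports idle when no tracked I/O of any kind is pending. A toy model of that predicate, using C11 atomics in place of the kernel's atomic_t; the counter names are loosely mirrored from the hunk, not real f2fs symbols.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int rd_pages, wb_pages, dio_reads, dio_writes,
                  queued_discard, queued_flush;

static bool dev_is_idle(void)
{
        return !atomic_load(&rd_pages) && !atomic_load(&wb_pages) &&
               !atomic_load(&dio_reads) && !atomic_load(&dio_writes) &&
               !atomic_load(&queued_discard) && !atomic_load(&queued_flush);
}

int main(void)
{
        atomic_fetch_add(&dio_writes, 1);
        printf("idle=%d\n", dev_is_idle());     /* 0: a DIO write is in flight */
        atomic_fetch_sub(&dio_writes, 1);
        printf("idle=%d\n", dev_is_idle());     /* 1 */
        return 0;
}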
@@ -2430,6 +2443,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
case FI_NEW_INODE:
if (set)
return;
+ /* fall through */
case FI_DATA_EXIST:
case FI_INLINE_DOTS:
case FI_PIN_FILE:
@@ -2732,32 +2746,47 @@ static inline bool is_dot_dotdot(const struct qstr *str)
static inline bool f2fs_may_extent_tree(struct inode *inode)
{
- if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (!test_opt(sbi, EXTENT_CACHE) ||
is_inode_flag_set(inode, FI_NO_EXTENT))
return false;
+ /*
+ * don't create extents for recovered files during mount
+ * if the shrinker is not registered.
+ */
+ if (list_empty(&sbi->s_list))
+ return false;
+
return S_ISREG(inode->i_mode);
}
+static inline void *kvmalloc(size_t size, gfp_t flags)
+{
+ void *ret;
+
+ ret = kmalloc(size, flags | __GFP_NOWARN);
+ if (!ret)
+ ret = __vmalloc(size, flags, PAGE_KERNEL);
+ return ret;
+}
+
static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
+ void *ret;
+
if (time_to_inject(sbi, FAULT_KMALLOC)) {
f2fs_show_injection_info(FAULT_KMALLOC);
return NULL;
}
- return kmalloc(size, flags);
-}
-
-static inline void *kvmalloc(size_t size, gfp_t flags)
-{
- void *ret;
+ ret = kmalloc(size, flags);
+ if (ret)
+ return ret;
- ret = kmalloc(size, flags | __GFP_NOWARN);
- if (!ret)
- ret = __vmalloc(size, flags, PAGE_KERNEL);
- return ret;
+ return kvmalloc(size, flags);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
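[Editor's note] f2fs_kmalloc() now tries kmalloc() first and falls back to the local kvmalloc() helper, and the kfree()-to-kvfree() conversions throughout this series are what make that safe: the free side must cope with memory from either allocator. A self-contained userspace model of the pairing, using malloc()/mmap() as stand-ins for kmalloc()/vmalloc() and a header word in place of the kernel's is_vmalloc_addr() test:

#include <stdlib.h>
#include <sys/mman.h>

struct hdr { int mmapped; size_t len; };

static void *kv_alloc(size_t len)
{
        struct hdr *h;

        if (len < 4096) {                       /* "kmalloc" path */
                h = malloc(sizeof(*h) + len);
                if (!h)
                        return NULL;
                h->mmapped = 0;
        } else {                                /* "vmalloc" fallback */
                h = mmap(NULL, sizeof(*h) + len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (h == MAP_FAILED)
                        return NULL;
                h->mmapped = 1;
        }
        h->len = len;
        return h + 1;
}

static void kv_free(void *p)
{
        struct hdr *h;

        if (!p)
                return;
        h = (struct hdr *)p - 1;
        if (h->mmapped)                         /* undo whichever allocator */
                munmap(h, sizeof(*h) + h->len);
        else
                free(h);
}

int main(void)
{
        void *small = kv_alloc(64);             /* heap-backed */
        void *big = kv_alloc(1 << 20);          /* mapping-backed */

        kv_free(small);
        kv_free(big);
        return 0;
}

The 4096-byte threshold is arbitrary; the point is only that the free side must dispatch on how the block was obtained, which is why buffers that may come from either path must be released with kvfree().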
@@ -2842,6 +2871,8 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
spin_unlock(&sbi->iostat_lock);
}
+#define __is_large_section(sbi) ((sbi)->segs_per_sec > 1)
+
#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META && \
(!is_read_io(fio->op) || fio->is_meta))
@@ -3086,7 +3117,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
struct f2fs_summary *sum, int type,
struct f2fs_io_info *fio, bool add_list);
void f2fs_wait_on_page_writeback(struct page *page,
- enum page_type type, bool ordered);
+ enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
block_t len);
@@ -3226,6 +3257,7 @@ struct f2fs_stat_info {
int total_count, utilization;
int bg_gc, nr_wb_cp_data, nr_wb_data;
int nr_rd_data, nr_rd_node, nr_rd_meta;
+ int nr_dio_read, nr_dio_write;
unsigned int io_skip_bggc, other_skip_bggc;
int nr_flushing, nr_flushed, flush_list_empty;
int nr_discarding, nr_discarded;
@@ -3537,9 +3569,9 @@ static inline bool f2fs_post_read_required(struct inode *inode)
}
#define F2FS_FEATURE_FUNCS(name, flagname) \
-static inline int f2fs_sb_has_##name(struct super_block *sb) \
+static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
- return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_##flagname); \
+ return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}
F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
@@ -3569,7 +3601,7 @@ static inline int get_blkz_type(struct f2fs_sb_info *sbi,
static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
- return f2fs_sb_has_blkzoned(sbi->sb);
+ return f2fs_sb_has_blkzoned(sbi);
}
static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
@@ -3644,7 +3676,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
* for blkzoned device, fallback direct IO to buffered IO, so
* all IOs can be serialized by log-structured write.
*/
- if (f2fs_sb_has_blkzoned(sbi->sb))
+ if (f2fs_sb_has_blkzoned(sbi))
return true;
if (test_opt(sbi, LFS) && (rw == WRITE) &&
block_unaligned_IO(inode, iocb, iter))
@@ -3667,7 +3699,7 @@ extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
- if (f2fs_sb_has_quota_ino(sbi->sb))
+ if (f2fs_sb_has_quota_ino(sbi))
return true;
if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 3c61f9619be1..b44e02b8c677 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -85,7 +85,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
}
/* fill the page */
- f2fs_wait_on_page_writeback(page, DATA, false);
+ f2fs_wait_on_page_writeback(page, DATA, false, true);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -219,6 +219,9 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
trace_f2fs_sync_file_enter(inode);
+ if (S_ISDIR(inode->i_mode))
+ goto go_write;
+
/* if fdatasync is triggered, let's do in-place-update */
if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
set_inode_flag(inode, FI_NEED_IPU);
@@ -578,7 +581,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
if (IS_ERR(page))
return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
- f2fs_wait_on_page_writeback(page, DATA, true);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
zero_user(page, offset, PAGE_SIZE - offset);
/* An encrypted inode should have a key and truncate the last page. */
@@ -700,7 +703,7 @@ int f2fs_getattr(struct vfsmount *mnt,
unsigned int flags;
if (f2fs_has_extra_attr(inode) &&
- f2fs_sb_has_inode_crtime(inode->i_sb) &&
+ f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
stat->result_mask |= STATX_BTIME;
stat->btime.tv_sec = fi->i_crtime.tv_sec;
@@ -899,7 +902,7 @@ static int fill_zero(struct inode *inode, pgoff_t index,
if (IS_ERR(page))
return PTR_ERR(page);
- f2fs_wait_on_page_writeback(page, DATA, true);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
zero_user(page, start, len);
set_page_dirty(page);
f2fs_put_page(page, 1);
@@ -1503,7 +1506,8 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
- .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
+ .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
+ .m_may_create = true };
pgoff_t pg_end;
loff_t new_size = i_size_read(inode);
loff_t off_end;
@@ -1746,10 +1750,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- if (!get_dirty_pages(inode))
- goto skip_flush;
-
- f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+ /*
+ * Should wait for end_io to count F2FS_WB_CP_DATA correctly by
+ * f2fs_is_atomic_file.
+ */
+ if (get_dirty_pages(inode))
+ f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
"Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
@@ -1757,7 +1763,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
}
-skip_flush:
+
set_inode_flag(inode, FI_ATOMIC_FILE);
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -1962,6 +1968,13 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
f2fs_stop_checkpoint(sbi, false);
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
+ case F2FS_GOING_DOWN_NEED_FSCK:
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ /* do checkpoint only */
+ ret = f2fs_sync_fs(sb, 1);
+ if (ret)
+ goto out;
+ break;
default:
ret = -EINVAL;
goto out;
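[Editor's note] The new F2FS_GOING_DOWN_NEED_FSCK mode tags the filesystem for fsck and then only writes a checkpoint. A hedged userspace sketch of invoking it; the ioctl encoding below follows the known f2fs uapi convention (F2FS_IOC_SHUTDOWN reuses the XFS 'X', 125 code), but treat these definitions as assumptions rather than a copy of the headers.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>

#ifndef F2FS_IOC_SHUTDOWN
#define F2FS_IOC_SHUTDOWN       _IOR('X', 125, __u32)   /* assumed encoding */
#endif
#define F2FS_GOING_DOWN_NEED_FSCK       0x4     /* from the hunk above */

int main(int argc, char **argv)
{
        __u32 mode = F2FS_GOING_DOWN_NEED_FSCK;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <f2fs mountpoint>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || ioctl(fd, F2FS_IOC_SHUTDOWN, &mode) < 0) {
                perror("F2FS_IOC_SHUTDOWN");
                return 1;
        }
        close(fd);
        return 0;
}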
@@ -2030,7 +2043,7 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- if (!f2fs_sb_has_encrypt(inode->i_sb))
+ if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
return -EOPNOTSUPP;
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
@@ -2040,7 +2053,7 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
- if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
+ if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
return -EOPNOTSUPP;
return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
@@ -2051,7 +2064,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err;
- if (!f2fs_sb_has_encrypt(inode->i_sb))
+ if (!f2fs_sb_has_encrypt(sbi))
return -EOPNOTSUPP;
err = mnt_want_write_file(filp);
@@ -2155,7 +2168,7 @@ do_more:
}
ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
- range.start += sbi->blocks_per_seg;
+ range.start += BLKS_PER_SEC(sbi);
if (range.start <= end)
goto do_more;
out:
@@ -2197,7 +2210,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
{
struct inode *inode = file_inode(filp);
struct f2fs_map_blocks map = { .m_next_extent = NULL,
- .m_seg_type = NO_CHECK_TYPE };
+ .m_seg_type = NO_CHECK_TYPE,
+ .m_may_create = false };
struct extent_info ei = {0, 0, 0};
pgoff_t pg_start, pg_end, next_pgofs;
unsigned int blk_per_seg = sbi->blocks_per_seg;
@@ -2560,7 +2574,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
return -EFAULT;
if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
- sbi->segs_per_sec != 1) {
+ __is_large_section(sbi)) {
f2fs_msg(sbi->sb, KERN_WARNING,
"Can't flush %u in %d for segs_per_sec %u != 1\n",
range.dev_num, sbi->s_ndevs,
@@ -2711,6 +2725,7 @@ int f2fs_precache_extents(struct inode *inode)
map.m_next_pgofs = NULL;
map.m_next_extent = &m_next_extent;
map.m_seg_type = NO_CHECK_TYPE;
+ map.m_may_create = false;
end = F2FS_I_SB(inode)->max_file_blocks;
while (map.m_lblk < end) {
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 843430b55a73..3624d1336bd1 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -142,7 +142,7 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(gc_th->f2fs_gc_task)) {
err = PTR_ERR(gc_th->f2fs_gc_task);
- kfree(gc_th);
+ kvfree(gc_th);
sbi->gc_thread = NULL;
}
out:
@@ -155,7 +155,7 @@ void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
if (!gc_th)
return;
kthread_stop(gc_th->f2fs_gc_task);
- kfree(gc_th);
+ kvfree(gc_th);
sbi->gc_thread = NULL;
}
@@ -323,8 +323,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
p.min_cost = get_max_cost(sbi, &p);
if (*result != NULL_SEGNO) {
- if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
- get_valid_blocks(sbi, *result, false) &&
+ if (get_valid_blocks(sbi, *result, false) &&
!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
p.min_segno = *result;
goto out;
@@ -333,6 +332,22 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
if (p.max_search == 0)
goto out;
+ if (__is_large_section(sbi) && p.alloc_mode == LFS) {
+ if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
+ p.min_segno = sbi->next_victim_seg[BG_GC];
+ *result = p.min_segno;
+ sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+ goto got_result;
+ }
+ if (gc_type == FG_GC &&
+ sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
+ p.min_segno = sbi->next_victim_seg[FG_GC];
+ *result = p.min_segno;
+ sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
+ goto got_result;
+ }
+ }
+
last_victim = sm->last_victim[p.gc_mode];
if (p.alloc_mode == LFS && gc_type == FG_GC) {
p.min_segno = check_bg_victims(sbi);
@@ -395,6 +410,8 @@ next:
}
if (p.min_segno != NULL_SEGNO) {
got_it:
+ *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
+got_result:
if (p.alloc_mode == LFS) {
secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
if (gc_type == FG_GC)
@@ -402,13 +419,13 @@ got_it:
else
set_bit(secno, dirty_i->victim_secmap);
}
- *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
+ }
+out:
+ if (p.min_segno != NULL_SEGNO)
trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
sbi->cur_victim_sec,
prefree_segments(sbi), free_segments(sbi));
- }
-out:
mutex_unlock(&dirty_i->seglist_lock);
return (p.min_segno == NULL_SEGNO) ? 0 : 1;
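[Editor's note] For large sections, get_victim_by_default() now honours a resume cursor: if a previous round recorded sbi->next_victim_seg[], the search is skipped and GC continues inside the same victim section; got_it also aligns the chosen segment to its section start before falling into the new got_result label. A toy model of the cursor, where NULL_SEGNO, SEGS_PER_SEC, and pick_victim() are illustrative stand-ins (the cursor itself is set by do_garbage_collect(), modelled after a later hunk):

#include <stdio.h>

#define NULL_SEGNO      (~0u)
#define SEGS_PER_SEC    4u      /* stand-in section geometry */

static unsigned next_victim_seg = NULL_SEGNO;

static unsigned pick_victim(void) { return 9; } /* stand-in policy */

static unsigned get_victim(void)
{
        unsigned segno;

        if (next_victim_seg != NULL_SEGNO) {    /* resume mid-section */
                segno = next_victim_seg;
                next_victim_seg = NULL_SEGNO;
                return segno;
        }
        segno = pick_victim();
        return (segno / SEGS_PER_SEC) * SEGS_PER_SEC;   /* align to section */
}

int main(void)
{
        unsigned s = get_victim();      /* 9 aligns down to 8 */

        printf("victim segment %u\n", s);
        next_victim_seg = s + 1;        /* one segment migrated this round */
        printf("resumes at %u\n", get_victim());
        return 0;
}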
@@ -658,6 +675,14 @@ got_it:
fio.page = page;
fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
+ /*
+ * don't cache encrypted data into meta inode until previous dirty
+ * data has been written back, to avoid racing between GC and flush.
+ */
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
+ f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
+
fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
dn.data_blkaddr,
FGP_LOCK | FGP_CREAT, GFP_NOFS);
@@ -743,7 +768,9 @@ static int move_data_block(struct inode *inode, block_t bidx,
* don't cache encrypted data into meta inode until previous dirty
* data were writebacked to avoid racing between GC and flush.
*/
- f2fs_wait_on_page_writeback(page, DATA, true);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
+ f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
if (err)
@@ -802,8 +829,8 @@ static int move_data_block(struct inode *inode, block_t bidx,
}
write_page:
+ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
set_page_dirty(fio.encrypted_page);
- f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
if (clear_page_dirty_for_io(fio.encrypted_page))
dec_page_count(fio.sbi, F2FS_DIRTY_META);
@@ -811,7 +838,7 @@ write_page:
ClearPageError(page);
/* allocate block address */
- f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+ f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
fio.op = REQ_OP_WRITE;
fio.op_flags = REQ_SYNC | REQ_NOIDLE;
@@ -897,8 +924,9 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
bool is_dirty = PageDirty(page);
retry:
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
set_page_dirty(page);
- f2fs_wait_on_page_writeback(page, DATA, true);
if (clear_page_dirty_for_io(page)) {
inode_dec_dirty_pages(inode);
f2fs_remove_dirty_inode(inode);
@@ -1093,15 +1121,18 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
struct blk_plug plug;
unsigned int segno = start_segno;
unsigned int end_segno = start_segno + sbi->segs_per_sec;
- int seg_freed = 0;
+ int seg_freed = 0, migrated = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
int submitted = 0;
+ if (__is_large_section(sbi))
+ end_segno = rounddown(end_segno, sbi->segs_per_sec);
+
/* readahead multi ssa blocks those have contiguous address */
- if (sbi->segs_per_sec > 1)
+ if (__is_large_section(sbi))
f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
- sbi->segs_per_sec, META_SSA, true);
+ end_segno - segno, META_SSA, true);
/* reference all summary page */
while (segno < end_segno) {
@@ -1130,10 +1161,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
GET_SUM_BLOCK(sbi, segno));
f2fs_put_page(sum_page, 0);
- if (get_valid_blocks(sbi, segno, false) == 0 ||
- !PageUptodate(sum_page) ||
- unlikely(f2fs_cp_error(sbi)))
- goto next;
+ if (get_valid_blocks(sbi, segno, false) == 0)
+ goto freed;
+ if (__is_large_section(sbi) &&
+ migrated >= sbi->migration_granularity)
+ goto skip;
+ if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
+ goto skip;
sum = page_address(sum_page);
if (type != GET_SUM_TYPE((&sum->footer))) {
@@ -1141,7 +1175,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
"type [%d, %d] in SSA and SIT",
segno, type, GET_SUM_TYPE((&sum->footer)));
set_sbi_flag(sbi, SBI_NEED_FSCK);
- goto next;
+ goto skip;
}
/*
@@ -1160,10 +1194,15 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
stat_inc_seg_count(sbi, type, gc_type);
+freed:
if (gc_type == FG_GC &&
get_valid_blocks(sbi, segno, false) == 0)
seg_freed++;
-next:
+ migrated++;
+
+ if (__is_large_section(sbi) && segno + 1 < end_segno)
+ sbi->next_victim_seg[gc_type] = segno + 1;
+skip:
f2fs_put_page(sum_page, 0);
}
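[Editor's note] This is the producer side of the cursor above: a large section is now migrated at most sbi->migration_granularity segments per round, and the loop records segno + 1 so the next round resumes where it stopped. A compact model of that cap-and-resume loop (all values made up):

#include <stdio.h>

#define NULL_SEGNO (~0u)

static unsigned next_victim_seg = NULL_SEGNO;

static void gc_one_section(unsigned start, unsigned segs_per_sec,
                           unsigned granularity)
{
        unsigned end = start + segs_per_sec;
        unsigned migrated = 0;

        for (unsigned segno = start; segno < end; segno++) {
                if (migrated >= granularity) {  /* per-round cap hit */
                        next_victim_seg = segno;
                        return;
                }
                printf("migrating segment %u\n", segno);
                migrated++;
        }
        next_victim_seg = NULL_SEGNO;           /* section fully handled */
}

int main(void)
{
        gc_one_section(8, 4, 2);
        printf("next round resumes at segment %u\n", next_victim_seg);
        return 0;
}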
@@ -1307,7 +1346,7 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
/* give warm/cold data area from slower device */
- if (sbi->s_ndevs && sbi->segs_per_sec == 1)
+ if (sbi->s_ndevs && !__is_large_section(sbi))
SIT_I(sbi)->last_victim[ALLOC_NEXT] =
GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index fdf4abcf2dab..b63b2ae1acc7 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -73,7 +73,7 @@ void f2fs_truncate_inline_inode(struct inode *inode,
addr = inline_data_addr(inode, ipage);
- f2fs_wait_on_page_writeback(ipage, NODE, true);
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
set_page_dirty(ipage);
@@ -179,7 +179,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
fio.old_blkaddr = dn->data_blkaddr;
set_inode_flag(dn->inode, FI_HOT_DATA);
f2fs_outplace_write_data(dn, &fio);
- f2fs_wait_on_page_writeback(page, DATA, true);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
if (dirty) {
inode_dec_dirty_pages(dn->inode);
f2fs_remove_dirty_inode(dn->inode);
@@ -254,7 +254,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
f2fs_bug_on(F2FS_I_SB(inode), page->index);
- f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
+ f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
src_addr = kmap_atomic(page);
dst_addr = inline_data_addr(inode, dn.inode_page);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
@@ -295,7 +295,7 @@ process_inline:
ipage = f2fs_get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
- f2fs_wait_on_page_writeback(ipage, NODE, true);
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
src_addr = inline_data_addr(inode, npage);
dst_addr = inline_data_addr(inode, ipage);
@@ -409,7 +409,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
goto out;
}
- f2fs_wait_on_page_writeback(page, DATA, true);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
dentry_blk = page_address(page);
@@ -519,18 +519,18 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
stat_dec_inline_dir(dir);
clear_inode_flag(dir, FI_INLINE_DENTRY);
- kfree(backup_dentry);
+ kvfree(backup_dentry);
return 0;
recover:
lock_page(ipage);
- f2fs_wait_on_page_writeback(ipage, NODE, true);
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
f2fs_i_depth_write(dir, 0);
f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
set_page_dirty(ipage);
f2fs_put_page(ipage, 1);
- kfree(backup_dentry);
+ kvfree(backup_dentry);
return err;
}
@@ -583,7 +583,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
}
}
- f2fs_wait_on_page_writeback(ipage, NODE, true);
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
name_hash = f2fs_dentry_hash(new_name, NULL);
f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
@@ -615,7 +615,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
int i;
lock_page(page);
- f2fs_wait_on_page_writeback(page, NODE, true);
+ f2fs_wait_on_page_writeback(page, NODE, true, true);
inline_dentry = inline_data_addr(dir, page);
make_dentry_ptr_inline(dir, &d, inline_dentry);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 1b1f22faff22..3e739e4986b8 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -103,7 +103,7 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
while (start < end) {
if (*start++) {
- f2fs_wait_on_page_writeback(ipage, NODE, true);
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
set_inode_flag(inode, FI_DATA_EXIST);
set_raw_inline(inode, F2FS_INODE(ipage));
@@ -118,7 +118,7 @@ static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page
{
struct f2fs_inode *ri = &F2FS_NODE(page)->i;
- if (!f2fs_sb_has_inode_chksum(sbi->sb))
+ if (!f2fs_sb_has_inode_chksum(sbi))
return false;
if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
@@ -218,7 +218,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
return false;
}
- if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
+ if (f2fs_sb_has_flexible_inline_xattr(sbi)
&& !f2fs_has_extra_attr(inode)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
@@ -228,7 +228,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
}
if (f2fs_has_extra_attr(inode) &&
- !f2fs_sb_has_extra_attr(sbi->sb)) {
+ !f2fs_sb_has_extra_attr(sbi)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
"%s: inode (ino=%lx) is with extra_attr, "
@@ -340,7 +340,7 @@ static int do_read_inode(struct inode *inode)
fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
le16_to_cpu(ri->i_extra_isize) : 0;
- if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
+ if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
} else if (f2fs_has_inline_xattr(inode) ||
f2fs_has_inline_dentry(inode)) {
@@ -390,14 +390,14 @@ static int do_read_inode(struct inode *inode)
if (fi->i_flags & F2FS_PROJINHERIT_FL)
set_inode_flag(inode, FI_PROJ_INHERIT);
- if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
+ if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
i_projid = (projid_t)le32_to_cpu(ri->i_projid);
else
i_projid = F2FS_DEF_PROJID;
fi->i_projid = make_kprojid(&init_user_ns, i_projid);
- if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
+ if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
@@ -497,7 +497,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
struct f2fs_inode *ri;
struct extent_tree *et = F2FS_I(inode)->extent_tree;
- f2fs_wait_on_page_writeback(node_page, NODE, true);
+ f2fs_wait_on_page_writeback(node_page, NODE, true, true);
set_page_dirty(node_page);
f2fs_inode_synced(inode);
@@ -542,11 +542,11 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
if (f2fs_has_extra_attr(inode)) {
ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
- if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb))
+ if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
ri->i_inline_xattr_size =
cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
- if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+ if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
i_projid)) {
projid_t i_projid;
@@ -556,7 +556,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
ri->i_projid = cpu_to_le32(i_projid);
}
- if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
+ if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
i_crtime)) {
ri->i_crtime =
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index f2cdf47b76b6..367302907d07 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -61,7 +61,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
goto fail;
}
- if (f2fs_sb_has_project_quota(sbi->sb) &&
+ if (f2fs_sb_has_project_quota(sbi) &&
(F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid;
else
@@ -79,7 +79,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
f2fs_may_encrypt(inode))
f2fs_set_encrypted_inode(inode);
- if (f2fs_sb_has_extra_attr(sbi->sb)) {
+ if (f2fs_sb_has_extra_attr(sbi)) {
set_inode_flag(inode, FI_EXTRA_ATTR);
F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
}
@@ -92,7 +92,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (f2fs_may_inline_dentry(inode))
set_inode_flag(inode, FI_INLINE_DENTRY);
- if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
+ if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
if (f2fs_has_inline_xattr(inode))
xattr_size = F2FS_OPTION(sbi).inline_xattr_size;
@@ -634,7 +634,7 @@ out_f2fs_handle_failed_inode:
f2fs_handle_failed_inode(inode);
out_free_encrypted_link:
if (disk_link.name != (unsigned char *)symname)
- kfree(disk_link.name);
+ kvfree(disk_link.name);
return err;
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 08edc0930831..f6a1e1e0f891 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -826,6 +826,7 @@ static int truncate_node(struct dnode_of_data *dn)
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info ni;
int err;
+ pgoff_t index;
err = f2fs_get_node_info(sbi, dn->nid, &ni);
if (err)
@@ -845,10 +846,11 @@ static int truncate_node(struct dnode_of_data *dn)
clear_node_page_dirty(dn->node_page);
set_sbi_flag(sbi, SBI_IS_DIRTY);
+ index = dn->node_page->index;
f2fs_put_page(dn->node_page, 1);
invalidate_mapping_pages(NODE_MAPPING(sbi),
- dn->node_page->index, dn->node_page->index);
+ index, index);
dn->node_page = NULL;
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
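[Editor's note] The truncate_node() change fixes a use-after-free: dn->node_page->index was read after f2fs_put_page() dropped the last reference. The pattern, copying out what you still need before releasing the object, in a minimal userspace reproduction where struct page and put_page() are stand-ins that simply free the allocation:

#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long index; };

static void put_page(struct page *p) { free(p); }       /* drops last ref */

static void truncate_node(struct page *node_page)
{
        unsigned long index = node_page->index;         /* copy out first */

        put_page(node_page);                    /* page may be freed here */
        printf("invalidate range [%lu, %lu]\n", index, index);
        /* buggy form: reading node_page->index here is a use-after-free */
}

int main(void)
{
        struct page *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        p->index = 42;
        truncate_node(p);
        return 0;
}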
@@ -1104,7 +1106,7 @@ skip_partial:
ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
lock_page(page);
BUG_ON(page->mapping != NODE_MAPPING(sbi));
- f2fs_wait_on_page_writeback(page, NODE, true);
+ f2fs_wait_on_page_writeback(page, NODE, true, true);
ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
set_page_dirty(page);
unlock_page(page);
@@ -1232,7 +1234,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
new_ni.version = 0;
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
- f2fs_wait_on_page_writeback(page, NODE, true);
+ f2fs_wait_on_page_writeback(page, NODE, true, true);
fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
set_cold_node(page, S_ISDIR(dn->inode->i_mode));
if (!PageUptodate(page))
@@ -1598,10 +1600,10 @@ int f2fs_move_node_page(struct page *node_page, int gc_type)
.for_reclaim = 0,
};
+ f2fs_wait_on_page_writeback(node_page, NODE, true, true);
+
set_page_dirty(node_page);
- f2fs_wait_on_page_writeback(node_page, NODE, true);
- f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
if (!clear_page_dirty_for_io(node_page)) {
err = -EAGAIN;
goto out_page;
@@ -1689,8 +1691,7 @@ continue_unlock:
goto continue_unlock;
}
- f2fs_wait_on_page_writeback(page, NODE, true);
- BUG_ON(PageWriteback(page));
+ f2fs_wait_on_page_writeback(page, NODE, true, true);
set_fsync_mark(page, 0);
set_dentry_mark(page, 0);
@@ -1741,7 +1742,7 @@ continue_unlock:
"Retry to write fsync mark: ino=%u, idx=%lx",
ino, last_page->index);
lock_page(last_page);
- f2fs_wait_on_page_writeback(last_page, NODE, true);
+ f2fs_wait_on_page_writeback(last_page, NODE, true, true);
set_page_dirty(last_page);
unlock_page(last_page);
goto retry;
@@ -1822,9 +1823,8 @@ continue_unlock:
goto lock_node;
}
- f2fs_wait_on_page_writeback(page, NODE, true);
+ f2fs_wait_on_page_writeback(page, NODE, true, true);
- BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
@@ -1891,7 +1891,7 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
get_page(page);
spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
- f2fs_wait_on_page_writeback(page, NODE, true);
+ f2fs_wait_on_page_writeback(page, NODE, true, false);
if (TestClearPageError(page))
ret = -EIO;
@@ -2472,7 +2472,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
src_addr = inline_xattr_addr(inode, page);
inline_size = inline_xattr_size(inode);
- f2fs_wait_on_page_writeback(ipage, NODE, true);
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
memcpy(dst_addr, src_addr, inline_size);
update_inode:
f2fs_update_inode(inode, ipage);
@@ -2566,17 +2566,17 @@ retry:
if (dst->i_inline & F2FS_EXTRA_ATTR) {
dst->i_extra_isize = src->i_extra_isize;
- if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) &&
+ if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
i_inline_xattr_size))
dst->i_inline_xattr_size = src->i_inline_xattr_size;
- if (f2fs_sb_has_project_quota(sbi->sb) &&
+ if (f2fs_sb_has_project_quota(sbi) &&
F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
i_projid))
dst->i_projid = src->i_projid;
- if (f2fs_sb_has_inode_crtime(sbi->sb) &&
+ if (f2fs_sb_has_inode_crtime(sbi) &&
F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
i_crtime_nsec)) {
dst->i_crtime = src->i_crtime;
@@ -3118,17 +3118,17 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
for (i = 0; i < nm_i->nat_blocks; i++)
kvfree(nm_i->free_nid_bitmap[i]);
- kfree(nm_i->free_nid_bitmap);
+ kvfree(nm_i->free_nid_bitmap);
}
kvfree(nm_i->free_nid_count);
- kfree(nm_i->nat_bitmap);
- kfree(nm_i->nat_bits);
+ kvfree(nm_i->nat_bitmap);
+ kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
- kfree(nm_i->nat_bitmap_mir);
+ kvfree(nm_i->nat_bitmap_mir);
#endif
sbi->nm_info = NULL;
- kfree(nm_i);
+ kvfree(nm_i);
}
int __init f2fs_create_node_manager_caches(void)
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 1c73d879a9bc..e05af5df5648 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -361,7 +361,7 @@ static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
struct f2fs_node *rn = F2FS_NODE(p);
- f2fs_wait_on_page_writeback(p, NODE, true);
+ f2fs_wait_on_page_writeback(p, NODE, true, true);
if (i)
rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 336273a81fba..aa3f5633a20c 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -250,7 +250,7 @@ static int recover_inode(struct inode *inode, struct page *page)
i_gid_write(inode, le32_to_cpu(raw->i_gid));
if (raw->i_inline & F2FS_EXTRA_ATTR) {
- if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+ if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
i_projid)) {
projid_t i_projid;
@@ -531,7 +531,7 @@ retry_dn:
goto out;
}
- f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+ f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
err = f2fs_get_node_info(sbi, dn.nid, &ni);
if (err)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index fb4be4629add..ccefcf8016c4 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -229,7 +229,7 @@ static int __revoke_inmem_pages(struct inode *inode,
lock_page(page);
- f2fs_wait_on_page_writeback(page, DATA, true);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
if (recover) {
struct dnode_of_data dn;
@@ -387,8 +387,9 @@ static int __f2fs_commit_inmem_pages(struct inode *inode)
if (page->mapping == inode->i_mapping) {
trace_f2fs_commit_inmem_page(page, INMEM);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
set_page_dirty(page);
- f2fs_wait_on_page_writeback(page, DATA, true);
if (clear_page_dirty_for_io(page)) {
inode_dec_dirty_pages(inode);
f2fs_remove_dirty_inode(inode);
@@ -620,14 +621,16 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
return 0;
if (!test_opt(sbi, FLUSH_MERGE)) {
+ atomic_inc(&fcc->queued_flush);
ret = submit_flush_wait(sbi, ino);
+ atomic_dec(&fcc->queued_flush);
atomic_inc(&fcc->issued_flush);
return ret;
}
- if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) {
+ if (atomic_inc_return(&fcc->queued_flush) == 1 || sbi->s_ndevs > 1) {
ret = submit_flush_wait(sbi, ino);
- atomic_dec(&fcc->issing_flush);
+ atomic_dec(&fcc->queued_flush);
atomic_inc(&fcc->issued_flush);
return ret;
@@ -646,14 +649,14 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
if (fcc->f2fs_issue_flush) {
wait_for_completion(&cmd.wait);
- atomic_dec(&fcc->issing_flush);
+ atomic_dec(&fcc->queued_flush);
} else {
struct llist_node *list;
list = llist_del_all(&fcc->issue_list);
if (!list) {
wait_for_completion(&cmd.wait);
- atomic_dec(&fcc->issing_flush);
+ atomic_dec(&fcc->queued_flush);
} else {
struct flush_cmd *tmp, *next;
@@ -662,7 +665,7 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
llist_for_each_entry_safe(tmp, next, list, llnode) {
if (tmp == &cmd) {
cmd.ret = ret;
- atomic_dec(&fcc->issing_flush);
+ atomic_dec(&fcc->queued_flush);
continue;
}
tmp->ret = ret;
@@ -691,7 +694,7 @@ int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
if (!fcc)
return -ENOMEM;
atomic_set(&fcc->issued_flush, 0);
- atomic_set(&fcc->issing_flush, 0);
+ atomic_set(&fcc->queued_flush, 0);
init_waitqueue_head(&fcc->flush_wait_queue);
init_llist_head(&fcc->issue_list);
SM_I(sbi)->fcc_info = fcc;
@@ -703,7 +706,7 @@ init_thread:
"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(fcc->f2fs_issue_flush)) {
err = PTR_ERR(fcc->f2fs_issue_flush);
- kfree(fcc);
+ kvfree(fcc);
SM_I(sbi)->fcc_info = NULL;
return err;
}
@@ -722,7 +725,7 @@ void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
kthread_stop(flush_thread);
}
if (free) {
- kfree(fcc);
+ kvfree(fcc);
SM_I(sbi)->fcc_info = NULL;
}
}
@@ -907,7 +910,7 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
dc->len = len;
dc->ref = 0;
dc->state = D_PREP;
- dc->issuing = 0;
+ dc->queued = 0;
dc->error = 0;
init_completion(&dc->wait);
list_add_tail(&dc->list, pend_list);
@@ -939,7 +942,7 @@ static void __detach_discard_cmd(struct discard_cmd_control *dcc,
struct discard_cmd *dc)
{
if (dc->state == D_DONE)
- atomic_sub(dc->issuing, &dcc->issing_discard);
+ atomic_sub(dc->queued, &dcc->queued_discard);
list_del(&dc->list);
rb_erase(&dc->rb_node, &dcc->root);
@@ -1223,12 +1226,12 @@ submit:
dc->bio_ref++;
spin_unlock_irqrestore(&dc->lock, flags);
- atomic_inc(&dcc->issing_discard);
- dc->issuing++;
+ atomic_inc(&dcc->queued_discard);
+ dc->queued++;
list_move_tail(&dc->list, wait_list);
/* sanity check on discard range */
- __check_sit_bitmap(sbi, start, start + len);
+ __check_sit_bitmap(sbi, lstart, lstart + len);
bio->bi_private = dc;
bio->bi_end_io = f2fs_submit_discard_endio;
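[Editor's note] __check_sit_bitmap() must be fed logical main-area addresses (lstart) rather than per-device physical ones (start), since the SIT bitmaps are indexed by logical block number; on multi-device setups the two diverge. A toy illustration of that class of bug, with a made-up device offset:

#include <assert.h>
#include <stdbool.h>

#define DEV_BASE 1000u  /* made-up physical offset of this device */

static bool valid[16];  /* bitmap indexed by *logical* block number */

static bool check_bit(unsigned logical) { return valid[logical]; }

int main(void)
{
        unsigned lstart = 5;                    /* logical address */
        unsigned start = DEV_BASE + lstart;     /* physical address */

        valid[lstart] = true;
        assert(check_bit(lstart));      /* correct: logical index */
        (void)start;    /* check_bit(start) would index far out of bounds */
        return 0;
}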
@@ -1725,6 +1728,10 @@ static int issue_discard_thread(void *data)
if (dcc->discard_wake)
dcc->discard_wake = 0;
+ /* clean up pending candidates before going to sleep */
+ if (atomic_read(&dcc->queued_discard))
+ __wait_all_discard_cmd(sbi, NULL);
+
if (try_to_freeze())
continue;
if (f2fs_readonly(sbi->sb))
@@ -1810,7 +1817,7 @@ static int __issue_discard_async(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
- if (f2fs_sb_has_blkzoned(sbi->sb) &&
+ if (f2fs_sb_has_blkzoned(sbi) &&
bdev_zoned_model(bdev) != BLK_ZONED_NONE)
return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
@@ -1958,7 +1965,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
unsigned int start = 0, end = -1;
unsigned int secno, start_segno;
bool force = (cpc->reason & CP_DISCARD);
- bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
+ bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
mutex_lock(&dirty_i->seglist_lock);
@@ -1990,7 +1997,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
(end - 1) <= cpc->trim_end)
continue;
- if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
+ if (!test_opt(sbi, LFS) || !__is_large_section(sbi)) {
f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
(end - start) << sbi->log_blocks_per_seg);
continue;
@@ -2022,7 +2029,7 @@ find_next:
sbi->blocks_per_seg, cur_pos);
len = next_pos - cur_pos;
- if (f2fs_sb_has_blkzoned(sbi->sb) ||
+ if (f2fs_sb_has_blkzoned(sbi) ||
(force && len < cpc->trim_minlen))
goto skip;
@@ -2070,7 +2077,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&dcc->fstrim_list);
mutex_init(&dcc->cmd_lock);
atomic_set(&dcc->issued_discard, 0);
- atomic_set(&dcc->issing_discard, 0);
+ atomic_set(&dcc->queued_discard, 0);
atomic_set(&dcc->discard_cmd_cnt, 0);
dcc->nr_discards = 0;
dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
@@ -2086,7 +2093,7 @@ init_thread:
"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(dcc->f2fs_issue_discard)) {
err = PTR_ERR(dcc->f2fs_issue_discard);
- kfree(dcc);
+ kvfree(dcc);
SM_I(sbi)->dcc_info = NULL;
return err;
}
@@ -2103,7 +2110,7 @@ static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
f2fs_stop_discard_thread(sbi);
- kfree(dcc);
+ kvfree(dcc);
SM_I(sbi)->dcc_info = NULL;
}
@@ -2222,7 +2229,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
/* update total number of valid blocks to be written in ckpt area */
SIT_I(sbi)->written_valid_blocks += del;
- if (sbi->segs_per_sec > 1)
+ if (__is_large_section(sbi))
get_sec_entry(sbi, segno)->valid_blocks += del;
}
@@ -2488,7 +2495,7 @@ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
{
/* if segs_per_sec is large than 1, we need to keep original policy. */
- if (sbi->segs_per_sec != 1)
+ if (__is_large_section(sbi))
return CURSEG_I(sbi, type)->segno;
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
@@ -2798,7 +2805,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
struct discard_policy dpolicy;
unsigned long long trimmed = 0;
int err = 0;
- bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
+ bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
@@ -3349,16 +3356,18 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
}
void f2fs_wait_on_page_writeback(struct page *page,
- enum page_type type, bool ordered)
+ enum page_type type, bool ordered, bool locked)
{
if (PageWriteback(page)) {
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
- if (ordered)
+ if (ordered) {
wait_on_page_writeback(page);
- else
+ f2fs_bug_on(sbi, locked && PageWriteback(page));
+ } else {
wait_for_stable_page(page);
+ }
}
}
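[Editor's note] The extra "locked" argument lets f2fs_wait_on_page_writeback() assert that the wait was conclusive: with the page lock held, nothing can push the page back into writeback behind the caller, so seeing PageWriteback() after wait_on_page_writeback() is a bug. A toy of that control flow; the page and wait primitives here are fakes, only the assertion shape matches the hunk:

#include <assert.h>
#include <stdbool.h>

struct page { bool writeback; };

static void wait_on_writeback(struct page *p) { p->writeback = false; }

static void wait_on_page_writeback(struct page *p, bool ordered, bool locked)
{
        if (!p->writeback)
                return;
        if (ordered) {
                wait_on_writeback(p);
                /* page lock held: nothing may restart writeback, so the
                 * wait must have been conclusive */
                assert(!(locked && p->writeback));
        }
        /* the !ordered case would wait_for_stable_page() in the kernel */
}

int main(void)
{
        struct page pg = { .writeback = true };

        wait_on_page_writeback(&pg, true, true);
        return 0;
}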
@@ -3375,7 +3384,7 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
if (cpage) {
- f2fs_wait_on_page_writeback(cpage, DATA, true);
+ f2fs_wait_on_page_writeback(cpage, DATA, true, true);
f2fs_put_page(cpage, 1);
}
}
@@ -3957,7 +3966,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
if (!sit_i->tmp_map)
return -ENOMEM;
- if (sbi->segs_per_sec > 1) {
+ if (__is_large_section(sbi)) {
sit_i->sec_entries =
f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
MAIN_SECS(sbi)),
@@ -4112,7 +4121,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
se->valid_blocks;
}
- if (sbi->segs_per_sec > 1)
+ if (__is_large_section(sbi))
get_sec_entry(sbi, start)->valid_blocks +=
se->valid_blocks;
}
@@ -4156,7 +4165,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
sbi->discard_blks -= se->valid_blocks;
}
- if (sbi->segs_per_sec > 1) {
+ if (__is_large_section(sbi)) {
get_sec_entry(sbi, start)->valid_blocks +=
se->valid_blocks;
get_sec_entry(sbi, start)->valid_blocks -=
@@ -4391,7 +4400,7 @@ static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
destroy_victim_secmap(sbi);
SM_I(sbi)->dirty_info = NULL;
- kfree(dirty_i);
+ kvfree(dirty_i);
}
static void destroy_curseg(struct f2fs_sb_info *sbi)
@@ -4403,10 +4412,10 @@ static void destroy_curseg(struct f2fs_sb_info *sbi)
return;
SM_I(sbi)->curseg_array = NULL;
for (i = 0; i < NR_CURSEG_TYPE; i++) {
- kfree(array[i].sum_blk);
- kfree(array[i].journal);
+ kvfree(array[i].sum_blk);
+ kvfree(array[i].journal);
}
- kfree(array);
+ kvfree(array);
}
static void destroy_free_segmap(struct f2fs_sb_info *sbi)
@@ -4417,7 +4426,7 @@ static void destroy_free_segmap(struct f2fs_sb_info *sbi)
SM_I(sbi)->free_info = NULL;
kvfree(free_i->free_segmap);
kvfree(free_i->free_secmap);
- kfree(free_i);
+ kvfree(free_i);
}
static void destroy_sit_info(struct f2fs_sb_info *sbi)
@@ -4430,26 +4439,26 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
if (sit_i->sentries) {
for (start = 0; start < MAIN_SEGS(sbi); start++) {
- kfree(sit_i->sentries[start].cur_valid_map);
+ kvfree(sit_i->sentries[start].cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
- kfree(sit_i->sentries[start].cur_valid_map_mir);
+ kvfree(sit_i->sentries[start].cur_valid_map_mir);
#endif
- kfree(sit_i->sentries[start].ckpt_valid_map);
- kfree(sit_i->sentries[start].discard_map);
+ kvfree(sit_i->sentries[start].ckpt_valid_map);
+ kvfree(sit_i->sentries[start].discard_map);
}
}
- kfree(sit_i->tmp_map);
+ kvfree(sit_i->tmp_map);
kvfree(sit_i->sentries);
kvfree(sit_i->sec_entries);
kvfree(sit_i->dirty_sentries_bitmap);
SM_I(sbi)->sit_info = NULL;
- kfree(sit_i->sit_bitmap);
+ kvfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
- kfree(sit_i->sit_bitmap_mir);
+ kvfree(sit_i->sit_bitmap_mir);
#endif
- kfree(sit_i);
+ kvfree(sit_i);
}
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
@@ -4465,7 +4474,7 @@ void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
destroy_free_segmap(sbi);
destroy_sit_info(sbi);
sbi->sm_info = NULL;
- kfree(sm_info);
+ kvfree(sm_info);
}
int __init f2fs_create_segment_manager_caches(void)
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index ab3465faddf1..a77f76f528b6 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -333,7 +333,7 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
* In order to get # of valid blocks in a section instantly from many
* segments, f2fs manages two counting structures separately.
*/
- if (use_section && sbi->segs_per_sec > 1)
+ if (use_section && __is_large_section(sbi))
return get_sec_entry(sbi, segno)->valid_blocks;
else
return get_seg_entry(sbi, segno)->valid_blocks;
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 9e13db994fdf..a467aca29cfe 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -135,6 +135,6 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
spin_lock(&f2fs_list_lock);
- list_del(&sbi->s_list);
+ list_del_init(&sbi->s_list);
spin_unlock(&f2fs_list_lock);
}
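[Editor's note] list_del_init() leaves sbi->s_list as a valid self-pointing node, which is what makes the new list_empty(&sbi->s_list) test in f2fs_may_extent_tree() well-defined after the sbi has left the shrinker list; a plain list_del() would leave stale pointers behind. A self-contained toy list showing the difference, with the kernel-style list primitives re-implemented for illustration:

#include <assert.h>
#include <stdbool.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

static void list_del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);      /* node becomes a valid empty list again */
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
        struct list_head head, node;

        INIT_LIST_HEAD(&head);
        INIT_LIST_HEAD(&node);
        list_add(&node, &head);
        list_del_init(&node);
        assert(list_empty(&node));      /* the safe test the hunk enables */
        assert(list_empty(&head));
        return 0;
}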
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index c551d5336473..a9e7a1e62c66 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -38,7 +38,7 @@ static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION
-char *f2fs_fault_name[FAULT_MAX] = {
+const char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
[FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
@@ -259,7 +259,7 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
"quota options when quota turned on");
return -EINVAL;
}
- if (f2fs_sb_has_quota_ino(sb)) {
+ if (f2fs_sb_has_quota_ino(sbi)) {
f2fs_msg(sb, KERN_INFO,
"QUOTA feature is enabled, so ignore qf_name");
return 0;
@@ -289,7 +289,7 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
set_opt(sbi, QUOTA);
return 0;
errout:
- kfree(qname);
+ kvfree(qname);
return ret;
}
@@ -302,7 +302,7 @@ static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
" when quota turned on");
return -EINVAL;
}
- kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
+ kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
return 0;
}
@@ -314,7 +314,7 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
* 'grpquota' mount options are allowed even without quota feature
* to support legacy quotas in quota files.
*/
- if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
+ if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
"Cannot enable project quota enforcement.");
return -1;
@@ -348,7 +348,7 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
}
}
- if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) {
+ if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
f2fs_msg(sbi->sb, KERN_INFO,
"QUOTA feature is enabled, so ignore jquota_fmt");
F2FS_OPTION(sbi).s_jquota_fmt = 0;
@@ -399,10 +399,10 @@ static int parse_options(struct super_block *sb, char *options)
set_opt(sbi, BG_GC);
set_opt(sbi, FORCE_FG_GC);
} else {
- kfree(name);
+ kvfree(name);
return -EINVAL;
}
- kfree(name);
+ kvfree(name);
break;
case Opt_disable_roll_forward:
set_opt(sbi, DISABLE_ROLL_FORWARD);
@@ -417,7 +417,7 @@ static int parse_options(struct super_block *sb, char *options)
set_opt(sbi, DISCARD);
break;
case Opt_nodiscard:
- if (f2fs_sb_has_blkzoned(sb)) {
+ if (f2fs_sb_has_blkzoned(sbi)) {
f2fs_msg(sb, KERN_WARNING,
"discard is required for zoned block devices");
return -EINVAL;
@@ -566,11 +566,11 @@ static int parse_options(struct super_block *sb, char *options)
return -ENOMEM;
if (strlen(name) == 8 &&
!strncmp(name, "adaptive", 8)) {
- if (f2fs_sb_has_blkzoned(sb)) {
+ if (f2fs_sb_has_blkzoned(sbi)) {
f2fs_msg(sb, KERN_WARNING,
"adaptive mode is not allowed with "
"zoned block device feature");
- kfree(name);
+ kvfree(name);
return -EINVAL;
}
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
@@ -578,10 +578,10 @@ static int parse_options(struct super_block *sb, char *options)
!strncmp(name, "lfs", 3)) {
set_opt_mode(sbi, F2FS_MOUNT_LFS);
} else {
- kfree(name);
+ kvfree(name);
return -EINVAL;
}
- kfree(name);
+ kvfree(name);
break;
case Opt_io_size_bits:
if (args->from && match_int(args, &arg))
@@ -714,10 +714,10 @@ static int parse_options(struct super_block *sb, char *options)
!strncmp(name, "fs-based", 8)) {
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
} else {
- kfree(name);
+ kvfree(name);
return -EINVAL;
}
- kfree(name);
+ kvfree(name);
break;
case Opt_alloc:
name = match_strdup(&args[0]);
@@ -731,10 +731,10 @@ static int parse_options(struct super_block *sb, char *options)
!strncmp(name, "reuse", 5)) {
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
} else {
- kfree(name);
+ kvfree(name);
return -EINVAL;
}
- kfree(name);
+ kvfree(name);
break;
case Opt_fsync:
name = match_strdup(&args[0]);
@@ -751,14 +751,14 @@ static int parse_options(struct super_block *sb, char *options)
F2FS_OPTION(sbi).fsync_mode =
FSYNC_MODE_NOBARRIER;
} else {
- kfree(name);
+ kvfree(name);
return -EINVAL;
}
- kfree(name);
+ kvfree(name);
break;
case Opt_test_dummy_encryption:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
- if (!f2fs_sb_has_encrypt(sb)) {
+ if (!f2fs_sb_has_encrypt(sbi)) {
f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
return -EINVAL;
}
@@ -783,10 +783,10 @@ static int parse_options(struct super_block *sb, char *options)
!strncmp(name, "disable", 7)) {
set_opt(sbi, DISABLE_CHECKPOINT);
} else {
- kfree(name);
+ kvfree(name);
return -EINVAL;
}
- kfree(name);
+ kvfree(name);
break;
default:
f2fs_msg(sb, KERN_ERR,
@@ -799,13 +799,13 @@ static int parse_options(struct super_block *sb, char *options)
if (f2fs_check_quota_options(sbi))
return -EINVAL;
#else
- if (f2fs_sb_has_quota_ino(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+ if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
f2fs_msg(sbi->sb, KERN_INFO,
"Filesystem with quota feature cannot be mounted RDWR "
"without CONFIG_QUOTA");
return -EINVAL;
}
- if (f2fs_sb_has_project_quota(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+ if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
f2fs_msg(sb, KERN_ERR,
"Filesystem with project quota feature cannot be "
"mounted RDWR without CONFIG_QUOTA");
@@ -821,8 +821,8 @@ static int parse_options(struct super_block *sb, char *options)
}
if (test_opt(sbi, INLINE_XATTR_SIZE)) {
- if (!f2fs_sb_has_extra_attr(sb) ||
- !f2fs_sb_has_flexible_inline_xattr(sb)) {
+ if (!f2fs_sb_has_extra_attr(sbi) ||
+ !f2fs_sb_has_flexible_inline_xattr(sbi)) {
f2fs_msg(sb, KERN_ERR,
"extra_attr or flexible_inline_xattr "
"feature is off");
@@ -1017,10 +1017,10 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
for (i = 0; i < sbi->s_ndevs; i++) {
blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
- kfree(FDEV(i).blkz_type);
+ kvfree(FDEV(i).blkz_type);
#endif
}
- kfree(sbi->devs);
+ kvfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
@@ -1058,9 +1058,6 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_write_checkpoint(sbi, &cpc);
}
- /* f2fs_write_checkpoint can update stat informaion */
- f2fs_destroy_stats(sbi);
-
/*
* normally superblock is clean, so we need to release this.
* In addition, EIO will skip do checkpoint, we need this as well.
@@ -1078,32 +1075,41 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_bug_on(sbi, sbi->fsync_node_num);
iput(sbi->node_inode);
+ sbi->node_inode = NULL;
+
iput(sbi->meta_inode);
+ sbi->meta_inode = NULL;
+
+ /*
+ * iput() can update stat information, if f2fs_write_checkpoint()
+ * above failed with error.
+ */
+ f2fs_destroy_stats(sbi);
/* destroy f2fs internal modules */
f2fs_destroy_node_manager(sbi);
f2fs_destroy_segment_manager(sbi);
- kfree(sbi->ckpt);
+ kvfree(sbi->ckpt);
f2fs_unregister_sysfs(sbi);
sb->s_fs_info = NULL;
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
- kfree(sbi->raw_super);
+ kvfree(sbi->raw_super);
destroy_device_list(sbi);
if (sbi->write_io_dummy)
mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
- kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+ kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
destroy_percpu_info(sbi);
for (i = 0; i < NR_PAGE_TYPE; i++)
- kfree(sbi->write_io[i]);
- kfree(sbi);
+ kvfree(sbi->write_io[i]);
+ kvfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
@@ -1432,7 +1438,7 @@ static void default_options(struct f2fs_sb_info *sbi)
clear_opt(sbi, DISABLE_CHECKPOINT);
set_opt(sbi, FLUSH_MERGE);
set_opt(sbi, DISCARD);
- if (f2fs_sb_has_blkzoned(sbi->sb))
+ if (f2fs_sb_has_blkzoned(sbi))
set_opt_mode(sbi, F2FS_MOUNT_LFS);
else
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
@@ -1458,19 +1464,16 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
sbi->sb->s_flags |= MS_ACTIVE;
- mutex_lock(&sbi->gc_mutex);
f2fs_update_time(sbi, DISABLE_TIME);
while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+ mutex_lock(&sbi->gc_mutex);
err = f2fs_gc(sbi, true, false, NULL_SEGNO);
if (err == -ENODATA)
break;
- if (err && err != -EAGAIN) {
- mutex_unlock(&sbi->gc_mutex);
+ if (err && err != -EAGAIN)
return err;
- }
}
- mutex_unlock(&sbi->gc_mutex);
err = sync_filesystem(sbi->sb);
if (err)
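[Editor's note] f2fs_gc() drops gc_mutex on its way out, so the old code's single lock before the loop left later iterations running unlocked and the trailing unlock unbalanced; the fix re-acquires the mutex on each pass. A minimal pthread model of a loop whose callee releases the lock (ENODATA stands in for the "nothing left to GC" result):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gc_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Like f2fs_gc(), the callee releases the lock before returning. */
static int do_gc_round(int round)
{
        int err = (round == 3) ? -ENODATA : 0;  /* drained after 3 rounds */

        pthread_mutex_unlock(&gc_mutex);
        return err;
}

int main(void)
{
        for (int round = 0; ; round++) {
                pthread_mutex_lock(&gc_mutex);  /* re-acquired every pass */
                if (do_gc_round(round))
                        break;
        }
        printf("GC drained\n");
        return 0;
}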
@@ -1532,7 +1535,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
GFP_KERNEL);
if (!org_mount_opt.s_qf_names[i]) {
for (j = 0; j < i; j++)
- kfree(org_mount_opt.s_qf_names[j]);
+ kvfree(org_mount_opt.s_qf_names[j]);
return -ENOMEM;
}
} else {
@@ -1576,7 +1579,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
sb->s_flags &= ~MS_RDONLY;
if (sb_any_quota_suspended(sb)) {
dquot_resume(sb, -1);
- } else if (f2fs_sb_has_quota_ino(sb)) {
+ } else if (f2fs_sb_has_quota_ino(sbi)) {
err = f2fs_enable_quotas(sb);
if (err)
goto restore_opts;
@@ -1652,7 +1655,7 @@ skip:
#ifdef CONFIG_QUOTA
/* Release old quota file names */
for (i = 0; i < MAXQUOTAS; i++)
- kfree(org_mount_opt.s_qf_names[i]);
+ kvfree(org_mount_opt.s_qf_names[i]);
#endif
/* Update the POSIXACL Flag */
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@@ -1673,7 +1676,7 @@ restore_opts:
#ifdef CONFIG_QUOTA
F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++) {
- kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+ kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
}
#endif
@@ -1818,7 +1821,7 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
int enabled = 0;
int i, err;
- if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
+ if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
err = f2fs_enable_quotas(sbi->sb);
if (err) {
f2fs_msg(sbi->sb, KERN_ERR,
@@ -1849,7 +1852,7 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
unsigned long qf_inum;
int err;
- BUG_ON(!f2fs_sb_has_quota_ino(sb));
+ BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
qf_inum = f2fs_qf_ino(sb, type);
if (!qf_inum)
@@ -1994,7 +1997,7 @@ static int f2fs_quota_off(struct super_block *sb, int type)
goto out_put;
err = dquot_quota_off(sb, type);
- if (err || f2fs_sb_has_quota_ino(sb))
+ if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
goto out_put;
inode_lock(inode);
@@ -2177,7 +2180,7 @@ static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
* if LOST_FOUND feature is enabled.
*
*/
- if (f2fs_sb_has_lost_found(sbi->sb) &&
+ if (f2fs_sb_has_lost_found(sbi) &&
inode->i_ino == F2FS_ROOT_INO(sbi))
return -EPERM;
@@ -2400,7 +2403,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
__u32 crc = 0;
/* Check checksum_offset and crc in superblock */
- if (le32_to_cpu(raw_super->feature) & F2FS_FEATURE_SB_CHKSUM) {
+ if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
crc_offset = le32_to_cpu(raw_super->checksum_offset);
if (crc_offset !=
offsetof(struct f2fs_super_block, crc)) {
@@ -2500,10 +2503,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return 1;
}
- if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
+ if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
f2fs_msg(sb, KERN_INFO,
- "Wrong segment_count / block_count (%u > %u)",
- segment_count, le32_to_cpu(raw_super->block_count));
+ "Wrong segment_count / block_count (%u > %llu)",
+ segment_count, le64_to_cpu(raw_super->block_count));
return 1;
}
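[Editor's note] block_count in the on-disk superblock is __le64, so the old le32_to_cpu() read silently truncated it to the low 32 bits and could trip the segment-count sanity check on very large images. A short demo of the truncation; it assumes a little-endian host, and the accessors are local helpers, not the kernel's:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t rd_le32(const void *p)
{
        uint32_t v;

        memcpy(&v, p, sizeof(v));
        return v;
}

static uint64_t rd_le64(const void *p)
{
        uint64_t v;

        memcpy(&v, p, sizeof(v));
        return v;
}

int main(void)
{
        uint64_t block_count = (5ull << 32) | 9;        /* > 2^32 blocks */

        printf("32-bit read: %" PRIu32 "\n", rd_le32(&block_count));    /* 9 */
        printf("64-bit read: %" PRIu64 "\n", rd_le64(&block_count));
        return 0;
}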
@@ -2678,7 +2681,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
static void init_sb_info(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = sbi->raw_super;
- int i, j;
+ int i;
sbi->log_sectors_per_block =
le32_to_cpu(raw_super->log_sectors_per_block);
@@ -2696,7 +2699,10 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
sbi->cur_victim_sec = NULL_SECNO;
+ sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+ sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
+ sbi->migration_granularity = sbi->segs_per_sec;
sbi->dir_level = DEF_DIR_LEVEL;
sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
@@ -2714,9 +2720,6 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
- for (i = 0; i < NR_PAGE_TYPE - 1; i++)
- for (j = HOT; j < NR_TEMP_TYPE; j++)
- mutex_init(&sbi->wio_mutex[i][j]);
init_rwsem(&sbi->io_order_lock);
spin_lock_init(&sbi->cp_lock);
@@ -2753,7 +2756,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
unsigned int n = 0;
int err = -EIO;
- if (!f2fs_sb_has_blkzoned(sbi->sb))
+ if (!f2fs_sb_has_blkzoned(sbi))
return 0;
if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
@@ -2804,7 +2807,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
}
}
- kfree(zones);
+ kvfree(zones);
return err;
}
@@ -2864,7 +2867,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
/* No valid superblock */
if (!*raw_super)
- kfree(super);
+ kvfree(super);
else
err = 0;
@@ -2884,7 +2887,7 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
}
/* we should update superblock crc here */
- if (!recover && f2fs_sb_has_sb_chksum(sbi->sb)) {
+ if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
offsetof(struct f2fs_super_block, crc));
F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
@@ -2976,7 +2979,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
#ifdef CONFIG_BLK_DEV_ZONED
if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
- !f2fs_sb_has_blkzoned(sbi->sb)) {
+ !f2fs_sb_has_blkzoned(sbi)) {
f2fs_msg(sbi->sb, KERN_ERR,
"Zoned block device feature not enabled\n");
return -EINVAL;
@@ -3072,7 +3075,7 @@ try_onemore:
sbi->raw_super = raw_super;
/* precompute checksum seed for metadata */
- if (f2fs_sb_has_inode_chksum(sb))
+ if (f2fs_sb_has_inode_chksum(sbi))
sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
sizeof(raw_super->uuid));
@@ -3082,7 +3085,7 @@ try_onemore:
* devices, but mandatory for host-managed zoned block devices.
*/
#ifndef CONFIG_BLK_DEV_ZONED
- if (f2fs_sb_has_blkzoned(sb)) {
+ if (f2fs_sb_has_blkzoned(sbi)) {
f2fs_msg(sb, KERN_ERR,
"Zoned block device support is not enabled\n");
err = -EOPNOTSUPP;
@@ -3109,13 +3112,13 @@ try_onemore:
#ifdef CONFIG_QUOTA
sb->dq_op = &f2fs_quota_operations;
- if (f2fs_sb_has_quota_ino(sb))
+ if (f2fs_sb_has_quota_ino(sbi))
sb->s_qcop = &dquot_quotactl_sysfile_ops;
else
sb->s_qcop = &f2fs_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
- if (f2fs_sb_has_quota_ino(sbi->sb)) {
+ if (f2fs_sb_has_quota_ino(sbi)) {
for (i = 0; i < MAXQUOTAS; i++) {
if (f2fs_qf_ino(sbi->sb, i))
sbi->nquota_files++;
@@ -3266,30 +3269,30 @@ try_onemore:
f2fs_build_gc_manager(sbi);
+ err = f2fs_build_stats(sbi);
+ if (err)
+ goto free_nm;
+
/* get an inode for node space */
sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
if (IS_ERR(sbi->node_inode)) {
f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
err = PTR_ERR(sbi->node_inode);
- goto free_nm;
+ goto free_stats;
}
- err = f2fs_build_stats(sbi);
- if (err)
- goto free_node_inode;
-
/* read root inode and dentry */
root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
if (IS_ERR(root)) {
f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
err = PTR_ERR(root);
- goto free_stats;
+ goto free_node_inode;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
!root->i_size || !root->i_nlink) {
iput(root);
err = -EINVAL;
- goto free_stats;
+ goto free_node_inode;
}
sb->s_root = d_make_root(root); /* allocate root dentry */
@@ -3304,7 +3307,7 @@ try_onemore:
#ifdef CONFIG_QUOTA
/* Enable quota usage during mount */
- if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
+ if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
err = f2fs_enable_quotas(sb);
if (err)
f2fs_msg(sb, KERN_ERR,
@@ -3376,7 +3379,7 @@ skip_recovery:
if (err)
goto free_meta;
}
- kfree(options);
+ kvfree(options);
/* recover broken superblock */
if (recovery) {
@@ -3399,7 +3402,7 @@ skip_recovery:
free_meta:
#ifdef CONFIG_QUOTA
f2fs_truncate_quota_inode_pages(sb);
- if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
+ if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
f2fs_quota_off_umount(sbi->sb);
#endif
/*
@@ -3413,41 +3416,43 @@ free_meta:
free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
-free_stats:
- f2fs_destroy_stats(sbi);
free_node_inode:
f2fs_release_ino_entry(sbi, true);
truncate_inode_pages_final(NODE_MAPPING(sbi));
iput(sbi->node_inode);
+ sbi->node_inode = NULL;
+free_stats:
+ f2fs_destroy_stats(sbi);
free_nm:
f2fs_destroy_node_manager(sbi);
free_sm:
f2fs_destroy_segment_manager(sbi);
free_devices:
destroy_device_list(sbi);
- kfree(sbi->ckpt);
+ kvfree(sbi->ckpt);
free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
+ sbi->meta_inode = NULL;
free_io_dummy:
mempool_destroy(sbi->write_io_dummy);
free_percpu:
destroy_percpu_info(sbi);
free_bio_info:
for (i = 0; i < NR_PAGE_TYPE; i++)
- kfree(sbi->write_io[i]);
+ kvfree(sbi->write_io[i]);
free_options:
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
- kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+ kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
- kfree(options);
+ kvfree(options);
free_sb_buf:
- kfree(raw_super);
+ kvfree(raw_super);
free_sbi:
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
- kfree(sbi);
+ kvfree(sbi);
/* give only one another chance */
if (retry) {
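The kfree()-to-kvfree() conversions throughout super.c matter because f2fs routes several of these buffers through kvmalloc-style allocation, which may fall back to vmalloc() for large sizes; freeing a vmalloc'd pointer with plain kfree() corrupts memory. A minimal sketch of the pairing rule, using the generic helpers of recent kernels rather than the f2fs wrappers:

#include <linux/mm.h>	/* kvmalloc(), kvfree() */

static void *alloc_big_buf(size_t len)
{
	/* kvmalloc() tries kmalloc() first, then falls back to vmalloc() */
	return kvmalloc(len, GFP_KERNEL);
}

static void free_big_buf(void *buf)
{
	/*
	 * kvfree() handles both backing allocators; plain kfree() would
	 * be wrong whenever the vmalloc fallback path was taken.
	 */
	kvfree(buf);
}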
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index db89741b219b..948c1a211341 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -90,34 +90,34 @@ static ssize_t features_show(struct f2fs_attr *a,
if (!sb->s_bdev->bd_part)
return snprintf(buf, PAGE_SIZE, "0\n");
- if (f2fs_sb_has_encrypt(sb))
+ if (f2fs_sb_has_encrypt(sbi))
len += snprintf(buf, PAGE_SIZE - len, "%s",
"encryption");
- if (f2fs_sb_has_blkzoned(sb))
+ if (f2fs_sb_has_blkzoned(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "blkzoned");
- if (f2fs_sb_has_extra_attr(sb))
+ if (f2fs_sb_has_extra_attr(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "extra_attr");
- if (f2fs_sb_has_project_quota(sb))
+ if (f2fs_sb_has_project_quota(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "projquota");
- if (f2fs_sb_has_inode_chksum(sb))
+ if (f2fs_sb_has_inode_chksum(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "inode_checksum");
- if (f2fs_sb_has_flexible_inline_xattr(sb))
+ if (f2fs_sb_has_flexible_inline_xattr(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "flexible_inline_xattr");
- if (f2fs_sb_has_quota_ino(sb))
+ if (f2fs_sb_has_quota_ino(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "quota_ino");
- if (f2fs_sb_has_inode_crtime(sb))
+ if (f2fs_sb_has_inode_crtime(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "inode_crtime");
- if (f2fs_sb_has_lost_found(sb))
+ if (f2fs_sb_has_lost_found(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "lost_found");
- if (f2fs_sb_has_sb_chksum(sb))
+ if (f2fs_sb_has_sb_chksum(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "sb_checksum");
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -246,6 +246,11 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "migration_granularity")) {
+ if (t == 0 || t > sbi->segs_per_sec)
+ return -EINVAL;
+ }
+
if (!strcmp(a->attr.name, "trim_sections"))
return -EINVAL;
@@ -406,6 +411,7 @@ F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, migration_granularity, migration_granularity);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
@@ -460,6 +466,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(min_hot_blocks),
ATTR_LIST(min_ssr_sections),
ATTR_LIST(max_victim_search),
+ ATTR_LIST(migration_granularity),
ATTR_LIST(dir_level),
ATTR_LIST(ram_thresh),
ATTR_LIST(ra_nid_pages),
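The new migration_granularity attribute bounds how many segments GC migrates at a time; per the store-side check above, writes are rejected unless 1 <= value <= segs_per_sec. A hedged userspace sketch of tuning it (the device path is illustrative; substitute the actual block device name):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical path; the sysfs directory is named after the device. */
#define GRAN_PATH "/sys/fs/f2fs/sda1/migration_granularity"

int main(void)
{
	int fd = open(GRAN_PATH, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Values outside 1..segs_per_sec come back as -EINVAL. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}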
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index c7c3c2a63a85..71b27c55988a 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -334,7 +334,7 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
unsigned int index, unsigned int len,
const char *name, struct f2fs_xattr_entry **xe,
- void **base_addr)
+ void **base_addr, int *base_size)
{
void *cur_addr, *txattr_addr, *last_addr = NULL;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
@@ -345,8 +345,8 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
if (!size && !inline_size)
return -ENODATA;
- txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
- inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
+ *base_size = inline_size + size + XATTR_PADDING_SIZE;
+ txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
if (!txattr_addr)
return -ENOMEM;
@@ -358,8 +358,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
*xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
index, len, name);
- if (*xe)
+ if (*xe) {
+ *base_size = inline_size;
goto check;
+ }
}
/* read from xattr node block */
@@ -461,7 +463,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
}
f2fs_wait_on_page_writeback(ipage ? ipage : in_page,
- NODE, true);
+ NODE, true, true);
/* no need to use xattr node block */
if (hsize <= inline_size) {
err = f2fs_truncate_xattr_node(inode);
@@ -485,7 +487,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
goto in_page_out;
}
f2fs_bug_on(sbi, new_nid);
- f2fs_wait_on_page_writeback(xpage, NODE, true);
+ f2fs_wait_on_page_writeback(xpage, NODE, true, true);
} else {
struct dnode_of_data dn;
set_new_dnode(&dn, inode, NULL, NULL, new_nid);
@@ -520,6 +522,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
int error = 0;
unsigned int size, len;
void *base_addr = NULL;
+ int base_size;
if (name == NULL)
return -EINVAL;
@@ -530,7 +533,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
down_read(&F2FS_I(inode)->i_xattr_sem);
error = lookup_all_xattrs(inode, ipage, index, len, name,
- &entry, &base_addr);
+ &entry, &base_addr, &base_size);
up_read(&F2FS_I(inode)->i_xattr_sem);
if (error)
return error;
@@ -544,6 +547,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (buffer) {
char *pval = entry->e_name + entry->e_name_len;
+
+ if (base_size - (pval - (char *)base_addr) < size) {
+ error = -ERANGE;
+ goto out;
+ }
memcpy(buffer, pval, size);
}
error = size;
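The base_size plumbing added above lets f2fs_getxattr() verify that the value an entry claims to hold actually lies inside the buffer read from disk, so a corrupted e_value_size yields -ERANGE instead of an out-of-bounds memcpy. A standalone sketch of the invariant being enforced:

#include <errno.h>
#include <stddef.h>
#include <string.h>

/*
 * Copy an xattr value only if it fits inside the buffer it was parsed
 * from. 'pval' points at the value within 'base'.
 */
static int copy_xattr_value(char *dst, const char *base, size_t base_size,
			    const char *pval, size_t value_size)
{
	/* Bytes remaining between the value and the end of the buffer. */
	if (base_size - (size_t)(pval - base) < value_size)
		return -ERANGE;	/* on-disk metadata is inconsistent */
	memcpy(dst, pval, value_size);
	return 0;
}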
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index ef24894edecc..763fe7737065 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1720,9 +1720,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
goto next_iter;
}
if (ret == -E2BIG) {
+ n += rbm->bii - initial_bii;
rbm->bii = 0;
rbm->offset = 0;
- n += (rbm->bii - initial_bii);
goto res_covered_end_of_rgrp;
}
return ret;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 1544f530ccd0..023e7f32ee1b 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -101,7 +101,8 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
- cancel_delayed_work_sync(&c->wbuf_dwork);
+ if (jffs2_is_writebuffered(c))
+ cancel_delayed_work_sync(&c->wbuf_dwork);
#endif
mutex_lock(&c->alloc_sem);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 0a4457fb0711..85111d740c9d 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
if (num_used
|| alloc->id1.bitmap1.i_used
|| alloc->id1.bitmap1.i_total
- || la->la_bm_off)
- mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
+ || la->la_bm_off) {
+ mlog(ML_ERROR, "inconsistent detected, clean journal with"
+ " unrecovered local alloc, please run fsck.ocfs2!\n"
"found = %u, set = %u, taken = %u, off = %u\n",
num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
le32_to_cpu(alloc->id1.bitmap1.i_total),
OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
+ status = -EINVAL;
+ goto bail;
+ }
+
osb->local_alloc_bh = alloc_bh;
osb->local_alloc_state = OCFS2_LA_ENABLED;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 161441f52ebf..015cdc615dfb 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -333,7 +333,7 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
#ifdef CONFIG_SECCOMP
seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode);
#endif
- seq_printf(m, "\nSpeculation_Store_Bypass:\t");
+ seq_printf(m, "Speculation_Store_Bypass:\t");
switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
case -EINVAL:
seq_printf(m, "unknown");
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index ecdb3baa1283..11e558efd61e 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -488,6 +488,11 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
sig ^= PERSISTENT_RAM_SIG;
if (prz->buffer->sig == sig) {
+ if (buffer_size(prz) == 0) {
+ pr_debug("found existing empty buffer\n");
+ return 0;
+ }
+
if (buffer_size(prz) > prz->buffer_size ||
buffer_start(prz) > buffer_size(prz))
pr_info("found existing invalid buffer, size %zu, start %zu\n",
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index fb9636cc927c..5d8d12746e6e 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -528,7 +528,14 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
if (args->flags & ATTR_CREATE)
return retval;
retval = xfs_attr_shortform_remove(args);
- ASSERT(retval == 0);
+ if (retval)
+ return retval;
+ /*
+ * Since we have removed the old attr, clear ATTR_REPLACE so
+ * that the leaf format add routine won't trip over the attr
+ * not being around.
+ */
+ args->flags &= ~ATTR_REPLACE;
}
if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index de1414ada5a1..a6296c2ffd93 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -137,6 +137,7 @@ struct bdi_writeback {
struct backing_dev_info {
struct list_head bdi_list;
unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
+ unsigned long io_pages; /* max allowed IO size */
unsigned int capabilities; /* Device capabilities */
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
@@ -227,6 +228,14 @@ static inline void wb_get(struct bdi_writeback *wb)
*/
static inline void wb_put(struct bdi_writeback *wb)
{
+ if (WARN_ON_ONCE(!wb->bdi)) {
+ /*
+ * A driver bug might cause a file to be removed before bdi was
+ * initialized.
+ */
+ return;
+ }
+
if (wb != &wb->bdi->wb)
percpu_ref_put(&wb->refcnt);
}
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 519b6e2d769e..661e5c2a8e2a 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -16,6 +16,10 @@
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] __read_mostly = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
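DEFINE_READ_MOSTLY_HASHTABLE behaves exactly like DEFINE_HASHTABLE but annotates the bucket array __read_mostly, so a table that is written rarely (napi_hash below is the first user) does not share cache lines with hot writable data. A minimal usage sketch with a hypothetical node type:

#include <linux/hashtable.h>

struct my_node {
	struct hlist_node node;
	u32 key;
};

/* 256 buckets (2^8), placed in .data..read_mostly */
static DEFINE_READ_MOSTLY_HASHTABLE(example_hash, 8);

static void example_add(struct my_node *n)
{
	hash_add_rcu(example_hash, &n->node, n->key);
}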
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 48849acf34ff..49648aa63ee3 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -185,6 +185,9 @@ struct mmc_host_ops {
int (*notify_load)(struct mmc_host *, enum mmc_load);
void (*notify_halt)(struct mmc_host *mmc, bool halt);
void (*force_err_irq)(struct mmc_host *host, u64 errmask);
+ int (*check_temp)(struct mmc_host *host);
+ int (*reg_temp_callback)(struct mmc_host *host);
+ int (*dereg_temp_callback)(struct mmc_host *host);
};
struct mmc_card;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc712bb82..a8ac3f25b4ec 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -290,9 +290,12 @@ struct svc_rqst {
struct svc_cacherep * rq_cacherep; /* cache info */
struct task_struct *rq_task; /* service thread */
spinlock_t rq_lock; /* per-request lock */
+ struct net *rq_bc_net; /* pointer to backchannel's
+ * net namespace
+ */
};
-#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
+#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
/*
* Rigorous type checking on sockaddr type conversions
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index e5ce8ab0b8b0..26c155bb639b 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -70,7 +70,7 @@ struct virtio_config_ops {
int (*find_vqs)(struct virtio_device *, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
- const char *names[]);
+ const char * const names[]);
void (*del_vqs)(struct virtio_device *);
u64 (*get_features)(struct virtio_device *vdev);
int (*finalize_features)(struct virtio_device *vdev);
@@ -149,6 +149,19 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
return __virtio_test_bit(vdev, fbit);
}
+/**
+ * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * @vdev: the device
+ */
+static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+{
+ /*
+ * Note the reverse polarity of the quirk feature (compared to most
+ * other features), this is for compatibility with legacy systems.
+ */
+ return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+}
+
static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
vq_callback_t *c, const char *n)
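Callers use virtio_has_iommu_quirk() to decide whether a device accesses memory with raw guest-physical addresses (quirk present) or must be mapped through the platform IOMMU/DMA API. A hedged sketch of how a transport might branch on it:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static bool device_needs_dma_api(struct virtio_device *vdev)
{
	/*
	 * Legacy devices bypass the IOMMU and expect guest-physical
	 * addresses; only route through the DMA API when the quirk
	 * is absent, i.e. VIRTIO_F_IOMMU_PLATFORM was negotiated.
	 */
	return !virtio_has_iommu_quirk(vdev);
}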
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
new file mode 100644
index 000000000000..584f9a647ad4
--- /dev/null
+++ b/include/linux/virtio_vsock.h
@@ -0,0 +1,157 @@
+#ifndef _LINUX_VIRTIO_VSOCK_H
+#define _LINUX_VIRTIO_VSOCK_H
+
+#include <uapi/linux/virtio_vsock.h>
+#include <linux/socket.h>
+#include <net/sock.h>
+#include <net/af_vsock.h>
+
+#define VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE 128
+#define VIRTIO_VSOCK_DEFAULT_BUF_SIZE (1024 * 256)
+#define VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE (1024 * 256)
+#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
+#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
+#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
+
+enum {
+ VSOCK_VQ_RX = 0, /* for host to guest data */
+ VSOCK_VQ_TX = 1, /* for guest to host data */
+ VSOCK_VQ_EVENT = 2,
+ VSOCK_VQ_MAX = 3,
+};
+
+/* Per-socket state (accessed via vsk->trans) */
+struct virtio_vsock_sock {
+ struct vsock_sock *vsk;
+
+ /* Protected by lock_sock(sk_vsock(trans->vsk)) */
+ u32 buf_size;
+ u32 buf_size_min;
+ u32 buf_size_max;
+
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+
+ /* Protected by tx_lock */
+ u32 tx_cnt;
+ u32 buf_alloc;
+ u32 peer_fwd_cnt;
+ u32 peer_buf_alloc;
+
+ /* Protected by rx_lock */
+ u32 fwd_cnt;
+ u32 rx_bytes;
+ struct list_head rx_queue;
+};
+
+struct virtio_vsock_pkt {
+ struct virtio_vsock_hdr hdr;
+ struct work_struct work;
+ struct list_head list;
+ /* socket refcnt not held, only use for cancellation */
+ struct vsock_sock *vsk;
+ void *buf;
+ u32 len;
+ u32 off;
+ bool reply;
+};
+
+struct virtio_vsock_pkt_info {
+ u32 remote_cid, remote_port;
+ struct vsock_sock *vsk;
+ struct msghdr *msg;
+ u32 pkt_len;
+ u16 type;
+ u16 op;
+ u32 flags;
+ bool reply;
+};
+
+struct virtio_transport {
+ /* This must be the first field */
+ struct vsock_transport transport;
+
+ /* Takes ownership of the packet */
+ int (*send_pkt)(struct virtio_vsock_pkt *pkt);
+};
+
+ssize_t
+virtio_transport_stream_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len,
+ int type);
+int
+virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len, int flags);
+
+s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
+s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
+
+int virtio_transport_do_socket_init(struct vsock_sock *vsk,
+ struct vsock_sock *psk);
+u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk);
+u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk);
+u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk);
+void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val);
+void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val);
+void virtio_transport_set_max_buffer_size(struct vsock_sock *vs, u64 val);
+int
+virtio_transport_notify_poll_in(struct vsock_sock *vsk,
+ size_t target,
+ bool *data_ready_now);
+int
+virtio_transport_notify_poll_out(struct vsock_sock *vsk,
+ size_t target,
+ bool *space_available_now);
+
+int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
+ size_t target, ssize_t copied, bool data_read,
+ struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_send_init(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data);
+int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data);
+int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data);
+int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
+ ssize_t written, struct vsock_transport_send_notify_data *data);
+
+u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
+bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
+bool virtio_transport_stream_allow(u32 cid, u32 port);
+int virtio_transport_dgram_bind(struct vsock_sock *vsk,
+ struct sockaddr_vm *addr);
+bool virtio_transport_dgram_allow(u32 cid, u32 port);
+
+int virtio_transport_connect(struct vsock_sock *vsk);
+
+int virtio_transport_shutdown(struct vsock_sock *vsk, int mode);
+
+void virtio_transport_release(struct vsock_sock *vsk);
+
+ssize_t
+virtio_transport_stream_enqueue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len);
+int
+virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
+ struct sockaddr_vm *remote_addr,
+ struct msghdr *msg,
+ size_t len);
+
+void virtio_transport_destruct(struct vsock_sock *vsk);
+
+void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
+u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
+void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
+
+#endif /* _LINUX_VIRTIO_VSOCK_H */
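The tx_cnt/fwd_cnt/buf_alloc fields above implement virtio-vsock's credit-based flow control: a sender may have at most peer_buf_alloc - (tx_cnt - peer_fwd_cnt) bytes in flight. A minimal sketch of the arithmetic behind virtio_transport_get_credit(), under the assumption that both counters are free-running u32 values:

#include <linux/types.h>

/*
 * Bytes the peer can still absorb. Unsigned wraparound is intentional:
 * tx_cnt and peer_fwd_cnt are monotonically increasing counters, and
 * their difference is the data sent but not yet consumed. The real
 * helper additionally clamps the result to the amount requested.
 */
static u32 vsock_tx_credit(u32 peer_buf_alloc, u32 tx_cnt, u32 peer_fwd_cnt)
{
	u32 in_flight = tx_cnt - peer_fwd_cnt;

	return peer_buf_alloc - in_flight;
}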
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f7a35fcaaaf6..f38fe1c00564 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -64,6 +64,8 @@ struct vsock_sock {
bool rejected;
struct delayed_work connect_work;
struct delayed_work pending_work;
+ struct delayed_work close_work;
+ bool close_work_scheduled;
u32 peer_shutdown;
bool sent_request;
bool ignore_connecting_rst;
@@ -98,6 +100,9 @@ struct vsock_transport {
void (*destruct)(struct vsock_sock *);
void (*release)(struct vsock_sock *);
+ /* Cancel all pending packets sent on vsock. */
+ int (*cancel_pkt)(struct vsock_sock *vsk);
+
/* Connections. */
int (*connect)(struct vsock_sock *);
@@ -165,6 +170,9 @@ static inline int vsock_core_init(const struct vsock_transport *t)
}
void vsock_core_exit(void);
+/* The transport may downcast this to access transport-specific functions */
+const struct vsock_transport *vsock_core_get_transport(void);
+
/**** UTILS ****/
void vsock_release_pending(struct sock *pending);
@@ -177,6 +185,7 @@ void vsock_remove_connected(struct vsock_sock *vsk);
struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst);
+void vsock_remove_sock(struct vsock_sock *vsk);
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
#endif /* __AF_VSOCK_H__ */
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index cf6c74550baa..86316f90ea1e 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -84,6 +84,7 @@ static inline void gro_cells_destroy(struct gro_cells *gcells)
for_each_possible_cpu(i) {
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+ napi_disable(&cell->napi);
netif_napi_del(&cell->napi);
__skb_queue_purge(&cell->napi_skbs);
}
diff --git a/include/net/sock.h b/include/net/sock.h
index 86eb7caa6459..288a57e94bb4 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -299,6 +299,7 @@ struct cg_proto;
* @sk_filter: socket filtering instructions
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
+ * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
* @sk_tsflags: SO_TIMESTAMPING socket options
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_socket: Identd and reporting IO signals
@@ -434,6 +435,9 @@ struct sock {
long sk_sndtimeo;
struct timer_list sk_timer;
ktime_t sk_stamp;
+#if BITS_PER_LONG==32
+ seqlock_t sk_stamp_seq;
+#endif
u16 sk_tsflags;
u32 sk_tskey;
struct socket *sk_socket;
@@ -2156,6 +2160,34 @@ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
atomic_add(segs, &sk->sk_drops);
}
+static inline ktime_t sock_read_timestamp(struct sock *sk)
+{
+#if BITS_PER_LONG==32
+ unsigned int seq;
+ ktime_t kt;
+
+ do {
+ seq = read_seqbegin(&sk->sk_stamp_seq);
+ kt = sk->sk_stamp;
+ } while (read_seqretry(&sk->sk_stamp_seq, seq));
+
+ return kt;
+#else
+ return sk->sk_stamp;
+#endif
+}
+
+static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
+{
+#if BITS_PER_LONG==32
+ write_seqlock(&sk->sk_stamp_seq);
+ sk->sk_stamp = kt;
+ write_sequnlock(&sk->sk_stamp_seq);
+#else
+ sk->sk_stamp = kt;
+#endif
+}
+
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2180,7 +2212,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
- sk->sk_stamp = kt;
+ sock_write_timestamp(sk, kt);
if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
__sock_recv_wifi_status(msg, sk, skb);
@@ -2200,7 +2232,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
__sock_recv_ts_and_drops(msg, sk, skb);
else
- sk->sk_stamp = skb->tstamp;
+ sock_write_timestamp(sk, skb->tstamp);
}
void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
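On 32-bit architectures a ktime_t is 64 bits and cannot be loaded atomically, so a reader could observe half of a concurrent update; the seqlock retry loop in sock_read_timestamp() closes that race. The same pattern in isolation, for a generic 64-bit value:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(val_seq);
static u64 val;	/* 64-bit value shared with writers on 32-bit hardware */

static u64 read_val(void)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqbegin(&val_seq);	/* snapshot sequence */
		v = val;
	} while (read_seqretry(&val_seq, seq));	/* retry if a writer ran */

	return v;
}

static void write_val(u64 v)
{
	write_seqlock(&val_seq);
	val = v;
	write_sequnlock(&val_seq);
}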
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 594b4b29a224..7ef11b97cb2a 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -223,6 +223,26 @@ TRACE_EVENT(ext4_drop_inode,
(unsigned long) __entry->ino, __entry->drop)
);
+TRACE_EVENT(ext4_nfs_commit_metadata,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("dev %d,%d ino %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino)
+);
+
TRACE_EVENT(ext4_mark_inode_dirty,
TP_PROTO(struct inode *inode, unsigned long IP),
diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h
new file mode 100644
index 000000000000..b7f1d6278280
--- /dev/null
+++ b/include/trace/events/vsock_virtio_transport_common.h
@@ -0,0 +1,144 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vsock
+
+#if !defined(_TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H) || \
+ defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H
+
+#include <linux/tracepoint.h>
+
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_STREAM);
+
+#define show_type(val) \
+ __print_symbolic(val, { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" })
+
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_INVALID);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_REQUEST);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RESPONSE);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RST);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_SHUTDOWN);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RW);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_CREDIT_UPDATE);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_CREDIT_REQUEST);
+
+#define show_op(val) \
+ __print_symbolic(val, \
+ { VIRTIO_VSOCK_OP_INVALID, "INVALID" }, \
+ { VIRTIO_VSOCK_OP_REQUEST, "REQUEST" }, \
+ { VIRTIO_VSOCK_OP_RESPONSE, "RESPONSE" }, \
+ { VIRTIO_VSOCK_OP_RST, "RST" }, \
+ { VIRTIO_VSOCK_OP_SHUTDOWN, "SHUTDOWN" }, \
+ { VIRTIO_VSOCK_OP_RW, "RW" }, \
+ { VIRTIO_VSOCK_OP_CREDIT_UPDATE, "CREDIT_UPDATE" }, \
+ { VIRTIO_VSOCK_OP_CREDIT_REQUEST, "CREDIT_REQUEST" })
+
+TRACE_EVENT(virtio_transport_alloc_pkt,
+ TP_PROTO(
+ __u32 src_cid, __u32 src_port,
+ __u32 dst_cid, __u32 dst_port,
+ __u32 len,
+ __u16 type,
+ __u16 op,
+ __u32 flags
+ ),
+ TP_ARGS(
+ src_cid, src_port,
+ dst_cid, dst_port,
+ len,
+ type,
+ op,
+ flags
+ ),
+ TP_STRUCT__entry(
+ __field(__u32, src_cid)
+ __field(__u32, src_port)
+ __field(__u32, dst_cid)
+ __field(__u32, dst_port)
+ __field(__u32, len)
+ __field(__u16, type)
+ __field(__u16, op)
+ __field(__u32, flags)
+ ),
+ TP_fast_assign(
+ __entry->src_cid = src_cid;
+ __entry->src_port = src_port;
+ __entry->dst_cid = dst_cid;
+ __entry->dst_port = dst_port;
+ __entry->len = len;
+ __entry->type = type;
+ __entry->op = op;
+ __entry->flags = flags;
+ ),
+ TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x",
+ __entry->src_cid, __entry->src_port,
+ __entry->dst_cid, __entry->dst_port,
+ __entry->len,
+ show_type(__entry->type),
+ show_op(__entry->op),
+ __entry->flags)
+);
+
+TRACE_EVENT(virtio_transport_recv_pkt,
+ TP_PROTO(
+ __u32 src_cid, __u32 src_port,
+ __u32 dst_cid, __u32 dst_port,
+ __u32 len,
+ __u16 type,
+ __u16 op,
+ __u32 flags,
+ __u32 buf_alloc,
+ __u32 fwd_cnt
+ ),
+ TP_ARGS(
+ src_cid, src_port,
+ dst_cid, dst_port,
+ len,
+ type,
+ op,
+ flags,
+ buf_alloc,
+ fwd_cnt
+ ),
+ TP_STRUCT__entry(
+ __field(__u32, src_cid)
+ __field(__u32, src_port)
+ __field(__u32, dst_cid)
+ __field(__u32, dst_port)
+ __field(__u32, len)
+ __field(__u16, type)
+ __field(__u16, op)
+ __field(__u32, flags)
+ __field(__u32, buf_alloc)
+ __field(__u32, fwd_cnt)
+ ),
+ TP_fast_assign(
+ __entry->src_cid = src_cid;
+ __entry->src_port = src_port;
+ __entry->dst_cid = dst_cid;
+ __entry->dst_port = dst_port;
+ __entry->len = len;
+ __entry->type = type;
+ __entry->op = op;
+ __entry->flags = flags;
+ __entry->buf_alloc = buf_alloc;
+ __entry->fwd_cnt = fwd_cnt;
+ ),
+ TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x "
+ "buf_alloc=%u fwd_cnt=%u",
+ __entry->src_cid, __entry->src_port,
+ __entry->dst_cid, __entry->dst_port,
+ __entry->len,
+ show_type(__entry->type),
+ show_op(__entry->op),
+ __entry->flags,
+ __entry->buf_alloc,
+ __entry->fwd_cnt)
+);
+
+#endif /* _TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vsock_virtio_transport_common
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index c06237170542..01269fa43e77 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -508,6 +508,7 @@ header-y += virtio_ring.h
header-y += virtio_rng.h
header-y += virtio_scsi.h
header-y += virtio_types.h
+header-y += virtio_vsock.h
header-y += vm_sockets.h
header-y += vt.h
header-y += wait.h
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 88956276c8ab..0d9bac9f9ebb 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -708,6 +708,15 @@
#define ABS_MISC 0x28
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define here ABS_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define ABS_RESERVED 0x2e
+
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index ab3731917bac..56b7ab584cc0 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -47,6 +47,32 @@ struct vhost_vring_addr {
__u64 log_guest_addr;
};
+/* no alignment requirement */
+struct vhost_iotlb_msg {
+ __u64 iova;
+ __u64 size;
+ __u64 uaddr;
+#define VHOST_ACCESS_RO 0x1
+#define VHOST_ACCESS_WO 0x2
+#define VHOST_ACCESS_RW 0x3
+ __u8 perm;
+#define VHOST_IOTLB_MISS 1
+#define VHOST_IOTLB_UPDATE 2
+#define VHOST_IOTLB_INVALIDATE 3
+#define VHOST_IOTLB_ACCESS_FAIL 4
+ __u8 type;
+};
+
+#define VHOST_IOTLB_MSG 0x1
+
+struct vhost_msg {
+ int type;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ __u8 padding[64];
+ };
+};
+
struct vhost_memory_region {
__u64 guest_phys_addr;
__u64 memory_size; /* bytes */
@@ -126,6 +152,12 @@ struct vhost_memory {
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
/* Set eventfd to signal an error */
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
+/* Set busy loop timeout (in us) */
+#define VHOST_SET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x23, \
+ struct vhost_vring_state)
+/* Get busy loop timeout (in us) */
+#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
+ struct vhost_vring_state)
/* VHOST_NET specific defines */
@@ -140,6 +172,8 @@ struct vhost_memory {
#define VHOST_F_LOG_ALL 26
/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
#define VHOST_NET_F_VIRTIO_NET_HDR 27
+/* Vhost has a device IOTLB */
+#define VHOST_F_DEVICE_IOTLB 63
/* VHOST_SCSI specific definitions */
@@ -169,4 +203,9 @@ struct vhost_scsi_target {
#define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32)
#define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)
+/* VHOST_VSOCK specific defines */
+
+#define VHOST_VSOCK_SET_GUEST_CID _IOW(VHOST_VIRTIO, 0x60, __u64)
+#define VHOST_VSOCK_SET_RUNNING _IOW(VHOST_VIRTIO, 0x61, int)
+
#endif
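With VHOST_F_DEVICE_IOTLB negotiated, the vhost fd carries struct vhost_msg traffic; userspace answers a VHOST_IOTLB_MISS by sending back a VHOST_IOTLB_UPDATE describing the mapping. A hedged userspace sketch, assuming the read/write message channel on the vhost fd:

#include <linux/vhost.h>
#include <string.h>
#include <unistd.h>

/* Reply to an IOTLB miss with a read-write mapping. */
static int iotlb_update(int vhost_fd, __u64 iova, __u64 size, __u64 uaddr)
{
	struct vhost_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.type = VHOST_IOTLB_MSG;
	msg.iotlb.iova  = iova;
	msg.iotlb.size  = size;
	msg.iotlb.uaddr = uaddr;	/* backing userspace address */
	msg.iotlb.perm  = VHOST_ACCESS_RW;
	msg.iotlb.type  = VHOST_IOTLB_UPDATE;

	return write(vhost_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}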
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index c18264df9504..cf49c7e2cfdb 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -47,7 +47,7 @@
* transport being used (eg. virtio_ring), the rest are per-device feature
* bits. */
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 33
+#define VIRTIO_TRANSPORT_F_END 34
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -61,4 +61,12 @@
/* v1.0 compliant. */
#define VIRTIO_F_VERSION_1 32
+/*
+ * If clear - device has the IOMMU bypass quirk feature.
+ * If set - use platform tools to detect the IOMMU.
+ *
+ * Note the reverse polarity (compared to most other features);
+ * this is for compatibility with legacy systems.
+ */
+#define VIRTIO_F_IOMMU_PLATFORM 33
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 77925f587b15..3228d582234a 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -41,5 +41,6 @@
#define VIRTIO_ID_CAIF 12 /* Virtio caif */
#define VIRTIO_ID_GPU 16 /* virtio GPU */
#define VIRTIO_ID_INPUT 18 /* virtio input */
+#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h
new file mode 100644
index 000000000000..1d57ed3d84d2
--- /dev/null
+++ b/include/uapi/linux/virtio_vsock.h
@@ -0,0 +1,94 @@
+/*
+ * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
+ * anyone can use the definitions to implement compatible drivers/servers:
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) Red Hat, Inc., 2013-2015
+ * Copyright (C) Asias He <asias@redhat.com>, 2013
+ * Copyright (C) Stefan Hajnoczi <stefanha@redhat.com>, 2015
+ */
+
+#ifndef _UAPI_LINUX_VIRTIO_VSOCK_H
+#define _UAPI_LINUX_VIRTIO_VSOCK_H
+
+#include <linux/types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+struct virtio_vsock_config {
+ __le64 guest_cid;
+} __attribute__((packed));
+
+enum virtio_vsock_event_id {
+ VIRTIO_VSOCK_EVENT_TRANSPORT_RESET = 0,
+};
+
+struct virtio_vsock_event {
+ __le32 id;
+} __attribute__((packed));
+
+struct virtio_vsock_hdr {
+ __le64 src_cid;
+ __le64 dst_cid;
+ __le32 src_port;
+ __le32 dst_port;
+ __le32 len;
+ __le16 type; /* enum virtio_vsock_type */
+ __le16 op; /* enum virtio_vsock_op */
+ __le32 flags;
+ __le32 buf_alloc;
+ __le32 fwd_cnt;
+} __attribute__((packed));
+
+enum virtio_vsock_type {
+ VIRTIO_VSOCK_TYPE_STREAM = 1,
+};
+
+enum virtio_vsock_op {
+ VIRTIO_VSOCK_OP_INVALID = 0,
+
+ /* Connect operations */
+ VIRTIO_VSOCK_OP_REQUEST = 1,
+ VIRTIO_VSOCK_OP_RESPONSE = 2,
+ VIRTIO_VSOCK_OP_RST = 3,
+ VIRTIO_VSOCK_OP_SHUTDOWN = 4,
+
+ /* To send payload */
+ VIRTIO_VSOCK_OP_RW = 5,
+
+ /* Tell the peer our credit info */
+ VIRTIO_VSOCK_OP_CREDIT_UPDATE = 6,
+ /* Request the peer to send the credit info to us */
+ VIRTIO_VSOCK_OP_CREDIT_REQUEST = 7,
+};
+
+/* VIRTIO_VSOCK_OP_SHUTDOWN flags values */
+enum virtio_vsock_shutdown {
+ VIRTIO_VSOCK_SHUTDOWN_RCV = 1,
+ VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
+};
+
+#endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */
diff --git a/kernel/fork.c b/kernel/fork.c
index 77a21c3d02e3..25d0a60d166c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1446,8 +1446,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
posix_cpu_timers_init(p);
- p->start_time = ktime_get_ns();
- p->real_start_time = ktime_get_boot_ns();
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
@@ -1608,6 +1606,17 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_free_pid;
/*
+ * From this point on we must avoid any synchronous user-space
+ * communication until we take the tasklist-lock. In particular, we do
+ * not want user-space to be able to predict the process start-time by
+ * stalling fork(2) after we recorded the start_time but before it is
+ * visible to the system.
+ */
+
+ p->start_time = ktime_get_ns();
+ p->real_start_time = ktime_get_boot_ns();
+
+ /*
* Make it visible to the rest of the system, but don't wake it up yet.
* Need tasklist lock for parent etc handling!
*/
diff --git a/kernel/memremap.c b/kernel/memremap.c
index f719c925cb54..1be42f9b3e00 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -171,15 +171,12 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
struct page_map *page_map;
int error, nid;
- if (is_ram == REGION_MIXED) {
- WARN_ONCE(1, "%s attempted on mixed region %pr\n",
- __func__, res);
+ if (is_ram != REGION_DISJOINT) {
+ WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
+ is_ram == REGION_MIXED ? "mixed" : "ram", res);
return ERR_PTR(-ENXIO);
}
- if (is_ram == REGION_INTERSECTS)
- return __va(res->start);
-
page_map = devres_alloc_node(devm_memremap_pages_release,
sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
if (!page_map)
@@ -202,5 +199,5 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
devres_add(dev, page_map);
return __va(res->start);
}
-EXPORT_SYMBOL(devm_memremap_pages);
+EXPORT_SYMBOL_GPL(devm_memremap_pages);
#endif /* CONFIG_ZONE_DEVICE */
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 191b9eff7fc3..77712cdcd23a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -33,6 +33,7 @@
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/compaction.h>
+#include <linux/rmap.h>
#include <asm/tlbflush.h>
@@ -1479,6 +1480,21 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
continue;
}
+ /*
+ * HWPoison pages have elevated reference counts so the migration would
+ * fail on them. It also doesn't make any sense to migrate them in the
+ * first place. Still try to unmap such a page in case it is still mapped
+ * (e.g. the current hwpoison implementation doesn't unmap KSM pages but keeps
+ * the unmap as the catch-all safety net).
+ */
+ if (PageHWPoison(page)) {
+ if (WARN_ON(PageLRU(page)))
+ isolate_lru_page(page);
+ if (page_mapped(page))
+ try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS, NULL);
+ continue;
+ }
+
if (!get_page_unless_zero(page))
continue;
/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3ce079e4ed65..4087e77cd033 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2167,6 +2167,7 @@ int write_cache_pages(struct address_space *mapping,
{
int ret = 0;
int done = 0;
+ int error;
struct pagevec pvec;
int nr_pages;
pgoff_t uninitialized_var(writeback_index);
@@ -2247,25 +2248,31 @@ continue_unlock:
goto continue_unlock;
trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
- ret = (*writepage)(page, wbc, data);
- if (unlikely(ret)) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ error = (*writepage)(page, wbc, data);
+ if (unlikely(error)) {
+ /*
+ * Handle errors according to the type of
+ * writeback. There's no need to continue for
+ * background writeback. Just push done_index
+ * past this page so media errors won't choke
+ * writeout for the entire file. For integrity
+ * writeback, we must process the entire dirty
+ * set regardless of errors because the fs may
+ * still have state to clear for each page. In
+ * that case we continue processing and return
+ * the first error.
+ */
+ if (error == AOP_WRITEPAGE_ACTIVATE) {
unlock_page(page);
- ret = 0;
- } else {
- /*
- * done_index is set past this page,
- * so media errors will not choke
- * background writeout for the entire
- * file. This has consequences for
- * range_cyclic semantics (ie. it may
- * not be suitable for data integrity
- * writeout).
- */
+ error = 0;
+ } else if (wbc->sync_mode != WB_SYNC_ALL) {
+ ret = error;
done_index = page->index + 1;
done = 1;
break;
}
+ if (!ret)
+ ret = error;
}
/*
diff --git a/mm/readahead.c b/mm/readahead.c
index 72c17e77a6c7..fb1d210dbf05 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -208,12 +208,21 @@ out:
* memory at once.
*/
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
- pgoff_t offset, unsigned long nr_to_read)
+ pgoff_t offset, unsigned long nr_to_read)
{
+ struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
+ struct file_ra_state *ra = &filp->f_ra;
+ unsigned long max_pages;
+
if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
return -EINVAL;
- nr_to_read = min(nr_to_read, inode_to_bdi(mapping->host)->ra_pages);
+ /*
+ * If the request exceeds the readahead window, allow the read to
+ * be up to the optimal hardware IO size
+ */
+ max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
+ nr_to_read = min(nr_to_read, max_pages);
while (nr_to_read) {
int err;
@@ -372,10 +381,19 @@ ondemand_readahead(struct address_space *mapping,
bool hit_readahead_marker, pgoff_t offset,
unsigned long req_size)
{
- unsigned long max = ra->ra_pages;
+ struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
+ unsigned long max_pages = ra->ra_pages;
+ unsigned long add_pages;
pgoff_t prev_offset;
/*
+ * If the request exceeds the readahead window, allow the read to
+ * be up to the optimal hardware IO size
+ */
+ if (req_size > max_pages && bdi->io_pages > max_pages)
+ max_pages = min(req_size, bdi->io_pages);
+
+ /*
* start of file
*/
if (!offset)
@@ -388,7 +406,7 @@ ondemand_readahead(struct address_space *mapping,
if ((offset == (ra->start + ra->size - ra->async_size) ||
offset == (ra->start + ra->size))) {
ra->start += ra->size;
- ra->size = get_next_ra_size(ra, max);
+ ra->size = get_next_ra_size(ra, max_pages);
ra->async_size = ra->size;
goto readit;
}
@@ -403,16 +421,16 @@ ondemand_readahead(struct address_space *mapping,
pgoff_t start;
rcu_read_lock();
- start = page_cache_next_hole(mapping, offset + 1, max);
+ start = page_cache_next_hole(mapping, offset + 1, max_pages);
rcu_read_unlock();
- if (!start || start - offset > max)
+ if (!start || start - offset > max_pages)
return 0;
ra->start = start;
ra->size = start - offset; /* old async_size */
ra->size += req_size;
- ra->size = get_next_ra_size(ra, max);
+ ra->size = get_next_ra_size(ra, max_pages);
ra->async_size = ra->size;
goto readit;
}
@@ -420,7 +438,7 @@ ondemand_readahead(struct address_space *mapping,
/*
* oversize read
*/
- if (req_size > max)
+ if (req_size > max_pages)
goto initial_readahead;
/*
@@ -436,7 +454,7 @@ ondemand_readahead(struct address_space *mapping,
* Query the page cache and look for the traces(cached history pages)
* that a sequential stream would leave behind.
*/
- if (try_context_readahead(mapping, ra, offset, req_size, max))
+ if (try_context_readahead(mapping, ra, offset, req_size, max_pages))
goto readit;
/*
@@ -447,7 +465,7 @@ ondemand_readahead(struct address_space *mapping,
initial_readahead:
ra->start = offset;
- ra->size = get_init_ra_size(req_size, max);
+ ra->size = get_init_ra_size(req_size, max_pages);
ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
readit:
@@ -455,10 +473,17 @@ readit:
* Will this read hit the readahead marker made by itself?
* If so, trigger the readahead marker hit now, and merge
* the resulted next readahead window into the current one.
+ * Take care of maximum IO pages as above.
*/
if (offset == ra->start && ra->size == ra->async_size) {
- ra->async_size = get_next_ra_size(ra, max);
- ra->size += ra->async_size;
+ add_pages = get_next_ra_size(ra, max_pages);
+ if (ra->size + add_pages <= max_pages) {
+ ra->async_size = add_pages;
+ ra->size += add_pages;
+ } else {
+ ra->size = max_pages;
+ ra->async_size = max_pages >> 1;
+ }
}
return ra_submit(ra, mapping, filp);
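Both readahead hunks apply the same sizing rule: the window normally stays at ra_pages, but an explicitly oversized request may grow it up to the device's optimal IO size, now exported as bdi->io_pages. The rule in isolation:

/*
 * Effective readahead limit: the default window, enlarged up to the
 * device's optimal IO size only when the caller asked for more.
 */
static unsigned long ra_max_pages(unsigned long req_size,
				  unsigned long ra_pages,
				  unsigned long io_pages)
{
	if (req_size > ra_pages && io_pages > ra_pages)
		return req_size < io_pages ? req_size : io_pages;
	return ra_pages;
}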
diff --git a/mm/slab.c b/mm/slab.c
index 80ca19a122f3..6180cba0df78 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -859,8 +859,10 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
struct alien_cache *alc = NULL;
alc = kmalloc_node(memsize, gfp, node);
- init_arraycache(&alc->ac, entries, batch);
- spin_lock_init(&alc->lock);
+ if (alc) {
+ init_arraycache(&alc->ac, entries, batch);
+ spin_lock_init(&alc->lock);
+ }
return alc;
}
diff --git a/net/9p/client.c b/net/9p/client.c
index ed8738c4dc09..8fba9cd973c1 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -156,6 +156,12 @@ static int parse_opts(char *opts, struct p9_client *clnt)
ret = r;
continue;
}
+ if (option < 4096) {
+ p9_debug(P9_DEBUG_ERROR,
+ "msize should be at least 4k\n");
+ ret = -EINVAL;
+ continue;
+ }
clnt->msize = option;
break;
case Opt_trans:
@@ -972,10 +978,18 @@ static int p9_client_version(struct p9_client *c)
else if (!strncmp(version, "9P2000", 6))
c->proto_version = p9_proto_legacy;
else {
+ p9_debug(P9_DEBUG_ERROR,
+ "server returned an unknown version: %s\n", version);
err = -EREMOTEIO;
goto error;
}
+ if (msize < 4096) {
+ p9_debug(P9_DEBUG_ERROR,
+ "server returned a msize < 4096: %d\n", msize);
+ err = -EREMOTEIO;
+ goto error;
+ }
if (msize < c->msize)
c->msize = msize;
@@ -1040,6 +1054,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
if (clnt->msize > clnt->trans_mod->maxsize)
clnt->msize = clnt->trans_mod->maxsize;
+ if (clnt->msize < 4096) {
+ p9_debug(P9_DEBUG_ERROR,
+ "Please specify a msize of at least 4k\n");
+ err = -EINVAL;
+ goto free_client;
+ }
+
err = p9_client_version(clnt);
if (err)
goto close_trans;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 2fdebabbfacd..2772f6a13fcb 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -654,15 +654,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
}
- dev = dev_get_by_name(&init_net, devname);
+ rtnl_lock();
+ dev = __dev_get_by_name(&init_net, devname);
if (!dev) {
+ rtnl_unlock();
res = -ENODEV;
break;
}
ax25->ax25_dev = ax25_dev_ax25dev(dev);
+ if (!ax25->ax25_dev) {
+ rtnl_unlock();
+ res = -ENODEV;
+ break;
+ }
ax25_fillin_cb(ax25, ax25->ax25_dev);
- dev_put(dev);
+ rtnl_unlock();
break;
default:
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 3d106767b272..5faca5db6385 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -116,6 +116,7 @@ void ax25_dev_device_down(struct net_device *dev)
if ((s = ax25_dev_list) == ax25_dev) {
ax25_dev_list = s->next;
spin_unlock_bh(&ax25_dev_lock);
+ dev->ax25_ptr = NULL;
dev_put(dev);
kfree(ax25_dev);
return;
@@ -125,6 +126,7 @@ void ax25_dev_device_down(struct net_device *dev)
if (s->next == ax25_dev) {
s->next = ax25_dev->next;
spin_unlock_bh(&ax25_dev_lock);
+ dev->ax25_ptr = NULL;
dev_put(dev);
kfree(ax25_dev);
return;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 55dcb2b20b59..6def85d75b1d 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -267,7 +267,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
int ret;
- if (neigh->hh.hh_len) {
+ if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
neigh_hh_bridge(&neigh->hh, skb);
skb->dev = nf_bridge->physindev;
ret = br_handle_frame_finish(net, sk, skb);
diff --git a/net/can/gw.c b/net/can/gw.c
index 77c8af4047ef..81650affa3fa 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -418,13 +418,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
- /* check for checksum updates when the CAN frame has been modified */
+ /* Has the CAN frame been modified? */
if (modidx) {
- if (gwj->mod.csumfunc.crc8)
+ /* get available space for the processed CAN frame type */
+ int max_len = nskb->len - offsetof(struct can_frame, data);
+
+ /* dlc may have changed, make sure it fits to the CAN frame */
+ if (cf->can_dlc > max_len)
+ goto out_delete;
+
+ /* check for checksum updates in classic CAN length only */
+ if (gwj->mod.csumfunc.crc8) {
+ if (cf->can_dlc > 8)
+ goto out_delete;
+
(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+ }
+
+ if (gwj->mod.csumfunc.xor) {
+ if (cf->can_dlc > 8)
+ goto out_delete;
- if (gwj->mod.csumfunc.xor)
(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+ }
}
/* clear the skb timestamp if not configured the other way */
@@ -436,6 +452,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
gwj->dropped_frames++;
else
gwj->handled_frames++;
+
+ return;
+
+ out_delete:
+ /* delete frame due to misconfiguration */
+ gwj->deleted_frames++;
+ kfree_skb(nskb);
+ return;
}
static inline int cgw_register_filter(struct cgw_job *gwj)
diff --git a/net/compat.c b/net/compat.c
index 17e97b106458..d67684010455 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -443,12 +443,14 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
err = -ENOENT;
if (!sock_flag(sk, SOCK_TIMESTAMP))
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- tv = ktime_to_timeval(sk->sk_stamp);
+ tv = ktime_to_timeval(sock_read_timestamp(sk));
+
if (tv.tv_sec == -1)
return err;
if (tv.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- tv = ktime_to_timeval(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ tv = ktime_to_timeval(kt);
}
err = 0;
if (put_user(tv.tv_sec, &ctv->tv_sec) ||
@@ -471,12 +473,13 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
err = -ENOENT;
if (!sock_flag(sk, SOCK_TIMESTAMP))
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- ts = ktime_to_timespec(sk->sk_stamp);
+ ts = ktime_to_timespec(sock_read_timestamp(sk));
if (ts.tv_sec == -1)
return err;
if (ts.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- ts = ktime_to_timespec(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ ts = ktime_to_timespec(kt);
}
err = 0;
if (put_user(ts.tv_sec, &ctv->tv_sec) ||
diff --git a/net/core/dev.c b/net/core/dev.c
index 4810e43501c6..01012cf847a0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -185,7 +185,7 @@ EXPORT_SYMBOL(dev_base_lock);
static DEFINE_SPINLOCK(napi_hash_lock);
static unsigned int napi_gen_id = NR_CPUS;
-static DEFINE_HASHTABLE(napi_hash, 8);
+static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5bd724d09287..2e486ab7c878 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2414,12 +2414,15 @@ EXPORT_SYMBOL(skb_queue_purge);
*/
void skb_rbtree_purge(struct rb_root *root)
{
- struct sk_buff *skb, *next;
+ struct rb_node *p = rb_first(root);
- rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
- kfree_skb(skb);
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
- *root = RB_ROOT;
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
+ kfree_skb(skb);
+ }
}
/**
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e666487ecc1..14dcc631f922 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -732,6 +732,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
+ sk_dst_reset(sk);
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
@@ -2438,6 +2439,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_stamp = ktime_set(-1L, 0);
+#if BITS_PER_LONG==32
+ seqlock_init(&sk->sk_stamp_seq);
+#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = 0;
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index a10db45b2e1e..df32134da924 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -55,6 +55,9 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
const u8 *daddr = _daddr;
struct lowpan_addr_info *info;
+ if (!daddr)
+ return -EINVAL;
+
/* TODO:
* if this package isn't ipv6 one, where should it be routed?
*/
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3f8caf7d19b8..1ea36bf778e6 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -133,19 +133,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
+ __be16 _ports[2], *ports;
struct sockaddr_in sin;
- __be16 *ports;
- int end;
-
- end = skb_transport_offset(skb) + 4;
- if (end > 0 && !pskb_may_pull(skb, end))
- return;
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
- ports = (__be16 *)skb_transport_header(skb);
+ ports = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_ports), &_ports);
+ if (!ports)
+ return;
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
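The old code dereferenced skb_transport_header() directly, which is unsafe when the port bytes live in a non-linear fragment; skb_header_pointer() either returns a pointer into the linear area or copies the bytes into the caller's stack buffer, and returns NULL if the packet is too short. The pattern in isolation:

#include <linux/skbuff.h>

/* Safely read the 4-byte src/dst port pair of the transport header. */
static int read_ports(const struct sk_buff *skb, __be16 out[2])
{
	__be16 _ports[2];
	const __be16 *ports;

	ports = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_ports), _ports);
	if (!ports)
		return -EINVAL;	/* packet too short */

	out[0] = ports[0];	/* source port */
	out[1] = ports[1];	/* destination port */
	return 0;
}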
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8e77786549c6..1cb865fcc91b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -66,6 +66,7 @@
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
+#include <linux/nospec.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM 1
@@ -1574,6 +1575,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
return -EFAULT;
if (vr.vifi >= mrt->maxvif)
return -EINVAL;
+ vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
read_lock(&mrt_lock);
vif = &mrt->vif_table[vr.vifi];
if (VIF_EXISTS(mrt, vr.vifi)) {
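
[Editor's note] array_index_nospec() addresses Spectre v1: the `vr.vifi >= mrt->maxvif` branch can be mispredicted, letting a speculative out-of-bounds load of vif_table leak data through the cache. Clamping the index after the bounds check makes the speculative path safe as well. A sketch with illustrative names (table, nr_entries):

        #include <linux/nospec.h>
        #include <linux/errno.h>

        static long example_lookup(const long *table, unsigned long nr_entries,
                                   unsigned long idx)
        {
                if (idx >= nr_entries)
                        return -EINVAL;

                /* architecturally a no-op; speculatively clamps idx below
                 * nr_entries so a mispredicted branch cannot index past the end */
                idx = array_index_nospec(idx, nr_entries);
                return table[idx];
        }
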
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e1fe8d227ef1..043c30d08220 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -308,6 +308,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* Check if the address belongs to the host. */
if (addr_type == IPV6_ADDR_MAPPED) {
+ struct net_device *dev = NULL;
int chk_addr_ret;
/* Binding to v4-mapped address on a v6-only socket
@@ -318,9 +319,20 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
+ rcu_read_lock();
+ if (sk->sk_bound_dev_if) {
+ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+ if (!dev) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+ }
+
/* Reproduce AF_INET checks to make the bindings consistent */
v4addr = addr->sin6_addr.s6_addr32[3];
- chk_addr_ret = inet_addr_type(net, v4addr);
+ chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
+ rcu_read_unlock();
+
if (!net->ipv4.sysctl_ip_nonlocal_bind &&
!(inet->freebind || inet->transparent) &&
v4addr != htonl(INADDR_ANY) &&
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 7a62fd9173d0..674fc3861e41 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -296,6 +296,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
iph->daddr = fl6->daddr;
+ ip6_flow_hdr(iph, 0, 0);
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
@@ -663,17 +664,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
}
if (np->rxopt.bits.rxorigdstaddr) {
struct sockaddr_in6 sin6;
- __be16 *ports;
- int end;
+ __be16 _ports[2], *ports;
- end = skb_transport_offset(skb) + 4;
- if (end <= 0 || pskb_may_pull(skb, end)) {
+ ports = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_ports), &_ports);
+ if (ports) {
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
- ports = (__be16 *)skb_transport_header(skb);
-
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ipv6_hdr(skb)->daddr;
sin6.sin6_port = ports[1];
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index 14dacf1df529..30b03d8e321a 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -15,7 +15,7 @@
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp)
{
- struct sockaddr_in6 udp6_addr;
+ struct sockaddr_in6 udp6_addr = {};
int err;
struct socket *sock = NULL;
@@ -42,6 +42,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
goto error;
if (cfg->peer_udp_port) {
+ memset(&udp6_addr, 0, sizeof(udp6_addr));
udp6_addr.sin6_family = AF_INET6;
memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
sizeof(udp6_addr.sin6_addr));
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9b92960f024d..74b3e9718e84 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -72,6 +72,8 @@ struct mr6_table {
#endif
};
+#include <linux/nospec.h>
+
struct ip6mr_rule {
struct fib_rule common;
};
@@ -1871,6 +1873,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
return -EFAULT;
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
+ vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
read_lock(&mrt_lock);
vif = &mrt->vif6_table[vr.mifi];
if (MIF_EXISTS(mrt, vr.mifi)) {
@@ -1945,6 +1948,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
return -EFAULT;
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
+ vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
read_lock(&mrt_lock);
vif = &mrt->vif6_table[vr.mifi];
if (MIF_EXISTS(mrt, vr.mifi)) {
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index ed212ffc1d9d..046ae1caecea 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax25_address *addr)
sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
s = NULL;
@@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
if (nr->your_index == index && nr->your_id == id &&
!ax25cmp(&nr->dest_addr, dest)) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -224,7 +224,7 @@ static unsigned short nr_find_next_circuit(void)
if (i != 0 && j != 0) {
if ((sk=nr_find_socket(i, j)) == NULL)
break;
- bh_unlock_sock(sk);
+ sock_put(sk);
}
id++;
@@ -918,6 +918,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
}
if (sk != NULL) {
+ bh_lock_sock(sk);
skb_reset_transport_header(skb);
if (frametype == NR_CONNACK && skb->len == 22)
@@ -927,6 +928,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
ret = nr_process_rx_frame(sk, skb);
bh_unlock_sock(sk);
+ sock_put(sk);
return ret;
}
@@ -958,10 +960,12 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
(make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
if (sk)
- bh_unlock_sock(sk);
+ sock_put(sk);
return 0;
}
+ bh_lock_sock(sk);
+
window = skb->data[20];
skb->sk = make;
@@ -1014,6 +1018,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
sk->sk_data_ready(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
nr_insert_socket(make);
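
[Editor's note] The netrom hunks change the lookup helpers from returning a socket that is already bh_lock_sock()ed to returning one pinned with sock_hold(); nr_rx_frame() then takes the lock only around the actual frame processing and releases the reference with sock_put(). A sketch of that hold-then-lock shape, where find_some_sock() and process_frame() are hypothetical stand-ins for the nr_find_*() helpers and nr_process_rx_frame():

        #include <net/sock.h>

        struct sock *find_some_sock(struct sk_buff *skb);        /* hypothetical */
        int process_frame(struct sock *sk, struct sk_buff *skb); /* hypothetical */

        static int example_rx(struct sk_buff *skb)
        {
                struct sock *sk = find_some_sock(skb); /* returned held, unlocked */
                int ret;

                if (!sk)
                        return 0;

                bh_lock_sock(sk);       /* lock only around the real work */
                ret = process_frame(sk, skb);
                bh_unlock_sock(sk);
                sock_put(sk);           /* drop the lookup reference */
                return ret;
        }
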
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 07668f152a3a..753b2837318d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2511,8 +2511,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_addr;
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+ if (addr && dev && saddr->sll_halen < dev->addr_len)
+ goto out_put;
}
err = -ENXIO;
@@ -2678,8 +2680,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_addr;
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+ if (addr && dev && saddr->sll_halen < dev->addr_len)
+ goto out_unlock;
}
err = -ENXIO;
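
[Editor's note] Both af_packet hunks apply the same rule: a zero sll_halen means no link-layer address was supplied (addr stays NULL), and a non-zero one must cover at least dev->addr_len bytes, since the transmit path later copies dev->addr_len bytes out of sll_addr. A hypothetical helper expressing that check in isolation (it assumes msg_namelen was already validated against the sll_addr offset, as the surrounding code does):

        #include <linux/if_packet.h>
        #include <linux/netdevice.h>
        #include <linux/err.h>

        static const unsigned char *
        example_lladdr(const struct sockaddr_ll *sll, const struct net_device *dev)
        {
                if (!sll->sll_halen)
                        return NULL;             /* no address supplied */
                if (sll->sll_halen < dev->addr_len)
                        return ERR_PTR(-EINVAL); /* too short for this device */
                return sll->sll_addr;
        }
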
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 5ca8309ea7b1..9fa0b0dc3868 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -97,10 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
switch (ev) {
case NETDEV_UP:
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
- addr->a.v6.sin6_port = 0;
addr->a.v6.sin6_addr = ifa->addr;
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
addr->valid = 1;
@@ -411,7 +410,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
- addr->a.v6.sin6_port = 0;
addr->a.v6.sin6_addr = ifp->addr;
addr->a.v6.sin6_scope_id = dev->ifindex;
addr->valid = 1;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index dc030efa4447..9f2f3c48b7b6 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -151,7 +151,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v4.sin_family = AF_INET;
- addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
@@ -775,10 +774,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
switch (ev) {
case NETDEV_UP:
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v4.sin_family = AF_INET;
- addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
spin_lock_bh(&net->sctp.local_addr_lock);
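
[Editor's note] Switching NETDEV_UP from kmalloc() to kzalloc() is what lets the explicit sin_port/sin6_port = 0 stores disappear: the whole entry, including sockaddr padding that kmalloc() leaves uninitialized, starts out zeroed, so nothing stale can later reach userspace. A compact sketch with a stand-in entry type:

        #include <linux/slab.h>
        #include <linux/socket.h>

        struct example_entry {          /* stand-in for sctp_sockaddr_entry */
                struct sockaddr_storage a;
                int valid;
        };

        static struct example_entry *example_alloc(void)
        {
                /* one zeroing allocation replaces kmalloc() plus field-by-field
                 * init; padding inside the sockaddr is cleared too */
                return kzalloc(sizeof(struct example_entry), GFP_ATOMIC);
        }
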
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 036bbf2b44c1..b5291ea54a3d 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1105,7 +1105,7 @@ static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
struct kvec *resv = &rqstp->rq_res.head[0];
struct rsi *rsip, rsikey;
int ret;
- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
memset(&rsikey, 0, sizeof(rsikey));
ret = gss_read_verf(gc, argv, authp,
@@ -1216,7 +1216,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
uint64_t handle;
int status;
int ret;
- struct net *net = rqstp->rq_xprt->xpt_net;
+ struct net *net = SVC_NET(rqstp);
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
memset(&ud, 0, sizeof(ud));
@@ -1406,7 +1406,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
__be32 *rpcstart;
__be32 *reject_stat = resv->iov_base + resv->iov_len;
int ret;
- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",
argv->iov_len);
@@ -1694,7 +1694,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
struct rpc_gss_wire_cred *gc = &gsd->clcred;
struct xdr_buf *resbuf = &rqstp->rq_res;
int stat = -EINVAL;
- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
if (gc->gc_proc != RPC_GSS_PROC_DATA)
goto out;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 63fb5ee212cf..af17b00145e1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,6 +54,11 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
h->last_refresh = now;
}
+static void cache_fresh_locked(struct cache_head *head, time_t expiry,
+ struct cache_detail *detail);
+static void cache_fresh_unlocked(struct cache_head *head,
+ struct cache_detail *detail);
+
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
struct cache_head *key, int hash)
{
@@ -95,6 +100,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
if (cache_is_expired(detail, tmp)) {
hlist_del_init(&tmp->cache_list);
detail->entries --;
+ cache_fresh_locked(tmp, 0, detail);
freeme = tmp;
break;
}
@@ -110,8 +116,10 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
cache_get(new);
write_unlock(&detail->hash_lock);
- if (freeme)
+ if (freeme) {
+ cache_fresh_unlocked(freeme, detail);
cache_put(freeme, detail);
+ }
return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index cf5770d8f49a..c89626b2afff 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -772,6 +772,12 @@ void rpcb_getport_async(struct rpc_task *task)
case RPCBVERS_3:
map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
+ if (!map->r_addr) {
+ status = -ENOMEM;
+ dprintk("RPC: %5u %s: no memory available\n",
+ task->tk_pid, __func__);
+ goto bailout_free_args;
+ }
map->r_owner = "";
break;
case RPCBVERS_2:
@@ -794,6 +800,8 @@ void rpcb_getport_async(struct rpc_task *task)
rpc_put_task(child);
return;
+bailout_free_args:
+ kfree(map);
bailout_release_client:
rpc_release_client(rpcb_clnt);
bailout_nofree:
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c5b0cb4f4056..41f6e964fe91 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1062,6 +1062,8 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif
+extern void svc_tcp_prep_reply_hdr(struct svc_rqst *);
+
/*
* Common routine for processing the RPC request.
*/
@@ -1091,7 +1093,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
clear_bit(RQ_DROPME, &rqstp->rq_flags);
/* Setup reply header */
- rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
+ if (rqstp->rq_prot == IPPROTO_TCP)
+ svc_tcp_prep_reply_hdr(rqstp);
svc_putu32(resv, rqstp->rq_xid);
@@ -1138,7 +1141,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
case SVC_DENIED:
goto err_bad_auth;
case SVC_CLOSE:
- if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
+ if (rqstp->rq_xprt &&
+ test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
svc_close_xprt(rqstp->rq_xprt);
case SVC_DROP:
goto dropit;
@@ -1360,10 +1364,10 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
dprintk("svc: %s(%p)\n", __func__, req);
/* Build the svc_rqst used by the common processing routine */
- rqstp->rq_xprt = serv->sv_bc_xprt;
rqstp->rq_xid = req->rq_xid;
rqstp->rq_prot = req->rq_xprt->prot;
rqstp->rq_server = serv;
+ rqstp->rq_bc_net = req->rq_xprt->xprt_net;
rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 71f15da72f02..2b8e80c721db 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -454,10 +454,11 @@ out:
*/
void svc_reserve(struct svc_rqst *rqstp, int space)
{
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+
space += rqstp->rq_res.head[0].iov_len;
- if (space < rqstp->rq_reserved) {
- struct svc_xprt *xprt = rqstp->rq_xprt;
+ if (xprt && space < rqstp->rq_reserved) {
atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
rqstp->rq_reserved = space;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 1413cdcc131c..0a9fe033132c 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -614,7 +614,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
/* Don't enable netstamp, sunrpc doesn't
need that much accuracy */
}
- svsk->sk_sk->sk_stamp = skb->tstamp;
+ sock_write_timestamp(svsk->sk_sk, skb->tstamp);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
len = skb->len - sizeof(struct udphdr);
@@ -1240,7 +1240,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
/*
* Setup response header. TCP has a 4B record length field.
*/
-static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
+void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
{
struct kvec *resv = &rqstp->rq_res.head[0];
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index f86c6555a539..e9653c42cdd1 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
return limit;
}
+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
+{
+ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
+}
+
static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
{
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
return buf;
}
+static inline bool string_is_valid(char *s, int len)
+{
+ return memchr(s, '\0', len) ? true : false;
+}
+
static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg,
struct sk_buff *arg)
@@ -364,6 +374,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_bearer_config *b;
+ int len;
b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
@@ -371,6 +382,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(b->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
return -EMSGSIZE;
@@ -396,6 +411,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
{
char *name;
struct nlattr *bearer;
+ int len;
name = (char *)TLV_DATA(msg->req);
@@ -403,6 +419,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
return -EMSGSIZE;
@@ -462,6 +482,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+ int len;
nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
@@ -472,6 +493,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
NULL);
name = (char *)TLV_DATA(msg->req);
+
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
return 0;
@@ -605,6 +631,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
struct nlattr *prop;
struct nlattr *media;
struct tipc_link_config *lc;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -612,6 +639,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
if (!media)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
return -EMSGSIZE;
@@ -632,6 +663,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_link_config *lc;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -639,6 +671,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
return -EMSGSIZE;
@@ -687,9 +723,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
struct tipc_link_config *lc;
struct tipc_bearer *bearer;
struct tipc_media *media;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
media = tipc_media_find(lc->name);
if (media) {
cmd->doit = &tipc_nl_media_set;
@@ -711,6 +752,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
{
char *name;
struct nlattr *link;
+ int len;
name = (char *)TLV_DATA(msg->req);
@@ -718,6 +760,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
if (!link)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
return -EMSGSIZE;
@@ -739,6 +785,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
};
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
+ return -EINVAL;
depth = ntohl(ntq->depth);
@@ -1117,7 +1165,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
}
len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (len && !TLV_OK(msg.req, len)) {
+ if (!len || !TLV_OK(msg.req, len)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
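
[Editor's note] Each tipc compat handler now bounds the user-supplied name before handing it to nla_put_string(), which would otherwise strlen() past the TLV payload if no terminator is present. string_is_valid() is just a memchr() for NUL within min(payload length, protocol maximum); a sketch of the same check with illustrative parameters:

        #include <linux/string.h>
        #include <linux/kernel.h>

        static bool example_name_ok(const char *name, int payload_len, int name_max)
        {
                int len = min_t(int, payload_len, name_max);

                /* memchr() scans at most len bytes; a missing NUL means the
                 * name is unterminated and must be rejected */
                return memchr(name, '\0', len) != NULL;
        }
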
diff --git a/net/vmw_vsock/Kconfig b/net/vmw_vsock/Kconfig
index 14810abedc2e..8831e7c42167 100644
--- a/net/vmw_vsock/Kconfig
+++ b/net/vmw_vsock/Kconfig
@@ -26,3 +26,23 @@ config VMWARE_VMCI_VSOCKETS
To compile this driver as a module, choose M here: the module
will be called vmw_vsock_vmci_transport. If unsure, say N.
+
+config VIRTIO_VSOCKETS
+ tristate "virtio transport for Virtual Sockets"
+ depends on VSOCKETS && VIRTIO
+ select VIRTIO_VSOCKETS_COMMON
+ help
+ This module implements a virtio transport for Virtual Sockets.
+
+ Enable this transport if your Virtual Machine host supports Virtual
+ Sockets over virtio.
+
+ To compile this driver as a module, choose M here: the module will be
+ called vmw_vsock_virtio_transport. If unsure, say N.
+
+config VIRTIO_VSOCKETS_COMMON
+ tristate
+ help
+ This option is selected by any driver which needs to access
+ the virtio_vsock. The module will be called
+ vmw_vsock_virtio_transport_common.
diff --git a/net/vmw_vsock/Makefile b/net/vmw_vsock/Makefile
index 2ce52d70f224..bc27c70e0e59 100644
--- a/net/vmw_vsock/Makefile
+++ b/net/vmw_vsock/Makefile
@@ -1,7 +1,13 @@
obj-$(CONFIG_VSOCKETS) += vsock.o
obj-$(CONFIG_VMWARE_VMCI_VSOCKETS) += vmw_vsock_vmci_transport.o
+obj-$(CONFIG_VIRTIO_VSOCKETS) += vmw_vsock_virtio_transport.o
+obj-$(CONFIG_VIRTIO_VSOCKETS_COMMON) += vmw_vsock_virtio_transport_common.o
vsock-y += af_vsock.o vsock_addr.o
vmw_vsock_vmci_transport-y += vmci_transport.o vmci_transport_notify.o \
vmci_transport_notify_qstate.o
+
+vmw_vsock_virtio_transport-y += virtio_transport.o
+
+vmw_vsock_virtio_transport_common-y += virtio_transport_common.o
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 7f1d166ce612..7566395e526d 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -61,6 +61,14 @@
* function will also cleanup rejected sockets, those that reach the connected
* state but leave it before they have been accepted.
*
+ * - Lock ordering for pending or accept queue sockets is:
+ *
+ * lock_sock(listener);
+ * lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
+ *
+ * Using explicit nested locking keeps lockdep happy since normally only one
+ * lock of a given class may be taken at a time.
+ *
* - Sockets created by user action will be cleaned up when the user process
* calls close(2), causing our release implementation to be called. Our release
* implementation will perform some cleanup then drop the last reference so our
@@ -336,6 +344,16 @@ static bool vsock_in_connected_table(struct vsock_sock *vsk)
return ret;
}
+void vsock_remove_sock(struct vsock_sock *vsk)
+{
+ if (vsock_in_bound_table(vsk))
+ vsock_remove_bound(vsk);
+
+ if (vsock_in_connected_table(vsk))
+ vsock_remove_connected(vsk);
+}
+EXPORT_SYMBOL_GPL(vsock_remove_sock);
+
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
int i;
@@ -443,10 +461,12 @@ static void vsock_pending_work(struct work_struct *work)
cleanup = true;
lock_sock(listener);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (vsock_is_pending(sk)) {
vsock_remove_pending(listener, sk);
+
+ listener->sk_ack_backlog--;
} else if (!vsk->rejected) {
/* We are not on the pending list and accept() did not reject
* us, so we must have been accepted by our user process. We
@@ -457,8 +477,6 @@ static void vsock_pending_work(struct work_struct *work)
goto out;
}
- listener->sk_ack_backlog--;
-
/* We need to remove ourself from the global connected sockets list so
* incoming packets can't find this socket, and to reduce the reference
* count.
@@ -655,12 +673,6 @@ static void __vsock_release(struct sock *sk)
vsk = vsock_sk(sk);
pending = NULL; /* Compiler warning. */
- if (vsock_in_bound_table(vsk))
- vsock_remove_bound(vsk);
-
- if (vsock_in_connected_table(vsk))
- vsock_remove_connected(vsk);
-
transport->release(vsk);
lock_sock(sk);
@@ -1092,10 +1104,19 @@ static const struct proto_ops vsock_dgram_ops = {
.sendpage = sock_no_sendpage,
};
+static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+ if (!transport->cancel_pkt)
+ return -EOPNOTSUPP;
+
+ return transport->cancel_pkt(vsk);
+}
+
static void vsock_connect_timeout(struct work_struct *work)
{
struct sock *sk;
struct vsock_sock *vsk;
+ int cancel = 0;
vsk = container_of(work, struct vsock_sock, connect_work.work);
sk = sk_vsock(vsk);
@@ -1106,8 +1127,11 @@ static void vsock_connect_timeout(struct work_struct *work)
sk->sk_state = SS_UNCONNECTED;
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
+ cancel = 1;
}
release_sock(sk);
+ if (cancel)
+ vsock_transport_cancel_pkt(vsk);
sock_put(sk);
}
@@ -1212,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
err = sock_intr_errno(timeout);
sk->sk_state = SS_UNCONNECTED;
sock->state = SS_UNCONNECTED;
+ vsock_transport_cancel_pkt(vsk);
goto out_wait;
} else if (timeout == 0) {
err = -ETIMEDOUT;
sk->sk_state = SS_UNCONNECTED;
sock->state = SS_UNCONNECTED;
+ vsock_transport_cancel_pkt(vsk);
goto out_wait;
}
@@ -1293,7 +1319,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
if (connected) {
listener->sk_ack_backlog--;
- lock_sock(connected);
+ lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
vconnected = vsock_sk(connected);
/* If the listener socket has received an error, then we should
@@ -1983,7 +2009,16 @@ void vsock_core_exit(void)
}
EXPORT_SYMBOL_GPL(vsock_core_exit);
+const struct vsock_transport *vsock_core_get_transport(void)
+{
+ /* vsock_register_mutex not taken since only the transport uses this
+ * function and only while registered.
+ */
+ return transport;
+}
+EXPORT_SYMBOL_GPL(vsock_core_get_transport);
+
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
-MODULE_VERSION("1.0.1.0-k");
+MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");
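
[Editor's note] lock_sock_nested(..., SINGLE_DEPTH_NESTING) is needed because listener and child vsock sockets share one lockdep class, so holding both would normally be flagged as a potential self-deadlock; the annotation records that the second lock is a deliberate one-level nesting following the ordering documented in the comment added above. A sketch of that shape with illustrative listener/child roles:

        #include <net/sock.h>
        #include <linux/lockdep.h>

        static void example_accept_one(struct sock *listener, struct sock *child)
        {
                lock_sock(listener);
                lock_sock_nested(child, SINGLE_DEPTH_NESTING);

                /* ... move child off the pending list, fix up sk_ack_backlog ... */

                release_sock(child);
                release_sock(listener);
        }
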
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
new file mode 100644
index 000000000000..936d7eee62d0
--- /dev/null
+++ b/net/vmw_vsock/virtio_transport.c
@@ -0,0 +1,620 @@
+/*
+ * virtio transport for vsock
+ *
+ * Copyright (C) 2013-2015 Red Hat, Inc.
+ * Author: Asias He <asias@redhat.com>
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
+ * early virtio-vsock proof-of-concept bits.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_vsock.h>
+#include <net/sock.h>
+#include <linux/mutex.h>
+#include <net/af_vsock.h>
+
+static struct workqueue_struct *virtio_vsock_workqueue;
+static struct virtio_vsock *the_virtio_vsock;
+static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
+
+struct virtio_vsock {
+ struct virtio_device *vdev;
+ struct virtqueue *vqs[VSOCK_VQ_MAX];
+
+ /* Virtqueue processing is deferred to a workqueue */
+ struct work_struct tx_work;
+ struct work_struct rx_work;
+ struct work_struct event_work;
+
+ /* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
+ * must be accessed with tx_lock held.
+ */
+ struct mutex tx_lock;
+
+ struct work_struct send_pkt_work;
+ spinlock_t send_pkt_list_lock;
+ struct list_head send_pkt_list;
+
+ atomic_t queued_replies;
+
+ /* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
+ * must be accessed with rx_lock held.
+ */
+ struct mutex rx_lock;
+ int rx_buf_nr;
+ int rx_buf_max_nr;
+
+ /* The following fields are protected by event_lock.
+ * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
+ */
+ struct mutex event_lock;
+ struct virtio_vsock_event event_list[8];
+
+ u32 guest_cid;
+};
+
+static struct virtio_vsock *virtio_vsock_get(void)
+{
+ return the_virtio_vsock;
+}
+
+static u32 virtio_transport_get_local_cid(void)
+{
+ struct virtio_vsock *vsock = virtio_vsock_get();
+
+ return vsock->guest_cid;
+}
+
+static void
+virtio_transport_send_pkt_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, send_pkt_work);
+ struct virtqueue *vq;
+ bool added = false;
+ bool restart_rx = false;
+
+ mutex_lock(&vsock->tx_lock);
+
+ vq = vsock->vqs[VSOCK_VQ_TX];
+
+ for (;;) {
+ struct virtio_vsock_pkt *pkt;
+ struct scatterlist hdr, buf, *sgs[2];
+ int ret, in_sg = 0, out_sg = 0;
+ bool reply;
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ if (list_empty(&vsock->send_pkt_list)) {
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+ break;
+ }
+
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ reply = pkt->reply;
+
+ sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
+ sgs[out_sg++] = &hdr;
+ if (pkt->buf) {
+ sg_init_one(&buf, pkt->buf, pkt->len);
+ sgs[out_sg++] = &buf;
+ }
+
+ ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
+ /* Usually this means that there is no more space available in
+ * the vq
+ */
+ if (ret < 0) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+ break;
+ }
+
+ if (reply) {
+ struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+ int val;
+
+ val = atomic_dec_return(&vsock->queued_replies);
+
+ /* Do we now have resources to resume rx processing? */
+ if (val + 1 == virtqueue_get_vring_size(rx_vq))
+ restart_rx = true;
+ }
+
+ added = true;
+ }
+
+ if (added)
+ virtqueue_kick(vq);
+
+ mutex_unlock(&vsock->tx_lock);
+
+ if (restart_rx)
+ queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+}
+
+static int
+virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+{
+ struct virtio_vsock *vsock;
+ int len = pkt->len;
+
+ vsock = virtio_vsock_get();
+ if (!vsock) {
+ virtio_transport_free_pkt(pkt);
+ return -ENODEV;
+ }
+
+ if (pkt->reply)
+ atomic_inc(&vsock->queued_replies);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add_tail(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ return len;
+}
+
+static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
+{
+ int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
+ struct virtio_vsock_pkt *pkt;
+ struct scatterlist hdr, buf, *sgs[2];
+ struct virtqueue *vq;
+ int ret;
+
+ vq = vsock->vqs[VSOCK_VQ_RX];
+
+ do {
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ break;
+
+ pkt->buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!pkt->buf) {
+ virtio_transport_free_pkt(pkt);
+ break;
+ }
+
+ pkt->len = buf_len;
+
+ sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
+ sgs[0] = &hdr;
+
+ sg_init_one(&buf, pkt->buf, buf_len);
+ sgs[1] = &buf;
+ ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
+ if (ret) {
+ virtio_transport_free_pkt(pkt);
+ break;
+ }
+ vsock->rx_buf_nr++;
+ } while (vq->num_free);
+ if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
+ vsock->rx_buf_max_nr = vsock->rx_buf_nr;
+ virtqueue_kick(vq);
+}
+
+static void virtio_transport_tx_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, tx_work);
+ struct virtqueue *vq;
+ bool added = false;
+
+ vq = vsock->vqs[VSOCK_VQ_TX];
+ mutex_lock(&vsock->tx_lock);
+ do {
+ struct virtio_vsock_pkt *pkt;
+ unsigned int len;
+
+ virtqueue_disable_cb(vq);
+ while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
+ virtio_transport_free_pkt(pkt);
+ added = true;
+ }
+ } while (!virtqueue_enable_cb(vq));
+ mutex_unlock(&vsock->tx_lock);
+
+ if (added)
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+}
+
+/* Is there space left for replies to rx packets? */
+static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
+{
+ struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
+ int val;
+
+ smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
+ val = atomic_read(&vsock->queued_replies);
+
+ return val < virtqueue_get_vring_size(vq);
+}
+
+static void virtio_transport_rx_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, rx_work);
+ struct virtqueue *vq;
+
+ vq = vsock->vqs[VSOCK_VQ_RX];
+
+ mutex_lock(&vsock->rx_lock);
+
+ do {
+ virtqueue_disable_cb(vq);
+ for (;;) {
+ struct virtio_vsock_pkt *pkt;
+ unsigned int len;
+
+ if (!virtio_transport_more_replies(vsock)) {
+ /* Stop rx until the device processes already
+ * pending replies. Leave rx virtqueue
+ * callbacks disabled.
+ */
+ goto out;
+ }
+
+ pkt = virtqueue_get_buf(vq, &len);
+ if (!pkt) {
+ break;
+ }
+
+ vsock->rx_buf_nr--;
+
+ /* Drop short/long packets */
+ if (unlikely(len < sizeof(pkt->hdr) ||
+ len > sizeof(pkt->hdr) + pkt->len)) {
+ virtio_transport_free_pkt(pkt);
+ continue;
+ }
+
+ pkt->len = len - sizeof(pkt->hdr);
+ virtio_transport_recv_pkt(pkt);
+ }
+ } while (!virtqueue_enable_cb(vq));
+
+out:
+ if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
+ virtio_vsock_rx_fill(vsock);
+ mutex_unlock(&vsock->rx_lock);
+}
+
+/* event_lock must be held */
+static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
+ struct virtio_vsock_event *event)
+{
+ struct scatterlist sg;
+ struct virtqueue *vq;
+
+ vq = vsock->vqs[VSOCK_VQ_EVENT];
+
+ sg_init_one(&sg, event, sizeof(*event));
+
+ return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
+}
+
+/* event_lock must be held */
+static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
+ struct virtio_vsock_event *event = &vsock->event_list[i];
+
+ virtio_vsock_event_fill_one(vsock, event);
+ }
+
+ virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
+}
+
+static void virtio_vsock_reset_sock(struct sock *sk)
+{
+ lock_sock(sk);
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk->sk_error_report(sk);
+ release_sock(sk);
+}
+
+static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
+{
+ struct virtio_device *vdev = vsock->vdev;
+ u64 guest_cid;
+
+ vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
+ &guest_cid, sizeof(guest_cid));
+ vsock->guest_cid = le64_to_cpu(guest_cid);
+}
+
+/* event_lock must be held */
+static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
+ struct virtio_vsock_event *event)
+{
+ switch (le32_to_cpu(event->id)) {
+ case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
+ virtio_vsock_update_guest_cid(vsock);
+ vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+ break;
+ }
+}
+
+static void virtio_transport_event_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, event_work);
+ struct virtqueue *vq;
+
+ vq = vsock->vqs[VSOCK_VQ_EVENT];
+
+ mutex_lock(&vsock->event_lock);
+
+ do {
+ struct virtio_vsock_event *event;
+ unsigned int len;
+
+ virtqueue_disable_cb(vq);
+ while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
+ if (len == sizeof(*event))
+ virtio_vsock_event_handle(vsock, event);
+
+ virtio_vsock_event_fill_one(vsock, event);
+ }
+ } while (!virtqueue_enable_cb(vq));
+
+ virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
+
+ mutex_unlock(&vsock->event_lock);
+}
+
+static void virtio_vsock_event_done(struct virtqueue *vq)
+{
+ struct virtio_vsock *vsock = vq->vdev->priv;
+
+ if (!vsock)
+ return;
+ queue_work(virtio_vsock_workqueue, &vsock->event_work);
+}
+
+static void virtio_vsock_tx_done(struct virtqueue *vq)
+{
+ struct virtio_vsock *vsock = vq->vdev->priv;
+
+ if (!vsock)
+ return;
+ queue_work(virtio_vsock_workqueue, &vsock->tx_work);
+}
+
+static void virtio_vsock_rx_done(struct virtqueue *vq)
+{
+ struct virtio_vsock *vsock = vq->vdev->priv;
+
+ if (!vsock)
+ return;
+ queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+}
+
+static struct virtio_transport virtio_transport = {
+ .transport = {
+ .get_local_cid = virtio_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+
+ .set_buffer_size = virtio_transport_set_buffer_size,
+ .set_min_buffer_size = virtio_transport_set_min_buffer_size,
+ .set_max_buffer_size = virtio_transport_set_max_buffer_size,
+ .get_buffer_size = virtio_transport_get_buffer_size,
+ .get_min_buffer_size = virtio_transport_get_min_buffer_size,
+ .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+ },
+
+ .send_pkt = virtio_transport_send_pkt,
+};
+
+static int virtio_vsock_probe(struct virtio_device *vdev)
+{
+ vq_callback_t *callbacks[] = {
+ virtio_vsock_rx_done,
+ virtio_vsock_tx_done,
+ virtio_vsock_event_done,
+ };
+ static const char * const names[] = {
+ "rx",
+ "tx",
+ "event",
+ };
+ struct virtio_vsock *vsock = NULL;
+ int ret;
+
+ ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
+ if (ret)
+ return ret;
+
+ /* Only one virtio-vsock device per guest is supported */
+ if (the_virtio_vsock) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
+ if (!vsock) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vsock->vdev = vdev;
+
+ ret = vsock->vdev->config->find_vqs(vsock->vdev, VSOCK_VQ_MAX,
+ vsock->vqs, callbacks, names);
+ if (ret < 0)
+ goto out;
+
+ virtio_vsock_update_guest_cid(vsock);
+
+ ret = vsock_core_init(&virtio_transport.transport);
+ if (ret < 0)
+ goto out_vqs;
+
+ vsock->rx_buf_nr = 0;
+ vsock->rx_buf_max_nr = 0;
+ atomic_set(&vsock->queued_replies, 0);
+
+ vdev->priv = vsock;
+ the_virtio_vsock = vsock;
+ mutex_init(&vsock->tx_lock);
+ mutex_init(&vsock->rx_lock);
+ mutex_init(&vsock->event_lock);
+ spin_lock_init(&vsock->send_pkt_list_lock);
+ INIT_LIST_HEAD(&vsock->send_pkt_list);
+ INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
+ INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
+ INIT_WORK(&vsock->event_work, virtio_transport_event_work);
+ INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
+
+ mutex_lock(&vsock->rx_lock);
+ virtio_vsock_rx_fill(vsock);
+ mutex_unlock(&vsock->rx_lock);
+
+ mutex_lock(&vsock->event_lock);
+ virtio_vsock_event_fill(vsock);
+ mutex_unlock(&vsock->event_lock);
+
+ mutex_unlock(&the_virtio_vsock_mutex);
+ return 0;
+
+out_vqs:
+ vsock->vdev->config->del_vqs(vsock->vdev);
+out:
+ kfree(vsock);
+ mutex_unlock(&the_virtio_vsock_mutex);
+ return ret;
+}
+
+static void virtio_vsock_remove(struct virtio_device *vdev)
+{
+ struct virtio_vsock *vsock = vdev->priv;
+ struct virtio_vsock_pkt *pkt;
+
+ flush_work(&vsock->rx_work);
+ flush_work(&vsock->tx_work);
+ flush_work(&vsock->event_work);
+ flush_work(&vsock->send_pkt_work);
+
+ vdev->config->reset(vdev);
+
+ mutex_lock(&vsock->rx_lock);
+ while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
+ virtio_transport_free_pkt(pkt);
+ mutex_unlock(&vsock->rx_lock);
+
+ mutex_lock(&vsock->tx_lock);
+ while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
+ virtio_transport_free_pkt(pkt);
+ mutex_unlock(&vsock->tx_lock);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ while (!list_empty(&vsock->send_pkt_list)) {
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ mutex_lock(&the_virtio_vsock_mutex);
+ the_virtio_vsock = NULL;
+ vsock_core_exit();
+ mutex_unlock(&the_virtio_vsock_mutex);
+
+ vdev->config->del_vqs(vdev);
+
+ kfree(vsock);
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features[] = {
+};
+
+static struct virtio_driver virtio_vsock_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtio_vsock_probe,
+ .remove = virtio_vsock_remove,
+};
+
+static int __init virtio_vsock_init(void)
+{
+ int ret;
+
+ virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
+ if (!virtio_vsock_workqueue)
+ return -ENOMEM;
+ ret = register_virtio_driver(&virtio_vsock_driver);
+ if (ret)
+ destroy_workqueue(virtio_vsock_workqueue);
+ return ret;
+}
+
+static void __exit virtio_vsock_exit(void)
+{
+ unregister_virtio_driver(&virtio_vsock_driver);
+ destroy_workqueue(virtio_vsock_workqueue);
+}
+
+module_init(virtio_vsock_init);
+module_exit(virtio_vsock_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Asias He");
+MODULE_DESCRIPTION("virtio transport for vsock");
+MODULE_DEVICE_TABLE(virtio, id_table);
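
[Editor's note] A design note on the file above: the virtio done callbacks (virtio_vsock_rx_done() and friends) run in interrupt context, so they only queue work; the work functions then drain the virtqueues under the per-queue mutexes using a disable/drain/enable loop. virtqueue_enable_cb() returns false when buffers arrived while callbacks were off, which is why the loop repeats in that case. A sketch of that drain idiom, with handle() as a hypothetical consumer:

        #include <linux/virtio.h>

        void handle(void *buf, unsigned int len);   /* hypothetical consumer */

        static void example_drain(struct virtqueue *vq)
        {
                void *buf;
                unsigned int len;

                do {
                        virtqueue_disable_cb(vq);
                        while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                                handle(buf, len);
                } while (!virtqueue_enable_cb(vq)); /* false: more arrived, loop */
        }
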
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
new file mode 100644
index 000000000000..9c07c76c504d
--- /dev/null
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -0,0 +1,999 @@
+/*
+ * common code for virtio vsock
+ *
+ * Copyright (C) 2013-2015 Red Hat, Inc.
+ * Author: Asias He <asias@redhat.com>
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_vsock.h>
+
+#include <net/sock.h>
+#include <net/af_vsock.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/vsock_virtio_transport_common.h>
+
+/* How long to wait for graceful shutdown of a connection */
+#define VSOCK_CLOSE_TIMEOUT (8 * HZ)
+
+static const struct virtio_transport *virtio_transport_get_ops(void)
+{
+ const struct vsock_transport *t = vsock_core_get_transport();
+
+ return container_of(t, struct virtio_transport, transport);
+}
+
+struct virtio_vsock_pkt *
+virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
+ size_t len,
+ u32 src_cid,
+ u32 src_port,
+ u32 dst_cid,
+ u32 dst_port)
+{
+ struct virtio_vsock_pkt *pkt;
+ int err;
+
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return NULL;
+
+ pkt->hdr.type = cpu_to_le16(info->type);
+ pkt->hdr.op = cpu_to_le16(info->op);
+ pkt->hdr.src_cid = cpu_to_le64(src_cid);
+ pkt->hdr.dst_cid = cpu_to_le64(dst_cid);
+ pkt->hdr.src_port = cpu_to_le32(src_port);
+ pkt->hdr.dst_port = cpu_to_le32(dst_port);
+ pkt->hdr.flags = cpu_to_le32(info->flags);
+ pkt->len = len;
+ pkt->hdr.len = cpu_to_le32(len);
+ pkt->reply = info->reply;
+ pkt->vsk = info->vsk;
+
+ if (info->msg && len > 0) {
+ pkt->buf = kmalloc(len, GFP_KERNEL);
+ if (!pkt->buf)
+ goto out_pkt;
+ err = memcpy_from_msg(pkt->buf, info->msg, len);
+ if (err)
+ goto out;
+ }
+
+ trace_virtio_transport_alloc_pkt(src_cid, src_port,
+ dst_cid, dst_port,
+ len,
+ info->type,
+ info->op,
+ info->flags);
+
+ return pkt;
+
+out:
+ kfree(pkt->buf);
+out_pkt:
+ kfree(pkt);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_alloc_pkt);
+
+static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
+ struct virtio_vsock_pkt_info *info)
+{
+ u32 src_cid, src_port, dst_cid, dst_port;
+ struct virtio_vsock_sock *vvs;
+ struct virtio_vsock_pkt *pkt;
+ u32 pkt_len = info->pkt_len;
+
+ src_cid = vm_sockets_get_local_cid();
+ src_port = vsk->local_addr.svm_port;
+ if (!info->remote_cid) {
+ dst_cid = vsk->remote_addr.svm_cid;
+ dst_port = vsk->remote_addr.svm_port;
+ } else {
+ dst_cid = info->remote_cid;
+ dst_port = info->remote_port;
+ }
+
+ vvs = vsk->trans;
+
+ /* we may send fewer than pkt_len bytes */
+ if (pkt_len > VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE)
+ pkt_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
+
+ /* virtio_transport_get_credit might return less than pkt_len credit */
+ pkt_len = virtio_transport_get_credit(vvs, pkt_len);
+
+ /* Do not send zero length OP_RW pkt */
+ if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
+ return pkt_len;
+
+ pkt = virtio_transport_alloc_pkt(info, pkt_len,
+ src_cid, src_port,
+ dst_cid, dst_port);
+ if (!pkt) {
+ virtio_transport_put_credit(vvs, pkt_len);
+ return -ENOMEM;
+ }
+
+ virtio_transport_inc_tx_pkt(vvs, pkt);
+
+ return virtio_transport_get_ops()->send_pkt(pkt);
+}
+
+static void virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
+ struct virtio_vsock_pkt *pkt)
+{
+ vvs->rx_bytes += pkt->len;
+}
+
+static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
+ struct virtio_vsock_pkt *pkt)
+{
+ vvs->rx_bytes -= pkt->len;
+ vvs->fwd_cnt += pkt->len;
+}
+
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
+{
+ spin_lock_bh(&vvs->tx_lock);
+ pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
+ pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
+ spin_unlock_bh(&vvs->tx_lock);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
+
+u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
+{
+ u32 ret;
+
+ spin_lock_bh(&vvs->tx_lock);
+ ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+ if (ret > credit)
+ ret = credit;
+ vvs->tx_cnt += ret;
+ spin_unlock_bh(&vvs->tx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_get_credit);
+
+void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
+{
+ spin_lock_bh(&vvs->tx_lock);
+ vvs->tx_cnt -= credit;
+ spin_unlock_bh(&vvs->tx_lock);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
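
[Editor's note] The credit functions above implement the virtio-vsock flow-control arithmetic: the peer advertises buf_alloc, the sender tracks tx_cnt (bytes ever sent) and peer_fwd_cnt (bytes the peer reports consumed), and available credit is buf_alloc minus the bytes still in flight. virtio_transport_get_credit() tentatively charges tx_cnt; virtio_transport_put_credit() refunds it if the packet is never sent. A worked example with illustrative numbers:

        #include <linux/types.h>

        static u32 example_credit(void)
        {
                u32 peer_buf_alloc = 65536; /* peer advertised a 64 KiB buffer */
                u32 tx_cnt = 81920;         /* total bytes we have ever sent */
                u32 peer_fwd_cnt = 49152;   /* bytes the peer reports consumed */
                u32 want = 40960;           /* caller asked to send 40 KiB */

                u32 in_flight = tx_cnt - peer_fwd_cnt;   /* 32768 in flight */
                u32 credit = peer_buf_alloc - in_flight; /* 32768 available */

                return want > credit ? credit : want;    /* trimmed to 32768 */
        }
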
+
+static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
+ int type,
+ struct virtio_vsock_hdr *hdr)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
+ .type = type,
+ .vsk = vsk,
+ };
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+
+static ssize_t
+virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ struct virtio_vsock_pkt *pkt;
+ size_t bytes, total = 0;
+ int err = -EFAULT;
+
+ spin_lock_bh(&vvs->rx_lock);
+ while (total < len && !list_empty(&vvs->rx_queue)) {
+ pkt = list_first_entry(&vvs->rx_queue,
+ struct virtio_vsock_pkt, list);
+
+ bytes = len - total;
+ if (bytes > pkt->len - pkt->off)
+ bytes = pkt->len - pkt->off;
+
+ /* sk_lock is held by caller so no one else can dequeue.
+ * Unlock rx_lock since memcpy_to_msg() may sleep.
+ */
+ spin_unlock_bh(&vvs->rx_lock);
+
+ err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
+ if (err)
+ goto out;
+
+ spin_lock_bh(&vvs->rx_lock);
+
+ total += bytes;
+ pkt->off += bytes;
+ if (pkt->off == pkt->len) {
+ virtio_transport_dec_rx_pkt(vvs, pkt);
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+ }
+ spin_unlock_bh(&vvs->rx_lock);
+
+ /* Send a credit pkt to peer */
+ virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
+ NULL);
+
+ return total;
+
+out:
+ if (total)
+ err = total;
+ return err;
+}
+
+ssize_t
+virtio_transport_stream_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len, int flags)
+{
+ if (flags & MSG_PEEK)
+ return -EOPNOTSUPP;
+
+ return virtio_transport_stream_do_dequeue(vsk, msg, len);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
+
+int
+virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len, int flags)
+{
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);
+
+s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ s64 bytes;
+
+ spin_lock_bh(&vvs->rx_lock);
+ bytes = vvs->rx_bytes;
+ spin_unlock_bh(&vvs->rx_lock);
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);
+
+static s64 virtio_transport_has_space(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ s64 bytes;
+
+ bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+ if (bytes < 0)
+ bytes = 0;
+
+ return bytes;
+}
+
+s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ s64 bytes;
+
+ spin_lock_bh(&vvs->tx_lock);
+ bytes = virtio_transport_has_space(vsk);
+ spin_unlock_bh(&vvs->tx_lock);
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);
+
+int virtio_transport_do_socket_init(struct vsock_sock *vsk,
+ struct vsock_sock *psk)
+{
+ struct virtio_vsock_sock *vvs;
+
+ vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
+ if (!vvs)
+ return -ENOMEM;
+
+ vsk->trans = vvs;
+ vvs->vsk = vsk;
+ if (psk) {
+ struct virtio_vsock_sock *ptrans = psk->trans;
+
+ vvs->buf_size = ptrans->buf_size;
+ vvs->buf_size_min = ptrans->buf_size_min;
+ vvs->buf_size_max = ptrans->buf_size_max;
+ vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
+ } else {
+ vvs->buf_size = VIRTIO_VSOCK_DEFAULT_BUF_SIZE;
+ vvs->buf_size_min = VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE;
+ vvs->buf_size_max = VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE;
+ }
+
+ vvs->buf_alloc = vvs->buf_size;
+
+ spin_lock_init(&vvs->rx_lock);
+ spin_lock_init(&vvs->tx_lock);
+ INIT_LIST_HEAD(&vvs->rx_queue);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
+
+u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ return vvs->buf_size;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_get_buffer_size);
+
+u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ return vvs->buf_size_min;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_get_min_buffer_size);
+
+u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ return vvs->buf_size_max;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_get_max_buffer_size);
+
+void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
+ val = VIRTIO_VSOCK_MAX_BUF_SIZE;
+ if (val < vvs->buf_size_min)
+ vvs->buf_size_min = val;
+ if (val > vvs->buf_size_max)
+ vvs->buf_size_max = val;
+ vvs->buf_size = val;
+ vvs->buf_alloc = val;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
+
+void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
+ val = VIRTIO_VSOCK_MAX_BUF_SIZE;
+ if (val > vvs->buf_size)
+ vvs->buf_size = val;
+ vvs->buf_size_min = val;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_set_min_buffer_size);
+
+void virtio_transport_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
+ val = VIRTIO_VSOCK_MAX_BUF_SIZE;
+ if (val < vvs->buf_size)
+ vvs->buf_size = val;
+ vvs->buf_size_max = val;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_set_max_buffer_size);
+
+int
+virtio_transport_notify_poll_in(struct vsock_sock *vsk,
+ size_t target,
+ bool *data_ready_now)
+{
+ if (vsock_stream_has_data(vsk))
+ *data_ready_now = true;
+ else
+ *data_ready_now = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);
+
+int
+virtio_transport_notify_poll_out(struct vsock_sock *vsk,
+ size_t target,
+ bool *space_avail_now)
+{
+ s64 free_space;
+
+ free_space = vsock_stream_has_space(vsk);
+ if (free_space > 0)
+ *space_avail_now = true;
+ else if (free_space == 0)
+ *space_avail_now = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);
+
+int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);
+
+int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);
+
+int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);
+
+int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
+ size_t target, ssize_t copied, bool data_read,
+ struct vsock_transport_recv_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);
+
+int virtio_transport_notify_send_init(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);
+
+int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);
+
+int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);
+
+int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
+ ssize_t written, struct vsock_transport_send_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
+
+u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ return vvs->buf_size;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
+
+bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
+{
+ return true;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);
+
+bool virtio_transport_stream_allow(u32 cid, u32 port)
+{
+ return true;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);
+
+int virtio_transport_dgram_bind(struct vsock_sock *vsk,
+ struct sockaddr_vm *addr)
+{
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);
+
+bool virtio_transport_dgram_allow(u32 cid, u32 port)
+{
+ return false;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);
+
+int virtio_transport_connect(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_REQUEST,
+ .type = VIRTIO_VSOCK_TYPE_STREAM,
+ .vsk = vsk,
+ };
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_connect);
+
+int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_SHUTDOWN,
+ .type = VIRTIO_VSOCK_TYPE_STREAM,
+ .flags = (mode & RCV_SHUTDOWN ?
+ VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
+ (mode & SEND_SHUTDOWN ?
+ VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
+ .vsk = vsk,
+ };
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
+
+int
+virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
+ struct sockaddr_vm *remote_addr,
+ struct msghdr *msg,
+ size_t dgram_len)
+{
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);
+
+ssize_t
+virtio_transport_stream_enqueue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RW,
+ .type = VIRTIO_VSOCK_TYPE_STREAM,
+ .msg = msg,
+ .pkt_len = len,
+ .vsk = vsk,
+ };
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
+
+void virtio_transport_destruct(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ kfree(vvs);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_destruct);
+
+static int virtio_transport_reset(struct vsock_sock *vsk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RST,
+ .type = VIRTIO_VSOCK_TYPE_STREAM,
+ .reply = !!pkt,
+ .vsk = vsk,
+ };
+
+ /* Send RST only if the original pkt is not a RST pkt */
+ if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ return 0;
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+
+/* Normally packets are associated with a socket. There may be no socket if an
+ * attempt was made to connect to a socket that does not exist.
+ */
+static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RST,
+ .type = le16_to_cpu(pkt->hdr.type),
+ .reply = true,
+ };
+
+ /* Send RST only if the original pkt is not a RST pkt */
+ if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ return 0;
+
+ pkt = virtio_transport_alloc_pkt(&info, 0,
+ le64_to_cpu(pkt->hdr.dst_cid),
+ le32_to_cpu(pkt->hdr.dst_port),
+ le64_to_cpu(pkt->hdr.src_cid),
+ le32_to_cpu(pkt->hdr.src_port));
+ if (!pkt)
+ return -ENOMEM;
+
+ return virtio_transport_get_ops()->send_pkt(pkt);
+}
+
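+/* Sleep until the socket is flagged SOCK_DONE, a signal is pending, or the
+ * timeout expires, whichever comes first.
+ */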
+static void virtio_transport_wait_close(struct sock *sk, long timeout)
+{
+ if (timeout) {
+ DEFINE_WAIT(wait);
+
+ do {
+ prepare_to_wait(sk_sleep(sk), &wait,
+ TASK_INTERRUPTIBLE);
+ if (sk_wait_event(sk, &timeout,
+ sock_flag(sk, SOCK_DONE)))
+ break;
+ } while (!signal_pending(current) && timeout);
+
+ finish_wait(sk_sleep(sk), &wait);
+ }
+}
+
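+/* Complete the close: mark the socket done, signal the state change and, if
+ * the close timeout had been scheduled, remove the socket and drop the
+ * reference taken when the work was queued.
+ */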
+static void virtio_transport_do_close(struct vsock_sock *vsk,
+ bool cancel_timeout)
+{
+ struct sock *sk = sk_vsock(vsk);
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ if (vsock_stream_has_data(vsk) <= 0)
+ sk->sk_state = SS_DISCONNECTING;
+ sk->sk_state_change(sk);
+
+ if (vsk->close_work_scheduled &&
+ (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
+ vsk->close_work_scheduled = false;
+
+ vsock_remove_sock(vsk);
+
+ /* Release refcnt obtained when we scheduled the timeout */
+ sock_put(sk);
+ }
+}
+
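+/* Delayed-work handler: if the peer never completed the shutdown handshake,
+ * send a reset and force the close.
+ */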
+static void virtio_transport_close_timeout(struct work_struct *work)
+{
+ struct vsock_sock *vsk =
+ container_of(work, struct vsock_sock, close_work.work);
+ struct sock *sk = sk_vsock(vsk);
+
+ sock_hold(sk);
+ lock_sock(sk);
+
+ if (!sock_flag(sk, SOCK_DONE)) {
+ (void)virtio_transport_reset(vsk, NULL);
+
+ virtio_transport_do_close(vsk, false);
+ }
+
+ vsk->close_work_scheduled = false;
+
+ release_sock(sk);
+ sock_put(sk);
+}
+
+/* User context, vsk->sk is locked */
+static bool virtio_transport_close(struct vsock_sock *vsk)
+{
+ struct sock *sk = &vsk->sk;
+
+ if (!(sk->sk_state == SS_CONNECTED ||
+ sk->sk_state == SS_DISCONNECTING))
+ return true;
+
+ /* Already received SHUTDOWN from peer, reply with RST */
+ if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
+ (void)virtio_transport_reset(vsk, NULL);
+ return true;
+ }
+
+ if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
+ (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);
+
+ if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
+ virtio_transport_wait_close(sk, sk->sk_lingertime);
+
+ if (sock_flag(sk, SOCK_DONE))
+ return true;
+
+ sock_hold(sk);
+ INIT_DELAYED_WORK(&vsk->close_work,
+ virtio_transport_close_timeout);
+ vsk->close_work_scheduled = true;
+ schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
+ return false;
+}
+
+void virtio_transport_release(struct vsock_sock *vsk)
+{
+ struct sock *sk = &vsk->sk;
+ bool remove_sock = true;
+
+ lock_sock(sk);
+ if (sk->sk_type == SOCK_STREAM)
+ remove_sock = virtio_transport_close(vsk);
+ release_sock(sk);
+
+ if (remove_sock)
+ vsock_remove_sock(vsk);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_release);
+
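+/* Handle a packet that arrives while the socket is still connecting. */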
+static int
+virtio_transport_recv_connecting(struct sock *sk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+ int err;
+ int skerr;
+
+ switch (le16_to_cpu(pkt->hdr.op)) {
+ case VIRTIO_VSOCK_OP_RESPONSE:
+ sk->sk_state = SS_CONNECTED;
+ sk->sk_socket->state = SS_CONNECTED;
+ vsock_insert_connected(vsk);
+ sk->sk_state_change(sk);
+ break;
+ case VIRTIO_VSOCK_OP_INVALID:
+ break;
+ case VIRTIO_VSOCK_OP_RST:
+ skerr = ECONNRESET;
+ err = 0;
+ goto destroy;
+ default:
+ skerr = EPROTO;
+ err = -EINVAL;
+ goto destroy;
+ }
+ return 0;
+
+destroy:
+ virtio_transport_reset(vsk, pkt);
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = skerr;
+ sk->sk_error_report(sk);
+ return err;
+}
+
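+/* Handle a packet received on an established connection. */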
+static int
+virtio_transport_recv_connected(struct sock *sk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ int err = 0;
+
+ switch (le16_to_cpu(pkt->hdr.op)) {
+ case VIRTIO_VSOCK_OP_RW:
+ pkt->len = le32_to_cpu(pkt->hdr.len);
+ pkt->off = 0;
+
+ spin_lock_bh(&vvs->rx_lock);
+ virtio_transport_inc_rx_pkt(vvs, pkt);
+ list_add_tail(&pkt->list, &vvs->rx_queue);
+ spin_unlock_bh(&vvs->rx_lock);
+
+ sk->sk_data_ready(sk);
+ return err;
+ case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
+ sk->sk_write_space(sk);
+ break;
+ case VIRTIO_VSOCK_OP_SHUTDOWN:
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
+ vsk->peer_shutdown |= RCV_SHUTDOWN;
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+ vsk->peer_shutdown |= SEND_SHUTDOWN;
+ if (vsk->peer_shutdown == SHUTDOWN_MASK &&
+ vsock_stream_has_data(vsk) <= 0)
+ sk->sk_state = SS_DISCONNECTING;
+ if (le32_to_cpu(pkt->hdr.flags))
+ sk->sk_state_change(sk);
+ break;
+ case VIRTIO_VSOCK_OP_RST:
+ virtio_transport_do_close(vsk, true);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ virtio_transport_free_pkt(pkt);
+ return err;
+}
+
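+/* In the disconnecting state only a reset from the peer finishes the close. */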
+static void
+virtio_transport_recv_disconnecting(struct sock *sk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ virtio_transport_do_close(vsk, true);
+}
+
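+/* Acknowledge an incoming connection request with VIRTIO_VSOCK_OP_RESPONSE. */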
+static int
+virtio_transport_send_response(struct vsock_sock *vsk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RESPONSE,
+ .type = VIRTIO_VSOCK_TYPE_STREAM,
+ .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
+ .remote_port = le32_to_cpu(pkt->hdr.src_port),
+ .reply = true,
+ .vsk = vsk,
+ };
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+
+/* Handle server socket */
+static int
+virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+ struct vsock_sock *vchild;
+ struct sock *child;
+
+ if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
+ virtio_transport_reset(vsk, pkt);
+ return -EINVAL;
+ }
+
+ if (sk_acceptq_is_full(sk)) {
+ virtio_transport_reset(vsk, pkt);
+ return -ENOMEM;
+ }
+
+ child = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
+ sk->sk_type, 0);
+ if (!child) {
+ virtio_transport_reset(vsk, pkt);
+ return -ENOMEM;
+ }
+
+ sk->sk_ack_backlog++;
+
+ lock_sock_nested(child, SINGLE_DEPTH_NESTING);
+
+ child->sk_state = SS_CONNECTED;
+
+ vchild = vsock_sk(child);
+ vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
+ le32_to_cpu(pkt->hdr.dst_port));
+ vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
+ le32_to_cpu(pkt->hdr.src_port));
+
+ vsock_insert_connected(vchild);
+ vsock_enqueue_accept(sk, child);
+ virtio_transport_send_response(vchild, pkt);
+
+ release_sock(child);
+
+ sk->sk_data_ready(sk);
+ return 0;
+}
+
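+/* Refresh the cached peer credit (buf_alloc and fwd_cnt) from the packet
+ * header and report whether there is now space available to transmit.
+ */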
+static bool virtio_transport_space_update(struct sock *sk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ bool space_available;
+
+ /* buf_alloc and fwd_cnt are always included in the hdr */
+ spin_lock_bh(&vvs->tx_lock);
+ vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
+ vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
+ space_available = virtio_transport_has_space(vsk);
+ spin_unlock_bh(&vvs->tx_lock);
+ return space_available;
+}
+
+/* Called while holding virtio-vsock's vsock->rx_lock or vhost-vsock's
+ * vq->mutex.
+ */
+void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
+{
+ struct sockaddr_vm src, dst;
+ struct vsock_sock *vsk;
+ struct sock *sk;
+ bool space_available;
+
+ vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
+ le32_to_cpu(pkt->hdr.src_port));
+ vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
+ le32_to_cpu(pkt->hdr.dst_port));
+
+ trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
+ dst.svm_cid, dst.svm_port,
+ le32_to_cpu(pkt->hdr.len),
+ le16_to_cpu(pkt->hdr.type),
+ le16_to_cpu(pkt->hdr.op),
+ le32_to_cpu(pkt->hdr.flags),
+ le32_to_cpu(pkt->hdr.buf_alloc),
+ le32_to_cpu(pkt->hdr.fwd_cnt));
+
+ if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
+ (void)virtio_transport_reset_no_sock(pkt);
+ goto free_pkt;
+ }
+
+ /* The socket must be in the connected or bound table;
+ * otherwise send a reset back.
+ */
+ sk = vsock_find_connected_socket(&src, &dst);
+ if (!sk) {
+ sk = vsock_find_bound_socket(&dst);
+ if (!sk) {
+ (void)virtio_transport_reset_no_sock(pkt);
+ goto free_pkt;
+ }
+ }
+
+ vsk = vsock_sk(sk);
+
+ space_available = virtio_transport_space_update(sk, pkt);
+
+ lock_sock(sk);
+
+ /* Update CID in case it has changed after a transport reset event */
+ vsk->local_addr.svm_cid = dst.svm_cid;
+
+ if (space_available)
+ sk->sk_write_space(sk);
+
+ switch (sk->sk_state) {
+ case VSOCK_SS_LISTEN:
+ virtio_transport_recv_listen(sk, pkt);
+ virtio_transport_free_pkt(pkt);
+ break;
+ case SS_CONNECTING:
+ virtio_transport_recv_connecting(sk, pkt);
+ virtio_transport_free_pkt(pkt);
+ break;
+ case SS_CONNECTED:
+ virtio_transport_recv_connected(sk, pkt);
+ break;
+ case SS_DISCONNECTING:
+ virtio_transport_recv_disconnecting(sk, pkt);
+ virtio_transport_free_pkt(pkt);
+ break;
+ default:
+ virtio_transport_free_pkt(pkt);
+ break;
+ }
+ release_sock(sk);
+
+ /* Release refcnt obtained when we fetched this socket out of the
+ * bound or connected list.
+ */
+ sock_put(sk);
+ return;
+
+free_pkt:
+ virtio_transport_free_pkt(pkt);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
+
+void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Asias He");
+MODULE_DESCRIPTION("Common code for virtio vsock");
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 589c8b9908a5..008f3424dcbc 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -273,6 +273,31 @@ vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
}
static int
+vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
+ struct sockaddr_vm *dst,
+ enum vmci_transport_packet_type type,
+ u64 size,
+ u64 mode,
+ struct vmci_transport_waiting_info *wait,
+ u16 proto,
+ struct vmci_handle handle)
+{
+ struct vmci_transport_packet *pkt;
+ int err;
+
+ pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
+ mode, wait, proto, handle,
+ true);
+ kfree(pkt);
+
+ return err;
+}
+
+static int
vmci_transport_send_control_pkt(struct sock *sk,
enum vmci_transport_packet_type type,
u64 size,
@@ -281,9 +306,7 @@ vmci_transport_send_control_pkt(struct sock *sk,
u16 proto,
struct vmci_handle handle)
{
- struct vmci_transport_packet *pkt;
struct vsock_sock *vsk;
- int err;
vsk = vsock_sk(sk);
@@ -293,17 +316,10 @@ vmci_transport_send_control_pkt(struct sock *sk,
if (!vsock_addr_bound(&vsk->remote_addr))
return -EINVAL;
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return -ENOMEM;
-
- err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
- &vsk->remote_addr, type, size,
- mode, wait, proto, handle,
- true);
- kfree(pkt);
-
- return err;
+ return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
+ &vsk->remote_addr,
+ type, size, mode,
+ wait, proto, handle);
}
static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
@@ -321,12 +337,29 @@ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
static int vmci_transport_send_reset(struct sock *sk,
struct vmci_transport_packet *pkt)
{
+ struct sockaddr_vm *dst_ptr;
+ struct sockaddr_vm dst;
+ struct vsock_sock *vsk;
+
if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
return 0;
- return vmci_transport_send_control_pkt(sk,
- VMCI_TRANSPORT_PACKET_TYPE_RST,
- 0, 0, NULL, VSOCK_PROTO_INVALID,
- VMCI_INVALID_HANDLE);
+
+ vsk = vsock_sk(sk);
+
+ if (!vsock_addr_bound(&vsk->local_addr))
+ return -EINVAL;
+
+ if (vsock_addr_bound(&vsk->remote_addr)) {
+ dst_ptr = &vsk->remote_addr;
+ } else {
+ vsock_addr_init(&dst, pkt->dg.src.context,
+ pkt->src_port);
+ dst_ptr = &dst;
+ }
+ return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
+ VMCI_TRANSPORT_PACKET_TYPE_RST,
+ 0, 0, NULL, VSOCK_PROTO_INVALID,
+ VMCI_INVALID_HANDLE);
}
static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
@@ -1643,6 +1676,8 @@ static void vmci_transport_destruct(struct vsock_sock *vsk)
static void vmci_transport_release(struct vsock_sock *vsk)
{
+ vsock_remove_sock(vsk);
+
if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) {
vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle);
vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
@@ -1734,11 +1769,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
/* Retrieve the head sk_buff from the socket's receive queue. */
err = 0;
skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
- if (err)
- return err;
-
if (!skb)
- return -EAGAIN;
+ return err;
dg = (struct vmci_datagram *)skb->data;
if (!dg)
@@ -2053,7 +2085,7 @@ static u32 vmci_transport_get_local_cid(void)
return vmci_get_context_id();
}
-static struct vsock_transport vmci_transport = {
+static const struct vsock_transport vmci_transport = {
.init = vmci_transport_socket_init,
.destruct = vmci_transport_destruct,
.release = vmci_transport_release,
@@ -2153,7 +2185,7 @@ module_exit(vmci_transport_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.3.0-k");
+MODULE_VERSION("1.0.4.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 99e013f3a88d..2e48d0b1ddd0 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -623,7 +623,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
spin_lock_bh(&net->xfrm.xfrm_state_lock);
si->sadcnt = net->xfrm.state_num;
- si->sadhcnt = net->xfrm.state_hmask;
+ si->sadhcnt = net->xfrm.state_hmask + 1;
si->sadhmcnt = xfrm_state_hashmax;
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index dd8397894d5c..12a6940741fe 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -46,8 +46,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
$xs = "[0-9a-f ]"; # hex character or space
$funcre = qr/^$x* <(.*)>:$/;
if ($arch eq 'aarch64') {
- #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp,#-80]!
- $re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
+ #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
+ $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
} elsif ($arch eq 'arm') {
#c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64
$re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index c410d257da06..0c7800112ff5 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -71,7 +71,7 @@ static void warn_ignored_character(char chr)
{
fprintf(stderr,
"%s:%d:warning: ignoring unsupported character '%c'\n",
- zconf_curname(), zconf_lineno(), chr);
+ current_file->name, yylineno, chr);
}
%}
@@ -191,6 +191,8 @@ n [A-Za-z0-9_-]
}
<<EOF>> {
BEGIN(INITIAL);
+ yylval.string = text;
+ return T_WORD_QUOTE;
}
}
diff --git a/security/security.c b/security/security.c
index b636dd5caa77..e60d560e45f8 100644
--- a/security/security.c
+++ b/security/security.c
@@ -870,6 +870,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
void security_cred_free(struct cred *cred)
{
+ /*
+ * There is a failure case in prepare_creds() that
+ * may result in a call here with ->security being NULL.
+ */
+ if (unlikely(cred->security == NULL))
+ return;
+
call_void_hook(cred_free, cred);
}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 992a31530825..965a55eacaba 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -726,7 +726,8 @@ static int sens_destroy(void *key, void *datum, void *p)
kfree(key);
if (datum) {
levdatum = datum;
- ebitmap_destroy(&levdatum->level->cat);
+ if (levdatum->level)
+ ebitmap_destroy(&levdatum->level->cat);
kfree(levdatum->level);
}
kfree(datum);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index cb6ed10816d4..0a8808954bd8 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -288,7 +288,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
break;
case YAMA_SCOPE_RELATIONAL:
rcu_read_lock();
- if (!task_is_descendant(current, child) &&
+ if (!pid_alive(child))
+ rc = -EPERM;
+ if (!rc && !task_is_descendant(current, child) &&
!ptracer_exception_found(current, child) &&
!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
rc = -EPERM;
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index d79a04e703dc..8239ebc9349c 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -25,6 +25,7 @@
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/pcm.h>
@@ -125,6 +126,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
return -EFAULT;
if (stream < 0 || stream > 1)
return -EINVAL;
+ stream = array_index_nospec(stream, 2);
if (get_user(subdevice, &info->subdevice))
return -EFAULT;
mutex_lock(&register_mutex);
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 091290d1f3ea..1898fa4228ad 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -382,7 +382,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
/* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */
SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal),
/* Apogee Electronics, Ensemble */
- SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal),
+ SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
/* ESI, Quatafire610 */
SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
/* AcousticReality, eARMasterOne */
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index d2951ed4bf71..1984291ebd07 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -899,6 +899,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip)
struct dsp_spos_instance * ins = chip->dsp_spos_instance;
int i;
+ if (!ins)
+ return 0;
+
snd_info_free_entry(ins->proc_sym_info_entry);
ins->proc_sym_info_entry = NULL;
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 50b216fc369f..5d422d65e62b 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/tlv.h>
@@ -1000,6 +1001,8 @@ static int snd_emu10k1_ipcm_poke(struct snd_emu10k1 *emu,
if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
return -EINVAL;
+ ipcm->substream = array_index_nospec(ipcm->substream,
+ EMU10K1_FX8010_PCM_COUNT);
if (ipcm->channels > 32)
return -EINVAL;
pcm = &emu->fx8010.pcm[ipcm->substream];
@@ -1046,6 +1049,8 @@ static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu,
if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
return -EINVAL;
+ ipcm->substream = array_index_nospec(ipcm->substream,
+ EMU10K1_FX8010_PCM_COUNT);
pcm = &emu->fx8010.pcm[ipcm->substream];
mutex_lock(&emu->fx8010.lock);
spin_lock_irq(&emu->reg_lock);
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 17fd81736d3d..039fbbb1e53c 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -249,10 +249,12 @@ static int hda_tegra_suspend(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
+ struct hdac_bus *bus = azx_bus(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
azx_stop_chip(chip);
+ synchronize_irq(bus->irq);
azx_enter_link_reset(chip);
hda_tegra_disable_clocks(hda);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index aea3cc2abe3a..536184ac315d 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -853,6 +853,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0467e5ba82e0..5d8ac2d798df 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4792,6 +4792,13 @@ static void alc280_fixup_hp_9480m(struct hda_codec *codec,
}
}
+static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+}
+
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
@@ -4891,6 +4898,7 @@ enum {
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
ALC255_FIXUP_DELL_SPK_NOISE,
+ ALC225_FIXUP_DISABLE_MIC_VREF,
ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_FIXUP_DISABLE_DAC3,
ALC280_FIXUP_HP_HEADSET_MIC,
@@ -5546,6 +5554,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
},
+ [ALC225_FIXUP_DISABLE_MIC_VREF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_mic_vref,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
[ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -5555,7 +5569,7 @@ static const struct hda_fixup alc269_fixups[] = {
{}
},
.chained = true,
- .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF
},
[ALC280_FIXUP_HP_HEADSET_MIC] = {
.type = HDA_FIXUP_FUNC,
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 7c8941b8b2de..dd6c9e6a1d53 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -30,6 +30,7 @@
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -4065,15 +4066,16 @@ static int snd_hdsp_channel_info(struct snd_pcm_substream *substream,
struct snd_pcm_channel_info *info)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
- int mapped_channel;
+ unsigned int channel = info->channel;
- if (snd_BUG_ON(info->channel >= hdsp->max_channels))
+ if (snd_BUG_ON(channel >= hdsp->max_channels))
return -EINVAL;
+ channel = array_index_nospec(channel, hdsp->max_channels);
- if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
+ if (hdsp->channel_map[channel] < 0)
return -EINVAL;
- info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
+ info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
info->first = 0;
info->step = 32;
return 0;
diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
index e557946718a9..d9fcae071b47 100644
--- a/sound/synth/emux/emux_hwdep.c
+++ b/sound/synth/emux/emux_hwdep.c
@@ -22,9 +22,9 @@
#include <sound/core.h>
#include <sound/hwdep.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
#include "emux_voice.h"
-
#define TMP_CLIENT_ID 0x1001
/*
@@ -66,13 +66,16 @@ snd_emux_hwdep_misc_mode(struct snd_emux *emu, void __user *arg)
return -EFAULT;
if (info.mode < 0 || info.mode >= EMUX_MD_END)
return -EINVAL;
+ info.mode = array_index_nospec(info.mode, EMUX_MD_END);
if (info.port < 0) {
for (i = 0; i < emu->num_ports; i++)
emu->portptrs[i]->ctrls[info.mode] = info.value;
} else {
- if (info.port < emu->num_ports)
+ if (info.port < emu->num_ports) {
+ info.port = array_index_nospec(info.port, emu->num_ports);
emu->portptrs[info.port]->ctrls[info.mode] = info.value;
+ }
}
return 0;
}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 81e7d4717194..80d7a380fade 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2080,7 +2080,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
char *name)
{
struct uac_processing_unit_descriptor *desc = raw_desc;
- int num_ins = desc->bNrInPins;
+ int num_ins;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
int i, err, nameid, type, len;
@@ -2095,7 +2095,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
0, NULL, default_value_info
};
- if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
+ if (desc->bLength < 13) {
+ usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
+ return -EINVAL;
+ }
+
+ num_ins = desc->bNrInPins;
+ if (desc->bLength < 13 + num_ins ||
desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
return -EINVAL;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 15cbe2565703..d32727c74a16 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3321,6 +3321,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
}
}
},
+ {
+ .ifnum = -1
+ },
}
}
},
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index de3965c4e4aa..15e8e0ec28a5 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -473,10 +473,21 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
struct perf_evsel *evsel)
{
int err;
+ char c;
if (!evsel)
return 0;
+ /*
+ * If supported, force pass-through config term (pt=1) even if user
+ * sets pt=0, which avoids senseless kernel errors.
+ */
+ if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
+ !(evsel->attr.config & 1)) {
+ pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
+ evsel->attr.config |= 1;
+ }
+
err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
"cyc_thresh", "caps/psb_cyc",
evsel->attr.config);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 6e825dbaddea..4a7e0132294f 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1936,7 +1936,7 @@ restart:
if (!name_only && strlen(syms->alias))
snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
else
- strncpy(name, syms->symbol, MAX_NAME_LEN);
+ strlcpy(name, syms->symbol, MAX_NAME_LEN);
evt_list[evt_i] = strdup(name);
if (evt_list[evt_i] == NULL)
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 593066c68e3d..4f650ebd564a 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -100,7 +100,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
char path[PATH_MAX];
const char *lc;
- snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -147,7 +147,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
ssize_t sret;
int fd;
- snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -177,7 +177,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name)
char path[PATH_MAX];
int fd;
- snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -195,7 +195,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
char path[PATH_MAX];
int fd;
- snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index eec6c1149f44..132878d4847a 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -333,7 +333,7 @@ static char *cpu_model(void)
if (file) {
while (fgets(buf, 255, file)) {
if (strstr(buf, "model name")) {
- strncpy(cpu_m, &buf[13], 255);
+ strlcpy(cpu_m, &buf[13], 255);
break;
}
}
diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h
new file mode 100644
index 000000000000..4f93af89ae16
--- /dev/null
+++ b/tools/virtio/linux/dma-mapping.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_DMA_MAPPING_H
+#define _LINUX_DMA_MAPPING_H
+
+#ifdef CONFIG_HAS_DMA
+# error Virtio userspace code does not support CONFIG_HAS_DMA
+#endif
+
+#define PCI_DMA_BUS_IS_PHYS 1
+
+enum dma_data_direction {
+ DMA_BIDIRECTIONAL = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_NONE = 3,
+};
+
+#endif