-rw-r--r--  Makefile | 2
-rw-r--r--  android/configs/android-base.cfg | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi | 84
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660.dtsi | 4
-rw-r--r--  arch/arm/kvm/init.S | 5
-rw-r--r--  arch/arm/kvm/mmu.c | 3
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 13
-rw-r--r--  arch/arm64/include/asm/barrier.h | 18
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 8
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 3
-rw-r--r--  arch/arm64/kernel/entry.S | 6
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 1
-rw-r--r--  arch/arm64/kernel/topology.c | 7
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 27
-rw-r--r--  arch/powerpc/include/asm/topology.h | 14
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 19
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 2
-rw-r--r--  arch/sparc/Kconfig | 4
-rw-r--r--  arch/sparc/include/asm/mmu_64.h | 2
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h | 32
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h | 4
-rw-r--r--  arch/sparc/include/asm/pil.h | 1
-rw-r--r--  arch/sparc/include/asm/setup.h | 2
-rw-r--r--  arch/sparc/include/asm/vio.h | 1
-rw-r--r--  arch/sparc/kernel/irq_64.c | 17
-rw-r--r--  arch/sparc/kernel/kernel.h | 1
-rw-r--r--  arch/sparc/kernel/smp_64.c | 31
-rw-r--r--  arch/sparc/kernel/tsb.S | 11
-rw-r--r--  arch/sparc/kernel/ttable_64.S | 2
-rw-r--r--  arch/sparc/kernel/vio.c | 68
-rw-r--r--  arch/sparc/mm/init_32.c | 2
-rw-r--r--  arch/sparc/mm/init_64.c | 86
-rw-r--r--  arch/sparc/mm/tsb.c | 7
-rw-r--r--  arch/sparc/mm/ultra.S | 5
-rw-r--r--  arch/x86/kernel/kvm.c | 2
-rw-r--r--  arch/x86/kvm/cpuid.c | 20
-rw-r--r--  arch/x86/kvm/mmu.c | 7
-rw-r--r--  arch/x86/kvm/mmu.h | 1
-rw-r--r--  arch/x86/kvm/x86.c | 3
-rw-r--r--  crypto/gcm.c | 6
-rw-r--r--  drivers/char/diag/diag_masks.c | 3
-rw-r--r--  drivers/char/mem.c | 2
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 6
-rw-r--r--  drivers/char/random.c | 4
-rw-r--r--  drivers/clk/qcom/clk-cpu-osm.c | 24
-rw-r--r--  drivers/cpufreq/Kconfig | 26
-rw-r--r--  drivers/cpufreq/Makefile | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 33
-rw-r--r--  drivers/cpufreq/cpufreq_governor_attr_set.c | 84
-rw-r--r--  drivers/dma/ep93xx_dma.c | 2
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 18
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 39
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 16
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 54
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_snapshot.c | 30
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 89
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 23
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c | 172
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h | 18
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c | 10
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 63
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 54
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 69
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.h | 16
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.c | 4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 21
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c | 6
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.h | 2
-rw-r--r--  drivers/gpu/msm/kgsl.c | 49
-rw-r--r--  drivers/gpu/msm/kgsl.h | 5
-rw-r--r--  drivers/gpu/msm/kgsl_events.c | 8
-rw-r--r--  drivers/hid/uhid.c | 17
-rw-r--r--  drivers/hid/wacom_wac.c | 45
-rw-r--r--  drivers/i2c/busses/i2c-tiny-usb.c | 25
-rw-r--r--  drivers/iio/light/ltr501.c | 4
-rw-r--r--  drivers/iio/proximity/as3935.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 4
-rw-r--r--  drivers/input/mouse/elantech.c | 16
-rw-r--r--  drivers/media/platform/msm/ais/camera/camera.c | 15
-rw-r--r--  drivers/media/platform/msm/ais/common/cam_hw_ops.c | 4
-rw-r--r--  drivers/media/platform/msm/ais/common/cam_smmu_api.c | 2
-rw-r--r--  drivers/media/platform/msm/ais/common/cam_smmu_api.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/common/cam_soc_api.c | 4
-rw-r--r--  drivers/media/platform/msm/ais/common/msm_camera_io_util.c | 10
-rw-r--r--  drivers/media/platform/msm/ais/common/msm_camera_io_util.h | 2
-rw-r--r--  drivers/media/platform/msm/ais/fd/msm_fd_dev.c | 19
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_buf_mgr.c | 4
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp.c | 17
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp47.c | 1
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp47.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h | 16
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c | 8
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h | 2
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp_util.c | 18
-rw-r--r--  drivers/media/platform/msm/ais/msm.c | 16
-rw-r--r--  drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c | 10
-rw-r--r--  drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c | 2
-rw-r--r--  drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c | 17
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/msm_csid.c | 17
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h | 2
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h | 2
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h | 2
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h | 2
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h | 2
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c | 6
-rw-r--r--  drivers/media/platform/msm/ais/sensor/flash/msm_flash.c | 18
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c | 11
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c | 2
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h | 3
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c | 22
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c | 16
-rw-r--r--  drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h | 8
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/msm_sensor.c | 45
-rw-r--r--  drivers/media/platform/msm/ais/sensor/msm_sensor.h | 2
-rw-r--r--  drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c | 32
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ois/msm_ois.c | 15
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c | 16
-rw-r--r--  drivers/media/platform/msm/vidc/hfi_packetization.c | 6
-rw-r--r--  drivers/media/platform/msm/vidc/msm_venc.c | 16
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc.c | 78
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_dcvs.c | 8
-rw-r--r--  drivers/media/platform/msm/vidc/vidc_hfi_api.h | 4
-rw-r--r--  drivers/media/platform/msm/vidc/vidc_hfi_helper.h | 4
-rw-r--r--  drivers/misc/Kconfig | 2
-rw-r--r--  drivers/misc/cxl/file.c | 7
-rw-r--r--  drivers/misc/qseecom.c | 4
-rw-r--r--  drivers/misc/uid_sys_stats.c | 10
-rw-r--r--  drivers/mmc/host/sdhci-iproc.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 23
-rw-r--r--  drivers/mmc/host/sdhci-msm.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 4
-rw-r--r--  drivers/net/ethernet/ethoc.c | 3
-rw-r--r--  drivers/net/phy/marvell.c | 66
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 1
-rw-r--r--  drivers/net/vxlan.c | 19
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 86
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 3
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 6
-rw-r--r--  drivers/net/xen-netfront.c | 4
-rw-r--r--  drivers/platform/msm/sps/sps.c | 15
-rw-r--r--  drivers/platform/msm/sps/spsi.h | 19
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 1
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 7
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 120
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 5
-rw-r--r--  drivers/power/supply/qcom/storm-watch.c | 10
-rw-r--r--  drivers/power/supply/qcom/storm-watch.h | 1
-rw-r--r--  drivers/s390/net/qeth_core.h | 4
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 21
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 24
-rw-r--r--  drivers/s390/net/qeth_l2.h | 2
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 26
-rw-r--r--  drivers/s390/net/qeth_l2_sys.c | 8
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 6
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 8
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 2
-rw-r--r--  drivers/soc/qcom/glink.c | 23
-rw-r--r--  drivers/soc/qcom/glink_smem_native_xprt.c | 68
-rw-r--r--  drivers/soc/qcom/icnss.c | 12
-rw-r--r--  drivers/soc/qcom/rpm_master_stat.c | 50
-rw-r--r--  drivers/soc/qcom/rpm_rail_stats.c | 53
-rw-r--r--  drivers/soc/qcom/rpm_stats.c | 76
-rw-r--r--  drivers/soc/qcom/scm.c | 9
-rw-r--r--  drivers/soc/qcom/smp2p_sleepstate.c | 6
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_pack.c | 9
-rw-r--r--  drivers/target/target_core_transport.c | 23
-rw-r--r--  drivers/thermal/qpnp-adc-tm.c | 61
-rw-r--r--  drivers/tty/serial/ifx6x60.c | 2
-rw-r--r--  drivers/tty/serial/sh-sci.c | 10
-rw-r--r--  drivers/tty/tty_io.c | 3
-rw-r--r--  drivers/tty/tty_mutex.c | 7
-rw-r--r--  drivers/usb/chipidea/debug.c | 3
-rw-r--r--  drivers/usb/chipidea/udc.c | 8
-rw-r--r--  drivers/usb/core/config.c | 8
-rw-r--r--  drivers/usb/core/generic.c | 38
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 13
-rw-r--r--  drivers/xen/privcmd.c | 4
-rw-r--r--  fs/btrfs/extent-tree.c | 1
-rw-r--r--  fs/btrfs/file.c | 2
-rw-r--r--  fs/btrfs/inode.c | 4
-rw-r--r--  fs/buffer.c | 12
-rw-r--r--  fs/ceph/addr.c | 2
-rw-r--r--  fs/direct-io.c | 2
-rw-r--r--  fs/ext4/extents.c | 5
-rw-r--r--  fs/ext4/file.c | 50
-rw-r--r--  fs/ext4/inode.c | 9
-rw-r--r--  fs/ext4/ioctl.c | 3
-rw-r--r--  fs/ext4/move_extent.c | 2
-rw-r--r--  fs/f2fs/segment.c | 9
-rw-r--r--  fs/f2fs/super.c | 18
-rw-r--r--  fs/jfs/super.c | 4
-rw-r--r--  fs/mpage.c | 2
-rw-r--r--  fs/nfs/dir.c | 21
-rw-r--r--  fs/nfsd/blocklayout.c | 4
-rw-r--r--  fs/nfsd/nfs4proc.c | 13
-rw-r--r--  fs/nfsd/nfs4xdr.c | 13
-rw-r--r--  fs/nilfs2/btnode.c | 2
-rw-r--r--  fs/nilfs2/inode.c | 4
-rw-r--r--  fs/nilfs2/mdt.c | 4
-rw-r--r--  fs/nilfs2/segment.c | 2
-rw-r--r--  fs/ocfs2/aops.c | 2
-rw-r--r--  fs/ocfs2/file.c | 2
-rw-r--r--  fs/pnode.c | 11
-rw-r--r--  fs/reiserfs/file.c | 2
-rw-r--r--  fs/reiserfs/inode.c | 2
-rw-r--r--  fs/sdcardfs/lookup.c | 3
-rw-r--r--  fs/stat.c | 3
-rw-r--r--  fs/udf/inode.c | 2
-rw-r--r--  fs/ufs/balloc.c | 26
-rw-r--r--  fs/ufs/inode.c | 9
-rw-r--r--  fs/ufs/super.c | 18
-rw-r--r--  fs/ufs/util.h | 10
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 7
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c | 2
-rw-r--r--  fs/xfs/xfs_aops.c | 10
-rw-r--r--  fs/xfs/xfs_attr.h | 1
-rw-r--r--  fs/xfs/xfs_attr_list.c | 8
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 2
-rw-r--r--  fs/xfs/xfs_buf.c | 24
-rw-r--r--  fs/xfs/xfs_buf.h | 1
-rw-r--r--  fs/xfs/xfs_dir2_readdir.c | 15
-rw-r--r--  fs/xfs/xfs_file.c | 35
-rw-r--r--  fs/xfs/xfs_icache.c | 58
-rw-r--r--  fs/xfs/xfs_icache.h | 8
-rw-r--r--  fs/xfs/xfs_inode.h | 4
-rw-r--r--  fs/xfs/xfs_ioctl.c | 11
-rw-r--r--  fs/xfs/xfs_qm.c | 7
-rw-r--r--  fs/xfs/xfs_qm_syscalls.c | 3
-rw-r--r--  fs/xfs/xfs_xattr.c | 16
-rw-r--r--  include/linux/cgroup.h | 20
-rw-r--r--  include/linux/cpufreq.h | 52
-rw-r--r--  include/linux/diagchar.h | 21
-rw-r--r--  include/linux/fs.h | 5
-rw-r--r--  include/linux/if_vlan.h | 19
-rw-r--r--  include/linux/interrupt.h | 7
-rw-r--r--  include/linux/kthread.h | 4
-rw-r--r--  include/linux/memblock.h | 8
-rw-r--r--  include/linux/mmzone.h | 1
-rw-r--r--  include/linux/ptrace.h | 7
-rw-r--r--  include/linux/sched.h | 80
-rw-r--r--  include/linux/sched/sysctl.h | 1
-rw-r--r--  include/linux/skbuff.h | 3
-rw-r--r--  include/linux/usb.h | 2
-rw-r--r--  include/net/dst.h | 8
-rw-r--r--  include/net/ip_fib.h | 10
-rw-r--r--  include/net/ipv6.h | 1
-rw-r--r--  include/trace/events/sched.h | 44
-rw-r--r--  include/uapi/linux/msm_ipa.h | 39
-rw-r--r--  include/uapi/linux/usb/ch9.h | 23
-rw-r--r--  include/uapi/linux/v4l2-controls.h | 7
-rw-r--r--  include/uapi/media/ais/msm_ais.h | 2
-rw-r--r--  kernel/cpuset.c | 4
-rw-r--r--  kernel/events/core.c | 21
-rw-r--r--  kernel/fork.c | 2
-rw-r--r--  kernel/kthread.c | 96
-rw-r--r--  kernel/ptrace.c | 20
-rw-r--r--  kernel/sched/Makefile | 2
-rw-r--r--  kernel/sched/core.c | 78
-rw-r--r--  kernel/sched/cpufreq.c | 63
-rw-r--r--  kernel/sched/cpufreq_sched.c | 220
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 770
-rw-r--r--  kernel/sched/cpupri.c | 37
-rw-r--r--  kernel/sched/deadline.c | 3
-rw-r--r--  kernel/sched/debug.c | 26
-rw-r--r--  kernel/sched/fair.c | 1453
-rw-r--r--  kernel/sched/rt.c | 50
-rw-r--r--  kernel/sched/sched.h | 67
-rw-r--r--  kernel/sched/stats.c | 26
-rw-r--r--  kernel/sched/tune.c | 13
-rw-r--r--  kernel/softirq.c | 9
-rw-r--r--  kernel/sysctl.c | 7
-rw-r--r--  lib/test_user_copy.c | 20
-rw-r--r--  mm/memblock.c | 24
-rw-r--r--  mm/memory-failure.c | 8
-rw-r--r--  mm/mlock.c | 5
-rw-r--r--  mm/page_alloc.c | 25
-rw-r--r--  mm/slub.c | 6
-rw-r--r--  mm/truncate.c | 2
-rw-r--r--  net/bridge/br_netlink.c | 7
-rw-r--r--  net/bridge/br_stp_if.c | 2
-rw-r--r--  net/bridge/br_stp_timer.c | 2
-rw-r--r--  net/core/dev.c | 33
-rw-r--r--  net/core/dst.c | 23
-rw-r--r--  net/core/rtnetlink.c | 36
-rw-r--r--  net/core/sock.c | 12
-rw-r--r--  net/dccp/ipv6.c | 6
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/fib_frontend.c | 15
-rw-r--r--  net/ipv4/fib_semantics.c | 17
-rw-r--r--  net/ipv4/fib_trie.c | 26
-rw-r--r--  net/ipv4/inet_connection_sock.c | 2
-rw-r--r--  net/ipv4/route.c | 10
-rw-r--r--  net/ipv4/tcp.c | 7
-rw-r--r--  net/ipv4/tcp_cong.c | 1
-rw-r--r--  net/ipv4/tcp_input.c | 11
-rw-r--r--  net/ipv6/ip6_offload.c | 9
-rw-r--r--  net/ipv6/ip6_output.c | 20
-rw-r--r--  net/ipv6/output_core.c | 14
-rw-r--r--  net/ipv6/ping.c | 2
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/udp_offload.c | 6
-rw-r--r--  net/ipv6/xfrm6_mode_ro.c | 2
-rw-r--r--  net/ipv6/xfrm6_mode_transport.c | 2
-rw-r--r--  net/sctp/input.c | 16
-rw-r--r--  net/sctp/ipv6.c | 49
-rw-r--r--  security/keys/key.c | 5
-rw-r--r--  security/keys/keyctl.c | 4
-rw-r--r--  sound/core/timer.c | 7
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 2
-rw-r--r--  sound/soc/msm/qdsp6v2/q6adm.c | 18
-rw-r--r--  sound/soc/soc-core.c | 5
354 files changed, 5588 insertions(+), 2002 deletions(-)
diff --git a/Makefile b/Makefile
index 1a49c8e64768..011b673b7166 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 70
+SUBLEVEL = 72
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
index b0ef9fcbaac6..85c013764001 100644
--- a/android/configs/android-base.cfg
+++ b/android/configs/android-base.cfg
@@ -3,6 +3,8 @@
# CONFIG_DEVMEM is not set
# CONFIG_FHANDLE is not set
# CONFIG_INET_LRO is not set
+# CONFIG_NFSD is not set
+# CONFIG_NFS_FS is not set
# CONFIG_OABI_COMPAT is not set
# CONFIG_SYSVIPC is not set
# CONFIG_USELIB is not set
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 3c9b8e445ff3..4bcf64185038 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -542,6 +542,7 @@
&mdss_hdmi_cec_active>;
pinctrl-4 = <&mdss_hdmi_hpd_suspend &mdss_hdmi_ddc_suspend
&mdss_hdmi_cec_suspend>;
+ /delete-property/ qcom,pluggable;
};
#include "msm8996-sde-display.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi b/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi
index 48d544e18889..15295639e361 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi
@@ -459,3 +459,12 @@
< 0 0 >,
< 315000000 4 >;
};
+
+/* GPU overrides for auto */
+&msm_gpu {
+ qcom,gpu-pwrlevel-bins {
+ qcom,gpu-pwrlevels-0 {
+ qcom,initial-pwrlevel = <1>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index fc546512992d..3f8962de22be 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -3063,8 +3063,8 @@
qcom,msm-core@780000 {
compatible = "qcom,apss-core-ea";
reg = <0x780000 0x1000>;
- qcom,low-hyst-temp = <10>;
- qcom,high-hyst-temp = <5>;
+ qcom,low-hyst-temp = <100>;
+ qcom,high-hyst-temp = <100>;
qcom,polling-interval = <50>;
ea0: ea0 {
diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index 95f28d71334c..d492dba8cdac 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -811,8 +811,8 @@
qcom,msm-core@780000 {
compatible = "qcom,apss-core-ea";
reg = <0x780000 0x1000>;
- qcom,low-hyst-temp = <10>;
- qcom,high-hyst-temp = <5>;
+ qcom,low-hyst-temp = <100>;
+ qcom,high-hyst-temp = <100>;
ea0: ea0 {
sensor = <&sensor_information3>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi
index ec754f3cce80..6bccb320577b 100644
--- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi
@@ -40,6 +40,16 @@
gpio = <&tlmm 50 0>;
vin-supply = <&pm660l_bob>;
};
+
+ cam_rear_dvdd_gpio_regulator: cam_rear_dvdd_fixed_regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "cam_rear_dvdd_gpio_regulator";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ vin-supply = <&pm660_s5>;
+ };
};
&tlmm {
@@ -172,10 +182,10 @@
compatible = "qcom,eeprom";
cam_vio-supply = <&pm660_l11>;
cam_vana-supply = <&cam_avdd_gpio_regulator>;
- cam_vdig-supply = <&pm660_s5>;
+ cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1780000 0 1350000>;
- qcom,cam-vreg-max-voltage = <1950000 0 1350000>;
+ qcom,cam-vreg-min-voltage = <1780000 0 0>;
+ qcom,cam-vreg-max-voltage = <1950000 0 0>;
qcom,cam-vreg-op-mode = <105000 0 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
@@ -184,15 +194,12 @@
pinctrl-1 = <&cam_sensor_mclk0_suspend
&cam_sensor_rear_suspend>;
gpios = <&tlmm 32 0>,
- <&tlmm 46 0>,
- <&pm660l_gpios 4 0>;
+ <&tlmm 46 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vdig = <2>;
- qcom,gpio-req-tbl-num = <0 1 1>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
- "CAM_RESET0",
- "CAM_VDIG";
+ "CAM_RESET0";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <0>;
@@ -209,11 +216,11 @@
compatible = "qcom,eeprom";
cam_vio-supply = <&pm660_l11>;
cam_vana-supply = <&cam_avdd_gpio_regulator>;
- cam_vdig-supply = <&pm660_s5>;
+ cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1780000 0 1350000>;
- qcom,cam-vreg-max-voltage = <1950000 0 1350000>;
- qcom,cam-vreg-op-mode = <105000 0 105000>;
+ qcom,cam-vreg-min-voltage = <1780000 0 0>;
+ qcom,cam-vreg-max-voltage = <1950000 0 0>;
+ qcom,cam-vreg-op-mode = <105000 0 0>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
@@ -221,15 +228,12 @@
pinctrl-1 = <&cam_sensor_mclk2_suspend
&cam_sensor_rear2_suspend>;
gpios = <&tlmm 34 0>,
- <&tlmm 48 0>,
- <&pm660l_gpios 4 0>;
+ <&tlmm 48 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vdig = <2>;
- qcom,gpio-req-tbl-num = <0 1 1>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
- "CAM_RESET1",
- "CAM_VDIG";
+ "CAM_RESET1";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <1>;
@@ -290,11 +294,11 @@
qcom,eeprom-src = <&eeprom0>;
cam_vio-supply = <&pm660_l11>;
cam_vana-supply = <&cam_avdd_gpio_regulator>;
- cam_vdig-supply = <&pm660_s5>;
+ cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1780000 0 1350000>;
- qcom,cam-vreg-max-voltage = <1950000 0 1350000>;
- qcom,cam-vreg-op-mode = <105000 0 105000>;
+ qcom,cam-vreg-min-voltage = <1780000 0 0>;
+ qcom,cam-vreg-max-voltage = <1950000 0 0>;
+ qcom,cam-vreg-op-mode = <105000 0 0>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
@@ -302,15 +306,12 @@
pinctrl-1 = <&cam_sensor_mclk0_suspend
&cam_sensor_rear_suspend>;
gpios = <&tlmm 32 0>,
- <&tlmm 46 0>,
- <&pm660l_gpios 4 0>;
+ <&tlmm 46 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vdig = <2>;
- qcom,gpio-req-tbl-num = <0 1 1>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
- "CAM_RESET0",
- "CAM_VDIG";
+ "CAM_RESET0";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <0>;
@@ -333,11 +334,11 @@
qcom,eeprom-src = <&eeprom1>;
cam_vio-supply = <&pm660_l11>;
cam_vana-supply = <&cam_avdd_gpio_regulator>;
- cam_vdig-supply = <&pm660_s5>;
+ cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1780000 0 1350000>;
- qcom,cam-vreg-max-voltage = <1950000 0 1350000>;
- qcom,cam-vreg-op-mode = <105000 0 105000>;
+ qcom,cam-vreg-min-voltage = <1780000 0 0>;
+ qcom,cam-vreg-max-voltage = <1950000 0 0>;
+ qcom,cam-vreg-op-mode = <105000 0 0>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
@@ -345,15 +346,12 @@
pinctrl-1 = <&cam_sensor_mclk2_suspend
&cam_sensor_rear2_suspend>;
gpios = <&tlmm 34 0>,
- <&tlmm 48 0>,
- <&pm660l_gpios 4 0>;
+ <&tlmm 48 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-vdig = <2>;
- qcom,gpio-req-tbl-num = <0 1 1>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
- "CAM_RESET1",
- "CAM_VDIG";
+ "CAM_RESET1";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <1>;
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index a1714beb692e..b3c2f3c634c6 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -865,8 +865,8 @@
qcom,msm-core@780000 {
compatible = "qcom,apss-core-ea";
reg = <0x780000 0x1000>;
- qcom,low-hyst-temp = <10>;
- qcom,high-hyst-temp = <5>;
+ qcom,low-hyst-temp = <100>;
+ qcom,high-hyst-temp = <100>;
ea0: ea0 {
sensor = <&sensor_information1>;
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 3988e72d16ff..bfc5aae0c280 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -110,7 +110,6 @@ __do_hyp_init:
@ - Write permission implies XN: disabled
@ - Instruction cache: enabled
@ - Data/Unified cache: enabled
- @ - Memory alignment checks: enabled
@ - MMU: enabled (this code must be run from an identity mapping)
mrc p15, 4, r0, c1, c0, 0 @ HSCR
ldr r2, =HSCTLR_MASK
@@ -118,8 +117,8 @@ __do_hyp_init:
mrc p15, 0, r1, c1, c0, 0 @ SCTLR
ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
and r1, r1, r2
- ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) )
- THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
+ ARM( ldr r2, =(HSCTLR_M) )
+ THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
orr r1, r1, r2
orr r0, r0, r1
isb
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 33ee522bb76f..e4a774f7aba1 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -876,6 +876,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
pmd_t *pmd;
pud = stage2_get_pud(kvm, cache, addr);
+ if (!pud)
+ return NULL;
+
if (pud_none(*pud)) {
if (!cache)
return NULL;
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
new file mode 100644
index 000000000000..be2d2347d995
--- /dev/null
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_ASM_UACCESS_H
+#define __ASM_ASM_UACCESS_H
+
+/*
+ * Remove the address tag from a virtual address, if present.
+ */
+ .macro clear_address_tag, dst, addr
+ tst \addr, #(1 << 55)
+ bic \dst, \addr, #(0xff << 56)
+ csel \dst, \dst, \addr, eq
+ .endm
+
+#endif
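
A rough C rendering (ours, not part of the patch) of what the new clear_address_tag macro computes. On arm64, bit 55 distinguishes user addresses (clear) from kernel addresses (set), so the top-byte tag in bits 63:56 is stripped only for user pointers:

#include <stdint.h>

/* C sketch of the asm above: tst sets Z when bit 55 is clear (user
 * address), bic clears the tag byte, and csel keeps the cleared value
 * only in that case. */
static inline uint64_t clear_address_tag_c(uint64_t addr)
{
	if (addr & (1ULL << 55))
		return addr;			/* kernel address: unchanged */
	return addr & ~(0xffULL << 56);		/* user address: drop the tag */
}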
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index c5dbc5cb8f10..0671711b46ab 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -44,23 +44,33 @@
#define smp_store_release(p, v) \
do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u8 *)__u.__c) \
+ : "memory"); \
break; \
case 2: \
asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u16 *)__u.__c) \
+ : "memory"); \
break; \
case 4: \
asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u32 *)__u.__c) \
+ : "memory"); \
break; \
case 8: \
asm volatile ("stlr %1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u64 *)__u.__c) \
+ : "memory"); \
break; \
} \
} while (0)
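
The union added above is the substance of this hunk: it lets smp_store_release() accept any 1/2/4/8-byte type for v, including pointers, by aliasing the value as raw bytes and re-reading it at the exact width the store instruction takes. A compressed sketch (ours; arm64-only, 32-bit case shown) of the same pattern:

/* Sketch of the union trick for one width: __u aliases v as bytes, and
 * re-reading through a uint32_t pointer yields an integer operand for
 * the stlr without an invalid cast when v is pointer-typed. */
#include <stdint.h>

#define store_release_32(p, v)						\
do {									\
	union { typeof(*(p)) __val; char __c[1]; } __u =		\
		{ .__val = (v) };					\
	asm volatile ("stlr %w1, %0"					\
		      : "=Q" (*(p))					\
		      : "r" (*(uint32_t *)__u.__c)			\
		      : "memory");					\
} while (0)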
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index ac177d96e773..064cef9ae2d1 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -27,6 +27,7 @@
/*
* User space memory access functions
*/
+#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -119,6 +120,13 @@ static inline void set_fs(mm_segment_t fs)
flag; \
})
+/*
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
+ */
+#define untagged_addr(addr) sign_extend64(addr, 55)
+
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
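
untagged_addr() is simply a sign extension from bit 55. User addresses have bit 55 clear, so the extension zeroes the tag in bits 63:56; kernel addresses have bit 55 set and come back unchanged. A worked example (ours), modeling the semantics of sign_extend64() from <linux/bitops.h>:

#include <stdint.h>

/* Userspace model (ours) of sign_extend64(value, 55). */
static inline int64_t sign_extend64_c(uint64_t value, int index)
{
	int shift = 63 - index;
	return ((int64_t)(value << shift)) >> shift;
}

/*
 * Tagged user pointer: 0x5a007fffffff1000 (tag 0x5a, bit 55 = 0)
 *   -> 0x00007fffffff1000 (tag cleared)
 * Kernel pointer:      0xffff0000deadb000 (bit 55 = 1)
 *   -> 0xffff0000deadb000 (unchanged)
 */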
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 884b317e56c3..10d3642deb7c 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -299,7 +299,8 @@ do { \
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
: "=&r" (res), "+r" (data), "=&r" (temp) \
- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+ : "r" ((unsigned long)addr), "i" (-EAGAIN), \
+ "i" (-EFAULT) \
: "memory"); \
uaccess_disable(); \
} while (0)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 0ea65307f866..7822e36d87fb 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -32,6 +32,7 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
+#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
@@ -437,12 +438,13 @@ el1_da:
/*
* Data abort handling
*/
- mrs x0, far_el1
+ mrs x3, far_el1
enable_dbg
// re-enable interrupts if they were enabled in the aborted context
tbnz x23, #7, 1f // PSR_I_BIT
enable_irq
1:
+ clear_address_tag x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
@@ -604,7 +606,7 @@ el0_da:
// enable interrupts before calling the main handler
enable_dbg_and_irq
ct_user_exit
- bic x0, x26, #(0xff << 56)
+ clear_address_tag x0, x26
mov x1, x25
mov x2, sp
bl do_mem_abort
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index f4dfd8c41e06..1c694f3c643c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -36,6 +36,7 @@
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
+#include <asm/uaccess.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index c0ee84020784..db0087fd9823 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -421,6 +421,11 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
return &cpu_topology[cpu].core_sibling;
}
+static int cpu_cpu_flags(void)
+{
+ return SD_ASYM_CPUCAPACITY;
+}
+
static inline int cpu_corepower_flags(void)
{
return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN | \
@@ -431,7 +436,7 @@ static struct sched_domain_topology_level arm64_topology[] = {
#ifdef CONFIG_SCHED_MC
{ cpu_coregroup_mask, cpu_corepower_flags, cpu_core_energy, SD_INIT_NAME(MC) },
#endif
- { cpu_cpu_mask, NULL, cpu_cluster_energy, SD_INIT_NAME(DIE) },
+ { cpu_cpu_mask, cpu_cpu_flags, cpu_cluster_energy, SD_INIT_NAME(DIE) },
{ NULL, },
};
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 2445db9bbd4f..a41178f8eeea 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -164,6 +164,8 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
+ void *addr;
+
if (dev == NULL) {
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
return NULL;
@@ -174,7 +176,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
flags |= GFP_DMA;
if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
struct page *page;
- void *addr;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
get_order(size));
@@ -184,20 +185,20 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
*dma_handle = phys_to_dma(dev, page_to_phys(page));
addr = page_address(page);
memset(addr, 0, size);
-
- if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs) ||
- dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs)) {
- /*
- * flush the caches here because we can't later
- */
- __dma_flush_range(addr, addr + size);
- __dma_remap(page, size, 0, true);
- }
-
- return addr;
} else {
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ addr = swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
+
+ if (addr && (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs) ||
+ dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))) {
+ /*
+ * flush the caches here because we can't later
+ */
+ __dma_flush_range(addr, addr + size);
+ __dma_remap(virt_to_page(addr), size, 0, true);
+ }
+
+ return addr;
}
static void __dma_free_coherent(struct device *dev, size_t size,
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b7b0f2..329771559cbb 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
extern int sysfs_add_device_to_node(struct device *dev, int nid);
extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+static inline int early_cpu_to_node(int cpu)
+{
+ int nid;
+
+ nid = numa_cpu_lookup_table[cpu];
+
+ /*
+ * Fall back to node 0 if nid is unset (it should be, except bugs).
+ * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+ */
+ return (nid < 0) ? 0 : nid;
+}
#else
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
static inline void dump_numa_cpu_topology(void) {}
static inline int sysfs_add_device_to_node(struct device *dev, int nid)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index c314db8b798c..9837c98caabe 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -655,7 +655,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
*/
#define MAX_WAIT_FOR_RECOVERY 300
-static void eeh_handle_normal_event(struct eeh_pe *pe)
+static bool eeh_handle_normal_event(struct eeh_pe *pe)
{
struct pci_bus *frozen_bus;
int rc = 0;
@@ -665,7 +665,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
if (!frozen_bus) {
pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
__func__, pe->phb->global_number, pe->addr);
- return;
+ return false;
}
eeh_pe_update_time_stamp(pe);
@@ -790,7 +790,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
pr_info("EEH: Notify device driver to resume\n");
eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
- return;
+ return false;
excess_failures:
/*
@@ -831,7 +831,11 @@ perm_error:
pci_lock_rescan_remove();
pcibios_remove_pci_devices(frozen_bus);
pci_unlock_rescan_remove();
+
+ /* The passed PE should no longer be used */
+ return true;
}
+ return false;
}
static void eeh_handle_special_event(void)
@@ -897,7 +901,14 @@ static void eeh_handle_special_event(void)
*/
if (rc == EEH_NEXT_ERR_FROZEN_PE ||
rc == EEH_NEXT_ERR_FENCED_PHB) {
- eeh_handle_normal_event(pe);
+ /*
+ * eeh_handle_normal_event() can make the PE stale if it
+ * determines that the PE cannot possibly be recovered.
+ * Don't modify the PE state if that's the case.
+ */
+ if (eeh_handle_normal_event(pe))
+ continue;
+
eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
} else {
pci_lock_rescan_remove();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a20823210ac0..fe6e800c1357 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -751,7 +751,7 @@ void __init setup_arch(char **cmdline_p)
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
- return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+ return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
__pa(MAX_DMA_ADDRESS));
}
@@ -762,7 +762,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- if (cpu_to_node(from) == cpu_to_node(to))
+ if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
return REMOTE_DISTANCE;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index e9ff44cd5d86..e8b1027e1b5b 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -110,6 +110,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
for (i = 0; i < num_lmbs; i++) {
lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+ lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
}
@@ -553,6 +554,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
for (i = 0; i < num_lmbs; i++) {
lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+ lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3736be630113..894bcaed002e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -183,9 +183,9 @@ config NR_CPUS
int "Maximum number of CPUs"
depends on SMP
range 2 32 if SPARC32
- range 2 1024 if SPARC64
+ range 2 4096 if SPARC64
default 32 if SPARC32
- default 64 if SPARC64
+ default 4096 if SPARC64
source kernel/Kconfig.hz
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0dbc38af..83b36a5371ff 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
#define CTX_NR_MASK TAG_CONTEXT_BITS
#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
-#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
#define CTX_VALID(__ctx) \
(!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b84be675e507..349dd23e2876 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -17,13 +17,8 @@ extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
@@ -74,8 +69,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long ctx_valid, flags;
- int cpu;
+ int cpu = smp_processor_id();
+ per_cpu(per_cpu_secondary_mm, cpu) = mm;
if (unlikely(mm == &init_mm))
return;
@@ -121,7 +117,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
* for the first time, we must flush that context out of the
* local TLB.
*/
- cpu = smp_processor_id();
if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
cpumask_set_cpu(cpu, mm_cpumask(mm));
__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -131,26 +126,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
}
#define deactivate_mm(tsk,mm) do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
- unsigned long flags;
- int cpu;
-
- spin_lock_irqsave(&mm->context.lock, flags);
- if (!CTX_VALID(mm->context))
- get_new_mmu_context(mm);
- cpu = smp_processor_id();
- if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
- cpumask_set_cpu(cpu, mm_cpumask(mm));
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
- tsb_context_switch(mm);
- spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 91b963a887b7..29c3b400f949 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -91,9 +91,9 @@ extern unsigned long pfn_base;
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/*
* In general all page table modifications should use the V8 atomic
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 266937030546..522b43db2ed3 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
#define PIL_SMP_CALL_FUNC 1
#define PIL_SMP_RECEIVE_SIGNAL 2
#define PIL_SMP_CAPTURE 3
-#define PIL_SMP_CTX_NEW_VERSION 4
#define PIL_DEVICE_IRQ 5
#define PIL_SMP_CALL_FUNC_SNGL 6
#define PIL_DEFERRED_PCR_WORK 7
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 29d64b1758ed..be0cc1beed41 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -16,7 +16,7 @@ extern char reboot_command[];
*/
extern unsigned char boot_cpu_id;
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
extern int serial_console;
static inline int con_is_present(void)
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6cdbbbb..9dca7a892978 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@ struct vio_dev {
int compat_len;
u64 dev_no;
+ u64 id;
unsigned long channel_id;
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e22416ce56ea..bfbde8c4ffb2 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
unsigned long page;
+ void *mondo, *p;
- BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+ BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+ /* Make sure mondo block is 64byte aligned */
+ p = kzalloc(127, GFP_KERNEL);
+ if (!p) {
+ prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+ prom_halt();
+ }
+ mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+ tb->cpu_mondo_block_pa = __pa(mondo);
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
- prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+ prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
prom_halt();
}
- tb->cpu_mondo_block_pa = __pa(page);
- tb->cpu_list_pa = __pa(page + 64);
+ tb->cpu_list_pa = __pa(page);
#endif
}
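
The kzalloc(127) above is alignment done by hand: kzalloc() does not guarantee 64-byte alignment, so the code over-allocates 64 + 63 bytes and rounds the pointer up to the next 64-byte boundary. A standalone sketch (ours) of the same trick:

#include <stdint.h>
#include <stdlib.h>

/* Get a 64-byte block aligned to 64 bytes from an allocator with no
 * alignment guarantee: allocate size + (align - 1) bytes and round up.
 * The raw pointer is never freed here, as in the boot-time code above. */
static void *alloc_aligned64(void)
{
	void *p = calloc(1, 64 + 63);
	if (!p)
		return NULL;
	return (void *)(((uintptr_t)p + 63) & ~(uintptr_t)63);
}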
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index e7f652be9e61..44f32dd4477f 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
/* smp_64.c */
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 19cd08d18672..95a9fa0d2195 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -959,37 +959,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
preempt_enable();
}
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
- struct mm_struct *mm;
- unsigned long flags;
-
- clear_softint(1 << irq);
-
- /* See if we need to allocate a new TLB context because
- * the version of the one we are using is now out of date.
- */
- mm = current->active_mm;
- if (unlikely(!mm || (mm == &init_mm)))
- return;
-
- spin_lock_irqsave(&mm->context.lock, flags);
-
- if (unlikely(!CTX_VALID(mm->context)))
- get_new_mmu_context(mm);
-
- spin_unlock_irqrestore(&mm->context.lock, flags);
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context),
- SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
- smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index d568c8207af7..395ec1800530 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -470,13 +470,16 @@ __tsb_context_switch:
.type copy_tsb,#function
copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
* %o2=new_tsb_base, %o3=new_tsb_size
+ * %o4=page_size_shift
*/
sethi %uhi(TSB_PASS_BITS), %g7
srlx %o3, 4, %o3
- add %o0, %o1, %g1 /* end of old tsb */
+ add %o0, %o1, %o1 /* end of old tsb */
sllx %g7, 32, %g7
sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
+ mov %o4, %g1 /* page_size_shift */
+
661: prefetcha [%o0] ASI_N, #one_read
.section .tsb_phys_patch, "ax"
.word 661b
@@ -501,9 +504,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
/* This can definitely be computed faster... */
srlx %o0, 4, %o5 /* Build index */
and %o5, 511, %o5 /* Mask index */
- sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+ sllx %o5, %g1, %o5 /* Put into vaddr position */
or %o4, %o5, %o4 /* Full VADDR. */
- srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+ srlx %o4, %g1, %o4 /* Shift down to create index */
and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -511,7 +514,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
80: add %o0, 16, %o0
- cmp %o0, %g1
+ cmp %o0, %o1
bne,pt %xcc, 90b
nop
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index c6dfdaa29e20..170ead662f2a 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4: BTRAP(0x44)
#else
tl0_irq1: BTRAP(0x41)
tl0_irq2: BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index cb5789c9f961..34824ca396f0 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -284,13 +284,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
if (!id) {
dev_set_name(&vdev->dev, "%s", bus_id_name);
vdev->dev_no = ~(u64)0;
+ vdev->id = ~(u64)0;
} else if (!cfg_handle) {
dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
vdev->dev_no = *id;
+ vdev->id = ~(u64)0;
} else {
dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
*cfg_handle, *id);
vdev->dev_no = *cfg_handle;
+ vdev->id = *id;
}
vdev->dev.parent = parent;
@@ -333,27 +336,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
(void) vio_create_one(hp, node, &root_vdev->dev);
}
+struct vio_md_node_query {
+ const char *type;
+ u64 dev_no;
+ u64 id;
+};
+
static int vio_md_node_match(struct device *dev, void *arg)
{
+ struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
struct vio_dev *vdev = to_vio_dev(dev);
- if (vdev->mp == (u64) arg)
- return 1;
+ if (vdev->dev_no != query->dev_no)
+ return 0;
+ if (vdev->id != query->id)
+ return 0;
+ if (strcmp(vdev->type, query->type))
+ return 0;
- return 0;
+ return 1;
}
static void vio_remove(struct mdesc_handle *hp, u64 node)
{
+ const char *type;
+ const u64 *id, *cfg_handle;
+ u64 a;
+ struct vio_md_node_query query;
struct device *dev;
- dev = device_find_child(&root_vdev->dev, (void *) node,
+ type = mdesc_get_property(hp, node, "device-type", NULL);
+ if (!type) {
+ type = mdesc_get_property(hp, node, "name", NULL);
+ if (!type)
+ type = mdesc_node_name(hp, node);
+ }
+
+ query.type = type;
+
+ id = mdesc_get_property(hp, node, "id", NULL);
+ cfg_handle = NULL;
+ mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+ u64 target;
+
+ target = mdesc_arc_target(hp, a);
+ cfg_handle = mdesc_get_property(hp, target,
+ "cfg-handle", NULL);
+ if (cfg_handle)
+ break;
+ }
+
+ if (!id) {
+ query.dev_no = ~(u64)0;
+ query.id = ~(u64)0;
+ } else if (!cfg_handle) {
+ query.dev_no = *id;
+ query.id = ~(u64)0;
+ } else {
+ query.dev_no = *cfg_handle;
+ query.id = *id;
+ }
+
+ dev = device_find_child(&root_vdev->dev, &query,
vio_md_node_match);
if (dev) {
printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
device_unregister(dev);
put_device(dev);
+ } else {
+ if (!id)
+ printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+ type);
+ else if (!cfg_handle)
+ printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+ type, *id);
+ else
+ printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+ type, *cfg_handle, *id);
}
}
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index eb8287155279..3b7092d9ea8f 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -301,7 +301,7 @@ void __init mem_init(void)
/* Saves us work later. */
- memset((void *)&empty_zero_page, 0, PAGE_SIZE);
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
i += 1;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 965655afdbb6..384aba109d7c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -656,10 +656,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+ unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+ unsigned long new_ver, new_ctx, old_ctx;
+ struct mm_struct *mm;
+ int cpu;
+
+ bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+ /* Reserve kernel context */
+ set_bit(0, mmu_context_bmap);
+
+ new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+ if (unlikely(new_ver == 0))
+ new_ver = CTX_FIRST_VERSION;
+ tlb_context_cache = new_ver;
+
+ /*
+ * Make sure that any new mm that are added into per_cpu_secondary_mm,
+ * are going to go through get_new_mmu_context() path.
+ */
+ mb();
+
+ /*
+ * Updated versions to current on those CPUs that had valid secondary
+ * contexts
+ */
+ for_each_online_cpu(cpu) {
+ /*
+ * If a new mm is stored after we took this mm from the array,
+ * it will go into get_new_mmu_context() path, because we
+ * already bumped the version in tlb_context_cache.
+ */
+ mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+ if (unlikely(!mm || mm == &init_mm))
+ continue;
+
+ old_ctx = mm->context.sparc64_ctx_val;
+ if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+ new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+ set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+ mm->context.sparc64_ctx_val = new_ctx;
+ }
+ }
+}
/* Caller does TLB context flushing on local CPU if necessary.
* The caller also ensures that CTX_VALID(mm->context) is false.
@@ -675,48 +723,30 @@ void get_new_mmu_context(struct mm_struct *mm)
{
unsigned long ctx, new_ctx;
unsigned long orig_pgsz_bits;
- int new_version;
spin_lock(&ctx_alloc_lock);
+retry:
+ /* wrap might have happened, test again if our context became valid */
+ if (unlikely(CTX_VALID(mm->context)))
+ goto out;
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
- new_version = 0;
if (new_ctx >= (1 << CTX_NR_BITS)) {
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
if (new_ctx >= ctx) {
- int i;
- new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
- CTX_FIRST_VERSION;
- if (new_ctx == 1)
- new_ctx = CTX_FIRST_VERSION;
-
- /* Don't call memset, for 16 entries that's just
- * plain silly...
- */
- mmu_context_bmap[0] = 3;
- mmu_context_bmap[1] = 0;
- mmu_context_bmap[2] = 0;
- mmu_context_bmap[3] = 0;
- for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
- mmu_context_bmap[i + 0] = 0;
- mmu_context_bmap[i + 1] = 0;
- mmu_context_bmap[i + 2] = 0;
- mmu_context_bmap[i + 3] = 0;
- }
- new_version = 1;
- goto out;
+ mmu_context_wrap();
+ goto retry;
}
}
+ if (mm->context.sparc64_ctx_val)
+ cpumask_clear(mm_cpumask(mm));
mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
tlb_context_cache = new_ctx;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
spin_unlock(&ctx_alloc_lock);
-
- if (unlikely(new_version))
- smp_new_mmu_context_version();
}
static int numa_enabled = 1;
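
The new wrap path replaces the old cross-call broadcast: instead of interrupting every CPU, it bumps the version field and patches up only the mms currently installed as a secondary context somewhere. A toy sketch (ours, with made-up field widths) of the version arithmetic:

#include <stdint.h>

/* A context value packs a reusable number in the low bits and a
 * generation ("version") above it; CTX_VALID() compares versions.
 * The widths here are illustrative, not sparc64's. */
#define NR_BITS		13
#define NR_MASK		((1UL << NR_BITS) - 1)
#define VER_MASK	(~NR_MASK)
#define FIRST_VER	(1UL << NR_BITS)

static unsigned long bump_version(unsigned long cache)
{
	unsigned long ver = (cache & VER_MASK) + FIRST_VER;

	return ver ? ver : FIRST_VER;	/* never hand out version 0 */
}

Any mm whose stored version no longer matches fails CTX_VALID() and is routed through get_new_mmu_context(), which is why the retry label above re-tests validity under the lock.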
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 9cdeca0fa955..266411291634 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -451,7 +451,8 @@ retry_tsb_alloc:
extern void copy_tsb(unsigned long old_tsb_base,
unsigned long old_tsb_size,
unsigned long new_tsb_base,
- unsigned long new_tsb_size);
+ unsigned long new_tsb_size,
+ unsigned long page_size_shift);
unsigned long old_tsb_base = (unsigned long) old_tsb;
unsigned long new_tsb_base = (unsigned long) new_tsb;
@@ -459,7 +460,9 @@ retry_tsb_alloc:
old_tsb_base = __pa(old_tsb_base);
new_tsb_base = __pa(new_tsb_base);
}
- copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+ copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+ tsb_index == MM_TSB_BASE ?
+ PAGE_SHIFT : REAL_HPAGE_SHIFT);
}
mm->context.tsb_block[tsb_index].tsb = new_tsb;
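
The extra argument exists because a TSB entry's slot is the virtual address shifted down by the page size of that TSB, so a copy between TSBs must rehash with the right shift (base pages vs. huge pages), not the compile-time PAGE_SHIFT. A sketch (ours) of the index computation the asm performs:

#include <stdint.h>

/* Slot of a TSB entry: vaddr scaled by the TSB's page shift, masked to
 * the table size (nentries must be a power of two). */
static unsigned long tsb_slot(uint64_t vaddr, unsigned int page_shift,
			      unsigned long nentries)
{
	return (vaddr >> page_shift) & (nentries - 1);
}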
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6cd3189..fcf4d27a38fb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@ xcall_capture:
wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
retry
- .globl xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
- wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
- retry
-
#ifdef CONFIG_KGDB
.globl xcall_kgdb_capture
xcall_kgdb_capture:
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 47190bd399e7..cec49ecf5f31 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
*/
rcu_irq_exit();
native_safe_halt();
- rcu_irq_enter();
local_irq_disable();
+ rcu_irq_enter();
}
}
if (!n.halted)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 642e9c93a097..9357b29de9bc 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -737,18 +737,20 @@ out:
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
- int j, nent = vcpu->arch.cpuid_nent;
+ struct kvm_cpuid_entry2 *ej;
+ int j = i;
+ int nent = vcpu->arch.cpuid_nent;
e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
/* when no next entry is found, the current entry[i] is reselected */
- for (j = i + 1; ; j = (j + 1) % nent) {
- struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
- if (ej->function == e->function) {
- ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
- return j;
- }
- }
- return 0; /* silence gcc, even though control never reaches here */
+ do {
+ j = (j + 1) % nent;
+ ej = &vcpu->arch.cpuid_entries[j];
+ } while (ej->function != e->function);
+
+ ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+ return j;
}
/* find an entry with matching function, matching index (if needed), and that
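
The rewrite above fixes an out-of-bounds read: the old loop indexed cpuid_entries[j] with j = i + 1 before the modulo was applied, walking past the array when i was the last entry. The do/while wraps j first, and because it terminates at i itself when no other entry shares the function, it also always makes progress. A sketch (ours) of the corrected scan:

#include <stddef.h>

/* Circular search for the next slot with the same key, starting after
 * i; j wraps before each dereference, and j == i on return means entry
 * i was reselected because it is the only match. */
static size_t next_with_same_key(const int *key, size_t nent, size_t i)
{
	size_t j = i;

	do {
		j = (j + 1) % nent;
	} while (key[j] != key[i]);

	return j;
}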
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8eb8a934b531..1049c3c9b877 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3433,12 +3433,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
if (unlikely(!lapic_in_kernel(vcpu) ||
kvm_event_needs_reinjection(vcpu)))
return false;
+ if (is_guest_mode(vcpu))
+ return false;
+
return kvm_x86_ops->interrupt_allowed(vcpu);
}
@@ -3454,7 +3457,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
if (!async)
return false; /* *pfn has correct page already */
- if (!prefault && can_do_async_pf(vcpu)) {
+ if (!prefault && kvm_can_do_async_pf(vcpu)) {
trace_kvm_try_async_get_page(gva, gfn);
if (kvm_find_async_pf_gfn(vcpu, gfn)) {
trace_kvm_async_pf_doublefault(gva, gfn);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 55ffb7b0f95e..e60fc80f8a9c 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -74,6 +74,7 @@ enum {
int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ae2b9cd358f2..6c82792487e9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8245,8 +8245,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
return true;
else
- return !kvm_event_needs_reinjection(vcpu) &&
- kvm_x86_ops->interrupt_allowed(vcpu);
+ return kvm_can_do_async_pf(vcpu);
}
void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 1238b3c5a321..0a12c09d7cb2 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
err = crypto_ablkcipher_encrypt(&data->req);
if (err == -EINPROGRESS || err == -EBUSY) {
- err = wait_for_completion_interruptible(
- &data->result.completion);
- if (!err)
- err = data->result.err;
+ wait_for_completion(&data->result.completion);
+ err = data->result.err;
}
if (err)
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 21994d53db91..20e617ed0770 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -61,7 +61,8 @@ static const struct diag_ssid_range_t msg_mask_tbl[] = {
{ .ssid_first = MSG_SSID_21, .ssid_last = MSG_SSID_21_LAST },
{ .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST },
{ .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST },
- { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST }
+ { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST },
+ { .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST }
};
static int diag_apps_responds(void)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 0975d23031ea..2898d19fadf5 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -346,7 +346,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
/* It's illegal to wrap around the end of the physical address space. */
- if (offset + (phys_addr_t)size < offset)
+ if (offset + (phys_addr_t)size - 1 < offset)
return -EINVAL;
if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
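The off-by-one matters at the very top of the physical address space: a mapping whose last byte is the last addressable byte is legal, but summing offset and size wraps to zero and the old test rejected it. A hedged illustration with 32-bit addresses standing in for phys_addr_t:

    #include <stdint.h>
    #include <stdio.h>

    /* Old test: rejects any sum that wraps, including the legal
     * end-of-space mapping where offset + size == 0 exactly. */
    static int old_check(uint32_t offset, uint32_t size)
    {
            return offset + size < offset;
    }

    /* New test: computes the address of the last byte instead, which
     * only wraps for genuinely illegal ranges. */
    static int new_check(uint32_t offset, uint32_t size)
    {
            return offset + size - 1 < offset;
    }

    int main(void)
    {
            uint32_t offset = 0xfffff000, size = 0x1000; /* final 4K page */

            printf("old=%d new=%d\n", old_check(offset, size),
                   new_check(offset, size)); /* old=1 new=0 */
            return 0;
    }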
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index fc061f7c2bd1..a7de8ae185a5 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
if (rc <= 0) {
- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
return rc;
@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
for (i = 0; i < bytes_to_write; i++) {
rc = wait_for_bulk_out_ready(dev);
if (rc <= 0) {
- DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+ DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
if (rc <= 0) {
- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
return rc;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d93dfebae0bb..1822472dffab 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1798,13 +1798,15 @@ int random_int_secret_init(void)
return 0;
}
+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
+ __aligned(sizeof(unsigned long));
+
/*
* Get a random word for internal kernel use only. Similar to urandom but
* with the goal of minimal entropy pool depletion. As a result, the random
* value is not cryptographically secure but for several uses the cost of
* depleting entropy is too high
*/
-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
unsigned int get_random_int(void)
{
__u32 *hash;
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 8bf45f572c5e..d99e13817a29 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -719,9 +719,22 @@ static int clk_osm_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int clk_osm_acd_init(struct clk_osm *c);
+
static int clk_osm_enable(struct clk_hw *hw)
{
struct clk_osm *cpuclk = to_clk_osm(hw);
+ int rc;
+
+ rc = clk_osm_acd_init(cpuclk);
+ if (rc) {
+ pr_err("Failed to initialize ACD for cluster %d, rc=%d\n",
+ cpuclk->cluster_num, rc);
+ return rc;
+ }
+
+ /* Wait for 5 usecs before enabling OSM */
+ udelay(5);
clk_osm_write_reg(cpuclk, 1, ENABLE_REG);
@@ -3272,17 +3285,6 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
clk_osm_setup_cluster_pll(&perfcl_clk);
}
- rc = clk_osm_acd_init(&pwrcl_clk);
- if (rc) {
- pr_err("failed to initialize ACD for pwrcl, rc=%d\n", rc);
- return rc;
- }
- rc = clk_osm_acd_init(&perfcl_clk);
- if (rc) {
- pr_err("failed to initialize ACD for perfcl, rc=%d\n", rc);
- return rc;
- }
-
spin_lock_init(&pwrcl_clk.lock);
spin_lock_init(&perfcl_clk.lock);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 3b2f46bacd77..1dfd1765319b 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -120,6 +120,15 @@ config CPU_FREQ_DEFAULT_GOV_SCHED
cpu frequency using CPU utilization estimates from the
scheduler.
+config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+ bool "schedutil"
+ depends on SMP
+ select CPU_FREQ_GOV_SCHEDUTIL
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the 'schedutil' CPUFreq governor by default. If unsure,
+ have a look at the help section of that governor. The fallback
+ governor will be 'performance'.
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -239,6 +248,23 @@ config CPU_FREQ_GOV_SCHED
If in doubt, say N.
+config CPU_FREQ_GOV_SCHEDUTIL
+ bool "'schedutil' cpufreq policy governor"
+ depends on CPU_FREQ && SMP
+ select CPU_FREQ_GOV_ATTR_SET
+ select IRQ_WORK
+ help
+ This governor makes decisions based on the utilization data provided
+ by the scheduler. It sets the CPU frequency to be proportional to
+ the utilization/capacity ratio coming from the scheduler. If the
+ utilization is frequency-invariant, the new frequency is also
+ proportional to the maximum available frequency. If that is not the
+ case, it is proportional to the current frequency of the CPU. The
+ frequency tipping point is at utilization/capacity equal to 80% in
+ both cases.
+
+ If in doubt, say N.
+
comment "CPU frequency scaling drivers"
config CPUFREQ_DT
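The 80% tipping point in the help text corresponds to a 1.25x margin on the base frequency. A minimal sketch of that mapping (illustrative, not the governor's actual implementation):

    #include <stdio.h>

    /* freq = 1.25 * base * util / cap, so util/cap == 80% lands
     * exactly on base: the maximum frequency when utilization is
     * frequency-invariant, the current frequency otherwise. */
    static unsigned long next_freq(unsigned long base, unsigned long util,
                                   unsigned long cap)
    {
            return (base + (base >> 2)) * util / cap;
    }

    int main(void)
    {
            /* 1.8 GHz base (kHz), capacity scale of 1024 */
            printf("%lu\n", next_freq(1800000, 819, 1024)); /* ~1800000 */
            printf("%lu\n", next_freq(1800000, 512, 1024)); /* 1125000 */
            return 0;
    }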
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 4a2f914e0752..6d4a7aeb506d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -1,5 +1,5 @@
# CPUfreq core
-obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o cpufreq_governor_attr_set.o
# CPUfreq stats
obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a0dba9beac05..5f0f983ce173 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -541,6 +541,38 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
+/**
+ * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
+ * one.
+ * @policy: associated cpufreq policy.
+ * @target_freq: target frequency to resolve.
+ *
+ * The target to driver frequency mapping is cached in the policy.
+ *
+ * Return: Lowest driver-supported frequency greater than or equal to the
+ * given target_freq, subject to policy (min/max) and driver limitations.
+ */
+unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+ policy->cached_target_freq = target_freq;
+
+ if (cpufreq_driver->target_index) {
+ int idx, rv;
+
+ rv = cpufreq_frequency_table_target(policy, policy->freq_table,
+ target_freq,
+ CPUFREQ_RELATION_L,
+ &idx);
+ if (rv)
+ return target_freq;
+ policy->cached_resolved_idx = idx;
+ return policy->freq_table[idx].frequency;
+ }
+
+ return target_freq;
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
/*********************************************************************
* SYSFS INTERFACE *
@@ -2536,6 +2568,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
list_empty(&cpufreq_policy_list)) {
/* if all ->init() calls failed, unregister */
+ ret = -ENODEV;
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
driver_data->name);
goto err_if_unreg;
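A userspace sketch of the CPUFREQ_RELATION_L semantics used by cpufreq_driver_resolve_freq(): clamp the target into [min, max], then pick the lowest table frequency at or above it (assumes an ascending table; names are illustrative):

    #include <stdio.h>

    static unsigned int resolve(const unsigned int *table, int n,
                                unsigned int target,
                                unsigned int min, unsigned int max)
    {
            int i;

            if (target < min)
                    target = min;
            if (target > max)
                    target = max;

            /* RELATION_L: lowest frequency >= target */
            for (i = 0; i < n; i++)
                    if (table[i] >= target)
                            return table[i];

            return table[n - 1];
    }

    int main(void)
    {
            const unsigned int table[] = { 300000, 600000, 1200000, 1800000 };

            /* a 700 MHz request resolves up to the 1.2 GHz step */
            printf("%u\n", resolve(table, 4, 700000, 300000, 1800000));
            return 0;
    }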
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
new file mode 100644
index 000000000000..52841f807a7e
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -0,0 +1,84 @@
+/*
+ * Abstract code for CPUFreq governor tunable sysfs attributes.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cpufreq_governor.h"
+
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+ return container_of(kobj, struct gov_attr_set, kobj);
+}
+
+static inline struct governor_attr *to_gov_attr(struct attribute *attr)
+{
+ return container_of(attr, struct governor_attr, attr);
+}
+
+static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct governor_attr *gattr = to_gov_attr(attr);
+
+ return gattr->show(to_gov_attr_set(kobj), buf);
+}
+
+static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
+ struct governor_attr *gattr = to_gov_attr(attr);
+ int ret;
+
+ mutex_lock(&attr_set->update_lock);
+ ret = attr_set->usage_count ? gattr->store(attr_set, buf, count) : -EBUSY;
+ mutex_unlock(&attr_set->update_lock);
+ return ret;
+}
+
+const struct sysfs_ops governor_sysfs_ops = {
+ .show = governor_show,
+ .store = governor_store,
+};
+EXPORT_SYMBOL_GPL(governor_sysfs_ops);
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ INIT_LIST_HEAD(&attr_set->policy_list);
+ mutex_init(&attr_set->update_lock);
+ attr_set->usage_count = 1;
+ list_add(list_node, &attr_set->policy_list);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_init);
+
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ mutex_lock(&attr_set->update_lock);
+ attr_set->usage_count++;
+ list_add(list_node, &attr_set->policy_list);
+ mutex_unlock(&attr_set->update_lock);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_get);
+
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ unsigned int count;
+
+ mutex_lock(&attr_set->update_lock);
+ list_del(list_node);
+ count = --attr_set->usage_count;
+ mutex_unlock(&attr_set->update_lock);
+ if (count)
+ return count;
+
+ kobject_put(&attr_set->kobj);
+ mutex_destroy(&attr_set->update_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_put);
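A userspace analog of the attr_set lifetime this file implements: the first user initializes the set with a usage count of 1, later users take references, and the final put tears the set down (a pthread mutex stands in for the kernel mutex; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct attr_set {
            pthread_mutex_t lock;
            int usage_count;
    };

    static void attr_set_init(struct attr_set *s)
    {
            pthread_mutex_init(&s->lock, NULL);
            s->usage_count = 1;     /* the caller holds the first ref */
    }

    static void attr_set_get(struct attr_set *s)
    {
            pthread_mutex_lock(&s->lock);
            s->usage_count++;
            pthread_mutex_unlock(&s->lock);
    }

    static int attr_set_put(struct attr_set *s)
    {
            int count;

            pthread_mutex_lock(&s->lock);
            count = --s->usage_count;
            pthread_mutex_unlock(&s->lock);
            if (!count)
                    pthread_mutex_destroy(&s->lock);
            return count;
    }

    int main(void)
    {
            struct attr_set s;

            attr_set_init(&s);                 /* first policy */
            attr_set_get(&s);                  /* second policy */
            printf("%d\n", attr_set_put(&s));  /* 1: still in use */
            printf("%d\n", attr_set_put(&s));  /* 0: torn down */
            return 0;
    }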
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 57ff46284f15..c97336a2ba92 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -325,6 +325,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
| M2P_CONTROL_ENABLE;
m2p_set_control(edmac, control);
+ edmac->buffer = 0;
+
return 0;
}
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index b1bc945f008f..56410ea75ac5 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -117,7 +117,7 @@ struct usb_dmac {
#define USB_DMASWR 0x0008
#define USB_DMASWR_SWR (1 << 0)
#define USB_DMAOR 0x0060
-#define USB_DMAOR_AE (1 << 2)
+#define USB_DMAOR_AE (1 << 1)
#define USB_DMAOR_DME (1 << 0)
#define USB_DMASAR 0x0000
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 57a2e347f04d..0f0094b58d1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -893,6 +893,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
+ /* disable mclk switching if the refresh is >120Hz, even if the
+ * blanking period would allow it
+ */
+ if (amdgpu_dpm_get_vrefresh(adev) > 120)
+ return true;
+
if (vblank_time < switch_limit)
return true;
else
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index ce0645d0c1e5..61e3a097a478 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -783,20 +783,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
+ DRM_DEBUG_KMS("Using mode from DDC\n");
goto out; /* FIXME: check for quirks */
}
}
/* Failed to get EDID, what about VBT? do we need this? */
- if (mode_dev->vbt_mode)
+ if (dev_priv->lfp_lvds_vbt_mode) {
mode_dev->panel_fixed_mode =
- drm_mode_duplicate(dev, mode_dev->vbt_mode);
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (!mode_dev->panel_fixed_mode)
- if (dev_priv->lfp_lvds_vbt_mode)
- mode_dev->panel_fixed_mode =
- drm_mode_duplicate(dev,
- dev_priv->lfp_lvds_vbt_mode);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("Using mode from VBT\n");
+ goto out;
+ }
+ }
/*
* If we didn't get EDID, try checking if the panel is already turned
@@ -813,6 +816,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
if (mode_dev->panel_fixed_mode) {
mode_dev->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("Using pre-programmed mode\n");
goto out; /* FIXME: check for quirks */
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 8a136fef86f1..f8dbc843f852 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -46,7 +46,6 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
struct msm_gem_address_space *aspace)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_mmu *mmu = aspace->mmu;
struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -75,17 +74,15 @@ static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
* reload the pagetable if the current ring gets preempted out.
*/
OUT_PKT7(ring, CP_MEM_WRITE, 4);
- OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
- OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, ttbr0)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, ttbr0)));
OUT_RING(ring, lower_32_bits(iommu->ttbr0));
OUT_RING(ring, upper_32_bits(iommu->ttbr0));
/* Also write the current contextidr (ASID) */
OUT_PKT7(ring, CP_MEM_WRITE, 3);
- OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id,
- contextidr)));
- OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id,
- contextidr)));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, contextidr)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, contextidr)));
OUT_RING(ring, iommu->contextidr);
/* Invalidate the draw state so we start off fresh */
@@ -217,8 +214,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
- OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
- OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->fence);
if (submit->secure) {
@@ -477,30 +474,14 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
const struct firmware *fw, u64 *iova)
{
- struct drm_device *drm = gpu->dev;
struct drm_gem_object *bo;
void *ptr;
- bo = msm_gem_new(drm, fw->size - 4,
- MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
-
- if (IS_ERR(bo))
- return bo;
-
- ptr = msm_gem_vaddr(bo);
- if (!ptr) {
- drm_gem_object_unreference_unlocked(bo);
- return ERR_PTR(-ENOMEM);
- }
-
- if (iova) {
- int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
+ ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
- if (ret) {
- drm_gem_object_unreference_unlocked(bo);
- return ERR_PTR(ret);
- }
- }
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
memcpy(ptr, &fw->data[4], fw->size - 4);
return bo;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 0025922540df..647b61313fc2 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -458,18 +458,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
*/
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
- a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize,
- MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
-
- if (IS_ERR(a5xx_gpu->gpmu_bo))
- goto err;
-
- if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
- &a5xx_gpu->gpmu_iova))
- goto err;
-
- ptr = msm_gem_vaddr(a5xx_gpu->gpmu_bo);
- if (!ptr)
+ ptr = msm_gem_kernel_new(drm, bosize,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+ &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+ if (IS_ERR(ptr))
goto err;
while (cmds_size > 0) {
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 57046089434c..57ef366cf82c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -15,41 +15,6 @@
#include "msm_iommu.h"
#include "a5xx_gpu.h"
-static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu,
- size_t size, uint32_t flags, struct drm_gem_object **bo,
- u64 *iova)
-{
- struct drm_gem_object *_bo;
- u64 _iova;
- void *ptr;
- int ret;
-
- _bo = msm_gem_new(drm, size, flags);
-
- if (IS_ERR(_bo))
- return _bo;
-
- ret = msm_gem_get_iova(_bo, gpu->aspace, &_iova);
- if (ret)
- goto out;
-
- ptr = msm_gem_vaddr(_bo);
- if (!ptr) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (bo)
- *bo = _bo;
- if (iova)
- *iova = _iova;
-
- return ptr;
-out:
- drm_gem_object_unreference_unlocked(_bo);
- return ERR_PTR(ret);
-}
-
/*
* Try to transition the preemption state from old to new. Return
* true on success or false if the original state wasn't 'old'
@@ -100,7 +65,6 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
unsigned long flags;
int i;
@@ -109,7 +73,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
struct msm_ringbuffer *ring = gpu->rb[i];
spin_lock_irqsave(&ring->lock, flags);
- empty = (get_wptr(ring) == adreno_gpu->memptrs->rptr[ring->id]);
+ empty = (get_wptr(ring) == ring->memptrs->rptr);
spin_unlock_irqrestore(&ring->lock, flags);
if (!empty)
@@ -176,10 +140,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
/* Set the SMMU info for the preemption */
if (a5xx_gpu->smmu_info) {
- a5xx_gpu->smmu_info->ttbr0 =
- adreno_gpu->memptrs->ttbr0[ring->id];
- a5xx_gpu->smmu_info->contextidr =
- adreno_gpu->memptrs->contextidr[ring->id];
+ a5xx_gpu->smmu_info->ttbr0 = ring->memptrs->ttbr0;
+ a5xx_gpu->smmu_info->contextidr = ring->memptrs->contextidr;
}
/* Set the address of the incoming preemption record */
@@ -278,10 +240,10 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
struct drm_gem_object *bo;
u64 iova;
- ptr = alloc_kernel_bo(gpu->dev, gpu,
+ ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
- &bo, &iova);
+ gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -296,7 +258,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->info = 0;
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
- ptr->rptr_addr = rbmemptr(adreno_gpu, ring->id, rptr);
+ ptr->rptr_addr = rbmemptr(ring, rptr);
ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
return 0;
@@ -352,10 +314,10 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
}
if (msm_iommu_allow_dynamic(gpu->aspace->mmu)) {
- ptr = alloc_kernel_bo(gpu->dev, gpu,
+ ptr = msm_gem_kernel_new(gpu->dev,
sizeof(struct a5xx_smmu_info),
MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
- &bo, &iova);
+ gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
index c2773cb325d5..d1c1ab460c95 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
@@ -214,28 +214,14 @@ struct crashdump {
static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
{
- struct drm_device *drm = gpu->dev;
- int ret = -ENOMEM;
-
- crashdump->bo = msm_gem_new_locked(drm, CRASHDUMP_BO_SIZE,
- MSM_BO_UNCACHED);
- if (IS_ERR(crashdump->bo)) {
- ret = PTR_ERR(crashdump->bo);
- crashdump->bo = NULL;
- return ret;
- }
-
- crashdump->ptr = msm_gem_vaddr(crashdump->bo);
- if (!crashdump->ptr)
- goto out;
-
- ret = msm_gem_get_iova(crashdump->bo, gpu->aspace,
- &crashdump->iova);
-
-out:
- if (ret) {
- drm_gem_object_unreference(crashdump->bo);
- crashdump->bo = NULL;
+ int ret = 0;
+
+ crashdump->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED,
+ gpu->aspace, &crashdump->bo, &crashdump->iova);
+ if (IS_ERR(crashdump->ptr)) {
+ ret = PTR_ERR(crashdump->ptr);
+ crashdump->ptr = NULL;
}
return ret;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 81fa37ee9671..9f3d957499d3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -90,7 +90,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, 0, rptr));
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu->rb[0], rptr));
return 0;
}
@@ -106,10 +106,11 @@ static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
* ensure that it won't be. If not then this is why your
* a430 stopped working.
*/
- return adreno_gpu->memptrs->rptr[ring->id] = adreno_gpu_read(
- adreno_gpu, REG_ADRENO_CP_RB_RPTR);
- } else
- return adreno_gpu->memptrs->rptr[ring->id];
+ return ring->memptrs->rptr =
+ adreno_gpu_read(adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+ }
+
+ return ring->memptrs->rptr;
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
@@ -128,17 +129,11 @@ uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-
- if (!ring)
- return 0;
-
- return adreno_gpu->memptrs->fence[ring->id];
+ return ring ? ring->memptrs->fence : 0;
}
void adreno_recover(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *dev = gpu->dev;
struct msm_ringbuffer *ring;
int ret, i;
@@ -156,9 +151,8 @@ void adreno_recover(struct msm_gpu *gpu)
ring->next = ring->start;
/* reset completed fence seqno, discard anything pending: */
- adreno_gpu->memptrs->fence[ring->id] =
- adreno_submitted_fence(gpu, ring);
- adreno_gpu->memptrs->rptr[ring->id] = 0;
+ ring->memptrs->fence = adreno_submitted_fence(gpu, ring);
+ ring->memptrs->rptr = 0;
}
gpu->funcs->pm_resume(gpu);
@@ -213,7 +207,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
- OUT_RING(ring, rbmemptr(adreno_gpu, ring->id, fence));
+ OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->fence);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -516,7 +510,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu *gpu = &adreno_gpu->base;
- struct msm_mmu *mmu;
int ret;
adreno_gpu->funcs = funcs;
@@ -541,77 +534,19 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
}
ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
- if (ret) {
+ if (ret)
dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
adreno_gpu->info->pfpfw, ret);
- return ret;
- }
-
- mmu = gpu->aspace->mmu;
- if (mmu) {
- ret = mmu->funcs->attach(mmu, NULL, 0);
- if (ret)
- return ret;
- }
-
- if (gpu->secure_aspace) {
- mmu = gpu->secure_aspace->mmu;
- if (mmu) {
- ret = mmu->funcs->attach(mmu, NULL, 0);
- if (ret)
- return ret;
- }
- }
- adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
- MSM_BO_UNCACHED);
- if (IS_ERR(adreno_gpu->memptrs_bo)) {
- ret = PTR_ERR(adreno_gpu->memptrs_bo);
- adreno_gpu->memptrs_bo = NULL;
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
- return ret;
- }
-
- adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
- if (!adreno_gpu->memptrs) {
- dev_err(drm->dev, "could not vmap memptrs\n");
- return -ENOMEM;
- }
-
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
- &adreno_gpu->memptrs_iova);
- if (ret) {
- dev_err(drm->dev, "could not map memptrs: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return ret;
}
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
- struct msm_gem_address_space *aspace = gpu->base.aspace;
-
- if (gpu->memptrs_bo) {
- if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, aspace);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
- }
release_firmware(gpu->pm4);
release_firmware(gpu->pfp);
msm_gpu_cleanup(&gpu->base);
-
- if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu);
- msm_gem_address_space_put(aspace);
- }
-
- if (gpu->base.secure_aspace) {
- aspace = gpu->base.secure_aspace;
- aspace->mmu->funcs->detach(aspace->mmu);
- msm_gem_address_space_put(aspace);
- }
}
static void adreno_snapshot_os(struct msm_gpu *gpu,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 9e622fa06ce4..c894956fb5e8 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -83,22 +83,6 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
-#define _sizeof(member) \
- sizeof(((struct adreno_rbmemptrs *) 0)->member[0])
-
-#define _base(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
-#define rbmemptr(adreno_gpu, index, member) \
- (_base((adreno_gpu), member) + ((index) * _sizeof(member)))
-
-struct adreno_rbmemptrs {
- volatile uint32_t rptr[MSM_GPU_MAX_RINGS];
- volatile uint32_t fence[MSM_GPU_MAX_RINGS];
- volatile uint64_t ttbr0[MSM_GPU_MAX_RINGS];
- volatile unsigned int contextidr[MSM_GPU_MAX_RINGS];
-};
-
struct adreno_counter {
u32 lo;
u32 hi;
@@ -137,13 +121,6 @@ struct adreno_gpu {
/* firmware: */
const struct firmware *pm4, *pfp;
- /* ringbuffer rptr/wptr: */
- // TODO should this be in msm_ringbuffer? I think it would be
- // different for z180..
- struct adreno_rbmemptrs *memptrs;
- struct drm_gem_object *memptrs_bo;
- uint64_t memptrs_iova;
-
/*
* Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index 64914585c9a5..d0c3deefabd7 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -705,13 +705,22 @@ static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm)
u64 clip_pclk;
int rc = 0;
+ mutex_lock(&display->display_lock);
+
if (!hdmi->power_on || !display->connected) {
SDE_ERROR("HDMI display is not ready\n");
+ mutex_unlock(&display->display_lock);
+ return -EINVAL;
+ }
+
+ if (!display->pll_update_enable) {
+ SDE_ERROR("PLL update function is not enabled\n");
+ mutex_unlock(&display->display_lock);
return -EINVAL;
}
/* get current pclk */
- cur_pclk = hdmi->pixclock;
+ cur_pclk = hdmi->actual_pixclock;
/* get desired pclk */
dst_pclk = cur_pclk * (1000000000 + ppm);
do_div(dst_pclk, 1000000000);
@@ -725,13 +734,16 @@ static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm)
rc = clk_set_rate(hdmi->pwr_clks[0], clip_pclk);
if (rc < 0) {
- SDE_ERROR("PLL update failed, reset clock rate\n");
+ SDE_ERROR("HDMI PLL update failed\n");
+ mutex_unlock(&display->display_lock);
return rc;
}
- hdmi->pixclock = clip_pclk;
+ hdmi->actual_pixclock = clip_pclk;
}
+ mutex_unlock(&display->display_lock);
+
return rc;
}
@@ -767,14 +779,119 @@ static const struct file_operations pll_delta_fops = {
.write = _sde_hdmi_debugfs_pll_delta_write,
};
+/**
+ * _sde_hdmi_enable_pll_update() - Enable or disable the HDMI PLL update
+ * function.
+ * @display: Handle to the HDMI display.
+ * @enable: non-zero to enable the PLL update function, 0 to disable.
+ *
+ * Return: 0 on success, non-zero in case of failure.
+ */
+static int _sde_hdmi_enable_pll_update(struct sde_hdmi *display, s32 enable)
+{
+ struct hdmi *hdmi = display->ctrl.ctrl;
+ int rc = 0;
+
+ mutex_lock(&display->display_lock);
+
+ if (!hdmi->power_on || !display->connected) {
+ SDE_ERROR("HDMI display is not ready\n");
+ mutex_unlock(&display->display_lock);
+ return -EINVAL;
+ }
+
+ if (!enable && hdmi->actual_pixclock != hdmi->pixclock) {
+ /* reset pixel clock when disable */
+ rc = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
+ if (rc < 0) {
+ SDE_ERROR("reset clock rate failed\n");
+ mutex_unlock(&display->display_lock);
+ return rc;
+ }
+ }
+ hdmi->actual_pixclock = hdmi->pixclock;
+
+ display->pll_update_enable = !!enable;
+
+ mutex_unlock(&display->display_lock);
+
+ SDE_DEBUG("HDMI PLL update: %s\n",
+ display->pll_update_enable ? "enable" : "disable");
+
+ return rc;
+}
+
+static ssize_t _sde_hdmi_debugfs_pll_enable_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char *buf;
+ u32 len = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(SZ_1K, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += snprintf(buf, SZ_1K, "%s\n",
+ display->pll_update_enable ? "enable" : "disable");
+
+ if (copy_to_user(buff, buf, len)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ kfree(buf);
+ return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_pll_enable_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[10];
+ int enable = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &enable))
+ return -EFAULT;
+
+ _sde_hdmi_enable_pll_update(display, enable);
+
+ return count;
+}
+
+static const struct file_operations pll_enable_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_pll_enable_read,
+ .write = _sde_hdmi_debugfs_pll_enable_write,
+};
+
static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
{
int rc = 0;
struct dentry *dir, *dump_file, *edid_modes;
struct dentry *edid_vsdb_info, *edid_hdr_info, *edid_hfvsdb_info;
- struct dentry *edid_vcdb_info, *edid_vendor_name, *pll_file;
+ struct dentry *edid_vcdb_info, *edid_vendor_name;
struct dentry *src_hdcp14_support, *src_hdcp22_support;
struct dentry *sink_hdcp22_support, *hdmi_hdcp_state;
+ struct dentry *pll_delta_file, *pll_enable_file;
dir = debugfs_create_dir(display->name, NULL);
if (!dir) {
@@ -796,18 +913,30 @@ static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
goto error_remove_dir;
}
- pll_file = debugfs_create_file("pll_delta",
+ pll_delta_file = debugfs_create_file("pll_delta",
0644,
dir,
display,
&pll_delta_fops);
- if (IS_ERR_OR_NULL(pll_file)) {
- rc = PTR_ERR(pll_file);
+ if (IS_ERR_OR_NULL(pll_delta_file)) {
+ rc = PTR_ERR(pll_delta_file);
SDE_ERROR("[%s]debugfs create pll_delta file failed, rc=%d\n",
display->name, rc);
goto error_remove_dir;
}
+ pll_enable_file = debugfs_create_file("pll_enable",
+ 0644,
+ dir,
+ display,
+ &pll_enable_fops);
+ if (IS_ERR_OR_NULL(pll_enable_file)) {
+ rc = PTR_ERR(pll_enable_file);
+ SDE_ERROR("[%s]debugfs create pll_enable file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
edid_modes = debugfs_create_file("edid_modes",
0444,
dir,
@@ -1749,17 +1878,42 @@ int sde_hdmi_set_property(struct drm_connector *connector,
if (!connector || !display) {
SDE_ERROR("connector=%pK or display=%pK is NULL\n",
connector, display);
- return 0;
+ return -EINVAL;
}
SDE_DEBUG("\n");
- if (property_index == CONNECTOR_PROP_PLL_DELTA)
+ if (property_index == CONNECTOR_PROP_PLL_ENABLE)
+ rc = _sde_hdmi_enable_pll_update(display, value);
+ else if (property_index == CONNECTOR_PROP_PLL_DELTA)
rc = _sde_hdmi_update_pll_delta(display, value);
return rc;
}
+int sde_hdmi_get_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t *value,
+ void *display)
+{
+ struct sde_hdmi *hdmi_display = display;
+ int rc = 0;
+
+ if (!connector || !hdmi_display) {
+ SDE_ERROR("connector=%pK or display=%pK is NULL\n",
+ connector, hdmi_display);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_display->display_lock);
+ if (property_index == CONNECTOR_PROP_PLL_ENABLE)
+ *value = hdmi_display->pll_update_enable ? 1 : 0;
+ mutex_unlock(&hdmi_display->display_lock);
+
+ return rc;
+}
+
u32 sde_hdmi_get_num_of_displays(void)
{
u32 count = 0;
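The delta math in _sde_hdmi_update_pll_delta() scales the pixel clock against a 1e9 denominator; whether callers pass the s32 delta as plain ppm or pre-scaled is not shown here, so treat the units as an assumption. A runnable illustration of the arithmetic itself:

    #include <stdint.h>
    #include <stdio.h>

    /* dst = pclk * (1e9 + delta) / 1e9, as in the driver */
    static uint64_t apply_delta(uint64_t pclk, int32_t delta)
    {
            return pclk * (1000000000ULL + delta) / 1000000000ULL;
    }

    int main(void)
    {
            /* a +100 delta on a 148.5 MHz clock adds ~14.85 Hz */
            printf("%llu\n",
                   (unsigned long long)apply_delta(148500000ULL, 100));
            return 0;
    }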
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index 84d8720969be..47f440d617a3 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -109,6 +109,7 @@ enum hdmi_tx_feature_type {
* @codec_ready: If audio codec is ready.
* @client_notify_pending: If there is client notification pending.
* @irq_domain: IRQ domain structure.
+ * @pll_update_enable: true if HDMI PLL ppm updates are allowed.
* @notifier: CEC notifier to convey physical address information.
* @root: Debug fs root entry.
*/
@@ -159,6 +160,7 @@ struct sde_hdmi {
struct irq_domain *irq_domain;
struct cec_notifier *notifier;
+ bool pll_update_enable;
struct delayed_work hdcp_cb_work;
struct dss_io_data io[HDMI_TX_MAX_IO];
@@ -345,6 +347,22 @@ int sde_hdmi_set_property(struct drm_connector *connector,
void *display);
/**
+ * sde_hdmi_get_property() - get the connector properties
+ * @connector: Handle to the connector.
+ * @state: Handle to the connector state.
+ * @property_index: property index.
+ * @value: property value.
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_get_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t *value,
+ void *display);
+
+/**
* sde_hdmi_bridge_init() - init sde hdmi bridge
* @hdmi: Handle to the hdmi.
*
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 7af84f5c4229..24d2320683e4 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -494,6 +494,16 @@ static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ mutex_lock(&display->display_lock);
+
+ display->pll_update_enable = false;
+
+ mutex_unlock(&display->display_lock);
}
static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 84b578eaad47..8ca7b36ee0c8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -56,6 +56,7 @@ struct hdmi {
/* video state: */
bool power_on;
unsigned long int pixclock;
+ unsigned long int actual_pixclock;
void __iomem *mmio;
void __iomem *qfprom_mmio;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a441f3d15542..367e701b59cb 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -26,11 +26,68 @@
#include "msm_gem.h"
#include "msm_mmu.h"
+static void msm_drm_helper_hotplug_event(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ char *event_string;
+ char const *connector_name;
+ char *envp[2];
+
+ if (!dev) {
+ DRM_ERROR("hotplug_event failed, invalid input\n");
+ return;
+ }
+
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ event_string = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!event_string) {
+ DRM_ERROR("failed to allocate event string\n");
+ return;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev) {
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ connector->status = connector->funcs->detect(connector, false);
+
+ if (connector->name)
+ connector_name = connector->name;
+ else
+ connector_name = "unknown";
+
+ snprintf(event_string, SZ_4K, "name=%s status=%s\n",
+ connector_name,
+ drm_get_connector_status_name(connector->status));
+ DRM_DEBUG("generating hotplug event [%s]\n", event_string);
+ envp[0] = event_string;
+ envp[1] = NULL;
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
+ envp);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ kfree(event_string);
+}
+
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = NULL;
+
+ if (!dev) {
+ DRM_ERROR("output_poll_changed failed, invalid input\n");
+ return;
+ }
+
+ priv = dev->dev_private;
+
if (priv->fbdev)
drm_fb_helper_hotplug_event(priv->fbdev);
+ else
+ msm_drm_helper_hotplug_event(dev);
}
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -602,7 +659,8 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- INIT_LIST_HEAD(&ctx->counters);
+ if (ctx)
+ INIT_LIST_HEAD(&ctx->counters);
msm_submitqueue_init(ctx);
@@ -1839,6 +1897,7 @@ static struct drm_driver msm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
+ .gem_prime_res_obj = msm_gem_prime_res_obj,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index adc1e021b6f7..f85938f8a034 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -155,6 +155,7 @@ enum msm_mdp_conn_property {
CONNECTOR_PROP_DST_W,
CONNECTOR_PROP_DST_H,
CONNECTOR_PROP_PLL_DELTA,
+ CONNECTOR_PROP_PLL_ENABLE,
/* enum/bitmask properties */
CONNECTOR_PROP_TOPOLOGY_NAME,
@@ -465,6 +466,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -494,7 +496,12 @@ int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file,
struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
struct drm_file *file, uint64_t hostptr,
uint64_t size, uint32_t flags);
-
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b7dd84cb23b9..d66071672c62 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1046,7 +1046,7 @@ struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
{
struct drm_gem_object *obj;
struct msm_file_private *ctx = file->driver_priv;
- struct msm_gem_address_space *aspace = ctx->aspace;
+ struct msm_gem_address_space *aspace;
struct msm_gem_object *msm_obj;
struct msm_gem_svm_object *msm_svm_obj;
struct msm_gem_vma *domain = NULL;
@@ -1056,6 +1056,9 @@ struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
int write;
int ret;
+ if (!ctx)
+ return ERR_PTR(-ENODEV);
+
/* if we don't have IOMMU, don't bother pretending we can import: */
if (!iommu_present(&platform_bus_type)) {
dev_err_once(dev->dev, "cannot import without IOMMU\n");
@@ -1078,6 +1081,7 @@ struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
drm_gem_private_object_init(dev, obj, size);
msm_obj = to_msm_bo(obj);
+ aspace = ctx->aspace;
domain = obj_add_domain(&msm_obj->base, aspace);
if (IS_ERR(domain)) {
drm_gem_object_unreference_unlocked(obj);
@@ -1285,3 +1289,51 @@ void msm_mn_invalidate_range_start(struct mmu_notifier *mn,
msm_gem_mn_put(msm_mn);
}
+
+/*
+ * Helper function to consolidate in-kernel buffer allocations that need a
+ * buffer object, an iova and a kernel virtual address all in one shot
+ */
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+ void *vaddr;
+ struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+ int ret;
+
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ ret = msm_gem_get_iova(obj, aspace, iova);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ return ERR_PTR(ret);
+ }
+
+ vaddr = msm_gem_vaddr(obj);
+ if (!vaddr) {
+ msm_gem_put_iova(obj, aspace);
+ drm_gem_object_unreference(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ *bo = obj;
+ return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova,
+ false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova,
+ true);
+}
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 678018804f3a..9f3c097d011b 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -80,3 +80,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
if (!obj->import_attach)
msm_gem_put_pages(obj);
}
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ return msm_obj->resv;
+}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b9ace0d9e69b..7ccc146f3ae1 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -430,7 +430,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
gpu = priv->gpu;
- if (!gpu)
+ if (!gpu || !ctx)
return -ENXIO;
queue = msm_submitqueue_get(ctx, args->queueid);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 6bbd4c338433..d896e436251f 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -669,6 +669,9 @@ int msm_gpu_counter_put(struct msm_gpu *gpu, struct drm_msm_counter *data,
{
struct msm_context_counter *entry;
+ if (!gpu || !ctx)
+ return -ENODEV;
+
list_for_each_entry(entry, &ctx->counters, node) {
if (entry->groupid == data->groupid &&
entry->counterid == data->counterid) {
@@ -810,17 +813,39 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct device *dev,
gpu->name, name, PTR_ERR(aspace));
iommu_domain_free(iommu);
- aspace = NULL;
+ return NULL;
+ }
+
+ if (aspace->mmu) {
+ int ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+
+ if (ret) {
+ dev_err(gpu->dev->dev,
+ "%s: failed to attach IOMMU '%s': %d\n",
+ gpu->name, name, ret);
+ msm_gem_address_space_put(aspace);
+ aspace = ERR_PTR(ret);
+ }
}
return aspace;
}
+static void msm_gpu_destroy_address_space(struct msm_gem_address_space *aspace)
+{
+ if (!IS_ERR_OR_NULL(aspace) && aspace->mmu)
+ aspace->mmu->funcs->detach(aspace->mmu);
+
+ msm_gem_address_space_put(aspace);
+}
+
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
{
int i, ret, nr_rings;
+ void *memptrs;
+ uint64_t memptrs_iova;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -903,10 +928,18 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
nr_rings = ARRAY_SIZE(gpu->rb);
}
+ /* Allocate one buffer to hold all the memptr records for the rings */
+ memptrs = msm_gem_kernel_new(drm, sizeof(struct msm_memptrs) * nr_rings,
+ MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, &memptrs_iova);
+
+ if (IS_ERR(memptrs)) {
+ ret = PTR_ERR(memptrs);
+ goto fail;
+ }
+
/* Create ringbuffer(s): */
for (i = 0; i < nr_rings; i++) {
-
- gpu->rb[i] = msm_ringbuffer_new(gpu, i);
+ gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
if (IS_ERR(gpu->rb[i])) {
ret = PTR_ERR(gpu->rb[i]);
gpu->rb[i] = NULL;
@@ -914,6 +947,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
"could not create ringbuffer %d: %d\n", i, ret);
goto fail;
}
+
+ memptrs += sizeof(struct msm_memptrs);
+ memptrs_iova += sizeof(struct msm_memptrs);
}
gpu->nr_rings = nr_rings;
@@ -935,11 +971,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return 0;
fail:
- for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
- if (gpu->rb[i])
- msm_ringbuffer_destroy(gpu->rb[i]);
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
+ msm_ringbuffer_destroy(gpu->rb[i]);
+
+ if (gpu->memptrs_bo) {
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
+ msm_gpu_destroy_address_space(gpu->aspace);
+ msm_gpu_destroy_address_space(gpu->secure_aspace);
+
pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -957,16 +999,17 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
bs_fini(gpu);
- for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
- if (!gpu->rb[i])
- continue;
-
- if (gpu->rb[i]->iova)
- msm_gem_put_iova(gpu->rb[i]->bo, gpu->aspace);
-
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
msm_ringbuffer_destroy(gpu->rb[i]);
+
+ if (gpu->memptrs_bo) {
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
msm_snapshot_destroy(gpu, gpu->snapshot);
pm_runtime_disable(&pdev->dev);
+
+ msm_gpu_destroy_address_space(gpu->aspace);
+ msm_gpu_destroy_address_space(gpu->secure_aspace);
}
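The memptrs handling above carves a single GEM allocation into one struct msm_memptrs slice per ring, advancing the CPU pointer and the iova in lockstep. A hedged userspace sketch of that slicing (illustrative sizes and addresses):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct memptrs { uint32_t rptr, fence; };

    int main(void)
    {
            int nr_rings = 4, i;
            struct memptrs *cpu = calloc(nr_rings, sizeof(*cpu));
            uint64_t iova = 0x2000;   /* stand-in for the GPU address */

            if (!cpu)
                    return 1;

            for (i = 0; i < nr_rings; i++)
                    printf("ring %d: cpu=%p iova=0x%llx\n", i,
                           (void *)&cpu[i],
                           (unsigned long long)(iova + i * sizeof(*cpu)));

            free(cpu);
            return 0;
    }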
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index cddbcbbb8557..306139bcd103 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -131,6 +131,8 @@ struct msm_gpu {
struct pm_qos_request pm_qos_req_dma;
+ struct drm_gem_object *memptrs_bo;
+
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
uint32_t bsc;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 458f80426b39..2a5843e6f81b 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -18,7 +18,8 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ struct msm_memptrs *memptrs, uint64_t memptrs_iova)
{
struct msm_ringbuffer *ring;
int ret;
@@ -42,6 +43,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
goto fail;
}
+ ring->memptrs = memptrs;
+ ring->memptrs_iova = memptrs_iova;
+
ring->start = msm_gem_vaddr(ring->bo);
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
@@ -60,7 +65,10 @@ fail:
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
- if (ring->bo)
+ if (ring && ring->bo) {
+ msm_gem_put_iova(ring->bo, ring->gpu->aspace);
drm_gem_object_unreference_unlocked(ring->bo);
+ }
+
kfree(ring);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 70643a7b4cc4..3eb9a86b2a2e 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -20,6 +20,16 @@
#include "msm_drv.h"
+#define rbmemptr(ring, member) \
+ ((ring)->memptrs_iova + offsetof(struct msm_memptrs, member))
+
+struct msm_memptrs {
+ volatile uint32_t rptr;
+ volatile uint32_t fence;
+ volatile uint64_t ttbr0;
+ volatile unsigned int contextidr;
+};
+
struct msm_ringbuffer {
struct msm_gpu *gpu;
int id;
@@ -29,9 +39,13 @@ struct msm_ringbuffer {
uint32_t submitted_fence;
spinlock_t lock;
struct list_head submits;
+
+ struct msm_memptrs *memptrs;
+ uint64_t memptrs_iova;
};
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ struct msm_memptrs *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
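The new rbmemptr() macro replaces the old per-index arithmetic with a plain offsetof() against the ring's own memptrs block. A small standalone demonstration of the address computation:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct memptrs {
            volatile uint32_t rptr;
            volatile uint32_t fence;
            volatile uint64_t ttbr0;
            volatile unsigned int contextidr;
    };

    /* GPU-visible address of one field: base iova + field offset */
    #define RBMEMPTR(iova, member) \
            ((iova) + offsetof(struct memptrs, member))

    int main(void)
    {
            uint64_t iova = 0x1000;   /* illustrative base address */

            printf("fence at 0x%llx, ttbr0 at 0x%llx\n",
                   (unsigned long long)RBMEMPTR(iova, fence),
                   (unsigned long long)RBMEMPTR(iova, ttbr0));
            return 0;
    }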
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 7538927a4993..e08fe210604c 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -582,6 +582,10 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
"PLL_DELTA", 0x0, INT_MIN, INT_MAX, 0,
CONNECTOR_PROP_PLL_DELTA);
+ msm_property_install_volatile_range(&c_conn->property_info,
+ "PLL_ENABLE", 0x0, 0, 1, 0,
+ CONNECTOR_PROP_PLL_ENABLE);
+
/* enum/bitmask properties */
msm_property_install_enum(&c_conn->property_info, "topology_name",
DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name,
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 45a87456e5ec..a240ed08da89 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -601,6 +601,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.mode_valid = sde_hdmi_mode_valid,
.get_info = sde_hdmi_get_info,
.set_property = sde_hdmi_set_property,
+ .get_property = sde_hdmi_get_property,
};
struct msm_display_info info = {0};
struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 82d3e28918fd..7e4f24ae7de8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -4,6 +4,7 @@
struct nvkm_alarm {
struct list_head head;
+ struct list_head exec;
u64 timestamp;
void (*func)(struct nvkm_alarm *);
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index 79fcdb43e174..46033909d950 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
/* Move to completed list. We'll drop the lock before
* executing the callback so it can reschedule itself.
*/
- list_move_tail(&alarm->head, &exec);
+ list_del_init(&alarm->head);
+ list_add(&alarm->exec, &exec);
}
/* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
spin_unlock_irqrestore(&tmr->lock, flags);
/* Execute completed callbacks. */
- list_for_each_entry_safe(alarm, atemp, &exec, head) {
- list_del_init(&alarm->head);
+ list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+ list_del(&alarm->exec);
alarm->func(alarm);
}
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 4a09947be244..3c32f095a873 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+ /* disable mclk switching if the refresh is >120Hz, even if the
+ * blanking period would allow it
+ */
+ if (r600_dpm_get_vrefresh(rdev) > 120)
+ return true;
+
if (vblank_time < switch_limit)
return true;
else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index f81fb2641097..134874cab4c7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7762,7 +7762,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -7792,7 +7792,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 32491355a1d4..ba9e6ed4ae54 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4924,7 +4924,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -4955,7 +4955,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cc2fdf0be37a..0e20c08f8977 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3945,7 +3945,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f878d6962da5..5cf3a2cbc07e 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6335,7 +6335,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -6366,7 +6366,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index b6a0806b06bf..a1c68e6a689e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
return fifo_state->static_buffer;
else {
fifo_state->dynamic_buffer = vmalloc(bytes);
+ if (!fifo_state->dynamic_buffer)
+ goto out_err;
return fifo_state->dynamic_buffer;
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c9c04ccccdd9..027987023400 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1288,11 +1288,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint32_t size;
- uint32_t backup_handle;
+ uint32_t backup_handle = 0;
if (req->multisample_count != 0)
return -EINVAL;
+ if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128;
@@ -1328,12 +1331,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup,
&user_srf->backup_base);
- if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
- res->backup_size) {
- DRM_ERROR("Surface backup buffer is too small.\n");
- vmw_dmabuf_unreference(&res->backup);
- ret = -EINVAL;
- goto out_unlock;
+ if (ret == 0) {
+ if (res->backup->base.num_pages * PAGE_SIZE <
+ res->backup_size) {
+ DRM_ERROR("Surface backup buffer is too small.\n");
+ vmw_dmabuf_unreference(&res->backup);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else {
+ backup_handle = req->buffer_handle;
+ }
}
} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
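
Initializing backup_handle closes a potential leak of uninitialized stack memory to userspace later in the ioctl, and the new mip_levels check rejects out-of-range requests before any allocation happens. A hedged sketch of the general pattern, with all names hypothetical:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	#define MY_MAX_LEVELS 16	/* hypothetical bound */

	struct my_reply {
		u32 handle;
	};

	static long my_ioctl(void __user *uarg, u32 requested_levels)
	{
		struct my_reply rep = { .handle = 0 };	/* zero-init: nothing uninitialized can leak */

		if (requested_levels > MY_MAX_LEVELS)	/* bound-check untrusted input up front */
			return -EINVAL;

		/* ... fill rep.handle only on the success path ... */

		if (copy_to_user(uarg, &rep, sizeof(rep)))
			return -EFAULT;
		return 0;
	}
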
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index b4d0656d062b..8902c3175c79 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2443,7 +2443,7 @@ static void _dispatcher_power_down(struct adreno_device *adreno_dev)
mutex_unlock(&device->mutex);
}
-static void adreno_dispatcher_work(struct work_struct *work)
+static void adreno_dispatcher_work(struct kthread_work *work)
{
struct adreno_dispatcher *dispatcher =
container_of(work, struct adreno_dispatcher, work);
@@ -2503,7 +2503,7 @@ void adreno_dispatcher_schedule(struct kgsl_device *device)
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- kgsl_schedule_work(&dispatcher->work);
+ queue_kthread_work(&kgsl_driver.worker, &dispatcher->work);
}
/**
@@ -2799,7 +2799,7 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
(unsigned long) adreno_dev);
- INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+ init_kthread_work(&dispatcher->work, adreno_dispatcher_work);
init_completion(&dispatcher->idle_gate);
complete_all(&dispatcher->idle_gate);
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 72545db12f90..48f0cdc546ff 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -91,7 +91,7 @@ struct adreno_dispatcher {
atomic_t fault;
struct plist_head pending;
spinlock_t plist_lock;
- struct work_struct work;
+ struct kthread_work work;
struct kobject kobj;
struct completion idle_gate;
unsigned int disp_preempt_fair_sched;
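
The adreno_dispatch hunks move the dispatcher off the shared workqueue onto a kthread_work, so dispatch runs on a dedicated thread whose scheduling class can be raised (the kgsl.c hunks below create that thread with SCHED_FIFO). A minimal sketch of the legacy kthread-worker API these patches use; note that v4.9 renamed these helpers to kthread_init_worker(), kthread_init_work() and kthread_queue_work():

	#include <linux/kthread.h>

	static struct kthread_worker my_worker;
	static struct task_struct *my_thread;
	static struct kthread_work my_work;

	static void my_work_fn(struct kthread_work *work)
	{
		/* runs on my_thread, serialized with all other work on this worker */
	}

	static int my_setup(void)
	{
		init_kthread_worker(&my_worker);
		my_thread = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
		if (IS_ERR(my_thread))
			return PTR_ERR(my_thread);

		init_kthread_work(&my_work, my_work_fn);
		queue_kthread_work(&my_worker, &my_work);	/* hand work to the thread */
		return 0;
	}

Unlike a workqueue, everything queued on one kthread_worker executes on that single thread, which is what lets a single sched_setscheduler() call in kgsl_core_init cover every GPU event callback.
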
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 7584811f388a..990c9bca5127 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -255,13 +255,6 @@ static void _deferred_put(struct work_struct *work)
kgsl_mem_entry_put(entry);
}
-static inline void
-kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
-{
- if (entry)
- queue_work(kgsl_driver.mem_workqueue, &entry->work);
-}
-
static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
@@ -272,7 +265,6 @@ kgsl_mem_entry_create(void)
/* put this ref in the calling functions after init */
kref_get(&entry->refcount);
- INIT_WORK(&entry->work, _deferred_put);
}
return entry;
}
@@ -1869,7 +1861,7 @@ long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
return -EINVAL;
ret = gpumem_free_entry(entry);
- kgsl_mem_entry_put_deferred(entry);
+ kgsl_mem_entry_put(entry);
return ret;
}
@@ -1887,7 +1879,7 @@ long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
return -EINVAL;
ret = gpumem_free_entry(entry);
- kgsl_mem_entry_put_deferred(entry);
+ kgsl_mem_entry_put(entry);
return ret;
}
@@ -1924,7 +1916,8 @@ static void gpuobj_free_fence_func(void *priv)
{
struct kgsl_mem_entry *entry = priv;
- kgsl_mem_entry_put_deferred(entry);
+ INIT_WORK(&entry->work, _deferred_put);
+ queue_work(kgsl_driver.mem_workqueue, &entry->work);
}
static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
@@ -1988,7 +1981,7 @@ long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
else
ret = -EINVAL;
- kgsl_mem_entry_put_deferred(entry);
+ kgsl_mem_entry_put(entry);
return ret;
}
@@ -3363,13 +3356,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
if (entry == NULL)
return -EINVAL;
- if (!kgsl_mem_entry_set_pend(entry)) {
- kgsl_mem_entry_put(entry);
- return -EBUSY;
- }
-
if (entry->memdesc.cur_bindings != 0) {
- kgsl_mem_entry_unset_pend(entry);
kgsl_mem_entry_put(entry);
return -EINVAL;
}
@@ -3378,7 +3365,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put_deferred(entry);
+ kgsl_mem_entry_put(entry);
return 0;
}
@@ -3438,13 +3425,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
if (entry == NULL)
return -EINVAL;
- if (!kgsl_mem_entry_set_pend(entry)) {
- kgsl_mem_entry_put(entry);
- return -EBUSY;
- }
-
if (entry->bind_tree.rb_node != NULL) {
- kgsl_mem_entry_unset_pend(entry);
kgsl_mem_entry_put(entry);
return -EINVAL;
}
@@ -3453,7 +3434,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put_deferred(entry);
+ kgsl_mem_entry_put(entry);
return 0;
}
@@ -4864,6 +4845,8 @@ static void kgsl_core_exit(void)
static int __init kgsl_core_init(void)
{
int result = 0;
+ struct sched_param param = { .sched_priority = 2 };
+
/* alloc major and minor device numbers */
result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
"kgsl");
@@ -4927,7 +4910,19 @@ static int __init kgsl_core_init(void)
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
- WQ_MEM_RECLAIM, 0);
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+
+ init_kthread_worker(&kgsl_driver.worker);
+
+ kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
+ &kgsl_driver.worker, "kgsl_worker_thread");
+
+ if (IS_ERR(kgsl_driver.worker_thread)) {
+ pr_err("unable to start kgsl thread\n");
+ goto err;
+ }
+
+ sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, &param);
kgsl_events_init();
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 2a9ac899725c..faf38d1d2293 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -26,6 +26,7 @@
#include <linux/mm.h>
#include <linux/dma-attrs.h>
#include <linux/uaccess.h>
+#include <linux/kthread.h>
#include <asm/cacheflush.h>
/*
@@ -152,6 +153,8 @@ struct kgsl_driver {
unsigned int full_cache_threshold;
struct workqueue_struct *workqueue;
struct workqueue_struct *mem_workqueue;
+ struct kthread_worker worker;
+ struct task_struct *worker_thread;
};
extern struct kgsl_driver kgsl_driver;
@@ -301,7 +304,7 @@ struct kgsl_event {
void *priv;
struct list_head node;
unsigned int created;
- struct work_struct work;
+ struct kthread_work work;
int result;
struct kgsl_event_group *group;
};
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index 6e8abf36c50f..859511baba12 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -32,7 +32,7 @@ static inline void signal_event(struct kgsl_device *device,
{
list_del(&event->node);
event->result = result;
- queue_work(device->events_wq, &event->work);
+ queue_kthread_work(&kgsl_driver.worker, &event->work);
}
/**
@@ -42,7 +42,7 @@ static inline void signal_event(struct kgsl_device *device,
* Each event callback has its own work struct and is run on an event specific
* workqueue. This is the worker that queues up the event callback function.
*/
-static void _kgsl_event_worker(struct work_struct *work)
+static void _kgsl_event_worker(struct kthread_work *work)
{
struct kgsl_event *event = container_of(work, struct kgsl_event, work);
int id = KGSL_CONTEXT_ID(event->context);
@@ -282,7 +282,7 @@ int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
event->created = jiffies;
event->group = group;
- INIT_WORK(&event->work, _kgsl_event_worker);
+ init_kthread_work(&event->work, _kgsl_event_worker);
trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func);
@@ -297,7 +297,7 @@ int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
if (timestamp_cmp(retired, timestamp) >= 0) {
event->result = KGSL_EVENT_RETIRED;
- queue_work(device->events_wq, &event->work);
+ queue_kthread_work(&kgsl_driver.worker, &event->work);
spin_unlock(&group->lock);
return 0;
}
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 1a2032c2c1fb..690a9f0fa042 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -28,6 +28,8 @@
#define UHID_NAME "uhid"
#define UHID_BUFSIZE 32
+static DEFINE_MUTEX(uhid_open_mutex);
+
struct uhid_device {
struct mutex devlock;
bool running;
@@ -142,15 +144,26 @@ static void uhid_hid_stop(struct hid_device *hid)
static int uhid_hid_open(struct hid_device *hid)
{
struct uhid_device *uhid = hid->driver_data;
+ int retval = 0;
- return uhid_queue_event(uhid, UHID_OPEN);
+ mutex_lock(&uhid_open_mutex);
+ if (!hid->open++) {
+ retval = uhid_queue_event(uhid, UHID_OPEN);
+ if (retval)
+ hid->open--;
+ }
+ mutex_unlock(&uhid_open_mutex);
+ return retval;
}
static void uhid_hid_close(struct hid_device *hid)
{
struct uhid_device *uhid = hid->driver_data;
- uhid_queue_event(uhid, UHID_CLOSE);
+ mutex_lock(&uhid_open_mutex);
+ if (!--hid->open)
+ uhid_queue_event(uhid, UHID_CLOSE);
+ mutex_unlock(&uhid_open_mutex);
}
static int uhid_hid_parse(struct hid_device *hid)
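
The uhid change keeps the hid->open count under a single mutex so that only the first opener emits UHID_OPEN and only the last closer emits UHID_CLOSE. A sketch of that first-open/last-close pattern, with hypothetical power_up()/power_down() helpers standing in for the queued events:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(dev_open_mutex);
	static int dev_open_count;

	static int power_up(void)    { return 0; }	/* hypothetical helper */
	static void power_down(void) { }		/* hypothetical helper */

	static int dev_open(void)
	{
		int ret = 0;

		mutex_lock(&dev_open_mutex);
		if (!dev_open_count++) {		/* only the first opener notifies */
			ret = power_up();
			if (ret)
				dev_open_count--;	/* roll back so a retry is "first" again */
		}
		mutex_unlock(&dev_open_mutex);
		return ret;
	}

	static void dev_close(void)
	{
		mutex_lock(&dev_open_mutex);
		if (!--dev_open_count)			/* only the last closer notifies */
			power_down();
		mutex_unlock(&dev_open_mutex);
	}
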
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 35e3fd9fadf6..b62c50d1b1e4 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1440,37 +1440,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
unsigned char *data = wacom->data;
- if (wacom->pen_input)
+ if (wacom->pen_input) {
dev_dbg(wacom->pen_input->dev.parent,
"%s: received report #%d\n", __func__, data[0]);
- else if (wacom->touch_input)
+
+ if (len == WACOM_PKGLEN_PENABLED ||
+ data[0] == WACOM_REPORT_PENABLED)
+ return wacom_tpc_pen(wacom);
+ }
+ else if (wacom->touch_input) {
dev_dbg(wacom->touch_input->dev.parent,
"%s: received report #%d\n", __func__, data[0]);
- switch (len) {
- case WACOM_PKGLEN_TPC1FG:
- return wacom_tpc_single_touch(wacom, len);
+ switch (len) {
+ case WACOM_PKGLEN_TPC1FG:
+ return wacom_tpc_single_touch(wacom, len);
- case WACOM_PKGLEN_TPC2FG:
- return wacom_tpc_mt_touch(wacom);
+ case WACOM_PKGLEN_TPC2FG:
+ return wacom_tpc_mt_touch(wacom);
- case WACOM_PKGLEN_PENABLED:
- return wacom_tpc_pen(wacom);
+ default:
+ switch (data[0]) {
+ case WACOM_REPORT_TPC1FG:
+ case WACOM_REPORT_TPCHID:
+ case WACOM_REPORT_TPCST:
+ case WACOM_REPORT_TPC1FGE:
+ return wacom_tpc_single_touch(wacom, len);
- default:
- switch (data[0]) {
- case WACOM_REPORT_TPC1FG:
- case WACOM_REPORT_TPCHID:
- case WACOM_REPORT_TPCST:
- case WACOM_REPORT_TPC1FGE:
- return wacom_tpc_single_touch(wacom, len);
-
- case WACOM_REPORT_TPCMT:
- case WACOM_REPORT_TPCMT2:
- return wacom_mt_touch(wacom);
+ case WACOM_REPORT_TPCMT:
+ case WACOM_REPORT_TPCMT2:
+ return wacom_mt_touch(wacom);
- case WACOM_REPORT_PENABLED:
- return wacom_tpc_pen(wacom);
+ }
}
}
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0ed77eeff31e..a2e3dd715380 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd,
int value, int index, void *data, int len)
{
struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+ void *dmadata = kmalloc(len, GFP_KERNEL);
+ int ret;
+
+ if (!dmadata)
+ return -ENOMEM;
/* do control transfer */
- return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
+ ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
- USB_DIR_IN, value, index, data, len, 2000);
+ USB_DIR_IN, value, index, dmadata, len, 2000);
+
+ memcpy(data, dmadata, len);
+ kfree(dmadata);
+ return ret;
}
static int usb_write(struct i2c_adapter *adapter, int cmd,
int value, int index, void *data, int len)
{
struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+ void *dmadata = kmemdup(data, len, GFP_KERNEL);
+ int ret;
+
+ if (!dmadata)
+ return -ENOMEM;
/* do control transfer */
- return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
+ ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- value, index, data, len, 2000);
+ value, index, dmadata, len, 2000);
+
+ kfree(dmadata);
+ return ret;
}
static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
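
usb_control_msg() hands its buffer to the host controller for DMA, so callers must not pass stack memory; the hunks above bounce the caller's buffer through kmalloc() (kmemdup() for the write direction). A sketch of the read-side bounce under hypothetical names; unlike the hunk, it copies back only when the transfer did not fail, a slightly stricter variant:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/usb.h>

	static int usb_read_bounced(struct usb_device *udev, u8 request,
				    void *data, int len)
	{
		void *dmadata = kmalloc(len, GFP_KERNEL);	/* heap buffer the HCD can DMA into */
		int ret;

		if (!dmadata)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
				      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
				      0, 0, dmadata, len, 2000);
		if (ret >= 0)
			memcpy(data, dmadata, len);	/* copy back only on success */
		kfree(dmadata);
		return ret;
	}
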
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 6bf89d8f3741..b9d1e5c58ec5 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000};
static const struct reg_field reg_field_it =
REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
static const struct reg_field reg_field_als_intr =
- REG_FIELD(LTR501_INTR, 0, 0);
-static const struct reg_field reg_field_ps_intr =
REG_FIELD(LTR501_INTR, 1, 1);
+static const struct reg_field reg_field_ps_intr =
+ REG_FIELD(LTR501_INTR, 0, 0);
static const struct reg_field reg_field_als_rate =
REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
static const struct reg_field reg_field_ps_rate =
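
The ltr501 fix swaps two reg_field definitions: in the interrupt register, bit 0 belongs to the proximity sensor and bit 1 to the ambient-light sensor, the reverse of what the driver had. A sketch of how such a REG_FIELD is consumed through the regmap API; the register address and bit position here are illustrative, not the chip's:

	#include <linux/err.h>
	#include <linux/regmap.h>

	/* REG_FIELD(reg, lsb, msb) names a bit range within a register */
	static const struct reg_field demo_intr_field = REG_FIELD(0x8f, 1, 1);

	static int demo_enable_intr(struct regmap *regmap)
	{
		struct regmap_field *f;
		int ret;

		f = regmap_field_alloc(regmap, demo_intr_field);
		if (IS_ERR(f))
			return PTR_ERR(f);

		ret = regmap_field_write(f, 1);	/* touches only the named bit */
		regmap_field_free(f);
		return ret;
	}
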
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index bf0bd7e03aff..9e6d1cdb7fcd 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -40,9 +40,9 @@
#define AS3935_AFE_PWR_BIT BIT(0)
#define AS3935_INT 0x03
-#define AS3935_INT_MASK 0x07
+#define AS3935_INT_MASK 0x0f
#define AS3935_EVENT_INT BIT(3)
-#define AS3935_NOISE_INT BIT(1)
+#define AS3935_NOISE_INT BIT(0)
#define AS3935_DATA 0x07
#define AS3935_DATA_MASK 0x3F
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index e6b7556d5221..cbc4216091c9 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -2088,8 +2088,10 @@ send_last:
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
- if (!ret)
+ if (!ret) {
+ qib_put_ss(&qp->r_sge);
goto rnr_nak;
+ }
wc.ex.imm_data = ohdr->u.rc.imm_data;
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 1a2b2620421e..6f4dc0fd2ca3 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1122,8 +1122,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
+ * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
@@ -1529,6 +1531,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
+ /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -1550,6 +1559,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
+ /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
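
The two new elantech entries extend a dmi_system_id table that is matched against the firmware's DMI strings at probe time. A minimal sketch of defining and checking such a table, with hypothetical names:

	#include <linux/dmi.h>

	static const struct dmi_system_id demo_force_crc[] = {
		{
			/* both matches must hit for the entry to match */
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
				DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
			},
		},
		{ }	/* zeroed terminator entry is required */
	};

	static bool demo_needs_crc(void)
	{
		/* dmi_check_system() returns how many entries matched */
		return dmi_check_system(demo_force_crc) > 0;
	}
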
diff --git a/drivers/media/platform/msm/ais/camera/camera.c b/drivers/media/platform/msm/ais/camera/camera.c
index 158b83c12d00..33808d18d4c4 100644
--- a/drivers/media/platform/msm/ais/camera/camera.c
+++ b/drivers/media/platform/msm/ais/camera/camera.c
@@ -491,13 +491,16 @@ static long camera_v4l2_vidioc_private_ioctl(struct file *filep, void *fh,
if (WARN_ON(!k_ioctl || !pvdev))
return -EIO;
+ if (cmd != VIDIOC_MSM_CAMERA_PRIVATE_IOCTL_CMD)
+ return -EINVAL;
+
switch (k_ioctl->id) {
case MSM_CAMERA_PRIV_IOCTL_ID_RETURN_BUF: {
struct msm_camera_return_buf ptr, *tmp = NULL;
MSM_CAM_GET_IOCTL_ARG_PTR(&tmp, &k_ioctl->ioctl_ptr,
sizeof(tmp));
- if (copy_from_user(&ptr, tmp,
+ if (copy_from_user(&ptr, (void __user *)tmp,
sizeof(struct msm_camera_return_buf))) {
return -EFAULT;
}
@@ -795,7 +798,7 @@ static long camera_handle_internal_compat_ioctl(struct file *file,
{
long rc = 0;
struct msm_camera_private_ioctl_arg k_ioctl;
- void __user *tmp_compat_ioctl_ptr = NULL;
+ void *tmp_compat_ioctl_ptr = NULL;
rc = msm_copy_camera_private_ioctl_args(arg,
&k_ioctl, &tmp_compat_ioctl_ptr);
@@ -810,11 +813,13 @@ static long camera_handle_internal_compat_ioctl(struct file *file,
k_ioctl.id, k_ioctl.size);
return -EINVAL;
}
- k_ioctl.ioctl_ptr = (__u64)tmp_compat_ioctl_ptr;
- if (!k_ioctl.ioctl_ptr) {
+
+ if (tmp_compat_ioctl_ptr == NULL) {
pr_debug("Invalid ptr for id %d", k_ioctl.id);
return -EINVAL;
}
+ k_ioctl.ioctl_ptr = (__u64)(uintptr_t)tmp_compat_ioctl_ptr;
+
rc = camera_v4l2_vidioc_private_ioctl(file, file->private_data,
0, cmd, (void *)&k_ioctl);
}
@@ -826,7 +831,7 @@ static long camera_handle_internal_compat_ioctl(struct file *file,
return rc;
}
-long camera_v4l2_compat_ioctl(struct file *file, unsigned int cmd,
+static long camera_v4l2_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
long ret = 0;
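
The compat-ioctl hunks above convert the 32-bit user pointer with compat_ptr(), reject NULL before use, and go through uintptr_t when storing it into the __u64 field, so the pointer-to-wider-integer cast is well defined and warning-free. A hedged sketch of that packing step, names hypothetical:

	#include <linux/compat.h>

	static int demo_pack_user_ptr(u32 ptr32, u64 *out)
	{
		void __user *p = compat_ptr(ptr32);	/* per-arch compat pointer widening */

		if (!p)
			return -EINVAL;

		/* cast via uintptr_t so the compiler and sparse do not warn
		 * about converting a pointer to a wider integer */
		*out = (u64)(uintptr_t)p;
		return 0;
	}
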
diff --git a/drivers/media/platform/msm/ais/common/cam_hw_ops.c b/drivers/media/platform/msm/ais/common/cam_hw_ops.c
index 073778c9edcc..cf28e0ca6536 100644
--- a/drivers/media/platform/msm/ais/common/cam_hw_ops.c
+++ b/drivers/media/platform/msm/ais/common/cam_hw_ops.c
@@ -50,7 +50,7 @@ struct cam_ahb_client_data {
static struct cam_ahb_client_data data;
-int get_vector_index(char *name)
+static int get_vector_index(char *name)
{
int i = 0, rc = -1;
@@ -213,7 +213,7 @@ err1:
}
EXPORT_SYMBOL(cam_ahb_clk_init);
-int cam_consolidate_ahb_vote(enum cam_ahb_clk_client id,
+static int cam_consolidate_ahb_vote(enum cam_ahb_clk_client id,
enum cam_ahb_clk_vote vote)
{
int i = 0;
diff --git a/drivers/media/platform/msm/ais/common/cam_smmu_api.c b/drivers/media/platform/msm/ais/common/cam_smmu_api.c
index d3b239e9f304..0f0e14506325 100644
--- a/drivers/media/platform/msm/ais/common/cam_smmu_api.c
+++ b/drivers/media/platform/msm/ais/common/cam_smmu_api.c
@@ -466,7 +466,7 @@ static enum dma_data_direction cam_smmu_translate_dir(
return DMA_NONE;
}
-void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
+static void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
{
unsigned int i;
int j = 0;
diff --git a/drivers/media/platform/msm/ais/common/cam_smmu_api.h b/drivers/media/platform/msm/ais/common/cam_smmu_api.h
index 4a13598dc719..26bd30a6c8c8 100644
--- a/drivers/media/platform/msm/ais/common/cam_smmu_api.h
+++ b/drivers/media/platform/msm/ais/common/cam_smmu_api.h
@@ -43,6 +43,10 @@ enum cam_smmu_map_dir {
CAM_SMMU_MAP_INVALID
};
+int cam_smmu_query_vaddr_in_range(int handle,
+ unsigned long fault_addr, unsigned long *start_addr,
+ unsigned long *end_addr, int *fd);
+
/**
* @param identifier: Unique identifier to be used by clients which they
* should get from device tree. CAM SMMU driver will
diff --git a/drivers/media/platform/msm/ais/common/cam_soc_api.c b/drivers/media/platform/msm/ais/common/cam_soc_api.c
index 118d665a44d3..92f3e4007390 100644
--- a/drivers/media/platform/msm/ais/common/cam_soc_api.c
+++ b/drivers/media/platform/msm/ais/common/cam_soc_api.c
@@ -36,7 +36,7 @@ struct msm_cam_bus_pscale_data {
struct mutex lock;
};
-struct msm_cam_bus_pscale_data g_cv[CAM_BUS_CLIENT_MAX];
+static struct msm_cam_bus_pscale_data g_cv[CAM_BUS_CLIENT_MAX];
/* Get all clocks from DT */
static int msm_camera_get_clk_info_internal(struct device *dev,
@@ -771,7 +771,7 @@ void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
char *device_name, int reserve_mem)
{
struct resource *mem;
- void *base;
+ void __iomem *base;
if (!pdev || !device_name) {
pr_err("Invalid params\n");
diff --git a/drivers/media/platform/msm/ais/common/msm_camera_io_util.c b/drivers/media/platform/msm/ais/common/msm_camera_io_util.c
index 8370f556a40d..22518c2cae7d 100644
--- a/drivers/media/platform/msm/ais/common/msm_camera_io_util.c
+++ b/drivers/media/platform/msm/ais/common/msm_camera_io_util.c
@@ -123,8 +123,8 @@ void msm_camera_io_memcpy_toio(void __iomem *dest_addr,
void __iomem *src_addr, u32 len)
{
int i;
- u32 *d = (u32 *) dest_addr;
- u32 *s = (u32 *) src_addr;
+ u32 __iomem *d = (u32 __iomem *) dest_addr;
+ u32 __iomem *s = (u32 __iomem *) src_addr;
for (i = 0; i < len; i++)
writel_relaxed(*s++, d++);
@@ -178,7 +178,7 @@ void msm_camera_io_dump(void __iomem *addr, int size, int enable)
{
char line_str[128], *p_str;
int i;
- u32 *p = (u32 *) addr;
+ u32 __iomem *p = (u32 __iomem *) addr;
u32 data;
CDBG("%s: addr=%pK size=%d\n", __func__, addr, size);
@@ -242,8 +242,8 @@ void msm_camera_io_memcpy_mb(void __iomem *dest_addr,
void __iomem *src_addr, u32 len)
{
int i;
- u32 *d = (u32 *) dest_addr;
- u32 *s = (u32 *) src_addr;
+ u32 __iomem *d = (u32 __iomem *) dest_addr;
+ u32 __iomem *s = (u32 __iomem *) src_addr;
/* This is a generic function for callers that need to do register
* writes with a memory barrier
*/
diff --git a/drivers/media/platform/msm/ais/common/msm_camera_io_util.h b/drivers/media/platform/msm/ais/common/msm_camera_io_util.h
index 338e24d45500..3bd6c5f4866e 100644
--- a/drivers/media/platform/msm/ais/common/msm_camera_io_util.h
+++ b/drivers/media/platform/msm/ais/common/msm_camera_io_util.h
@@ -40,6 +40,8 @@ void msm_camera_io_w(u32 data, void __iomem *addr);
void msm_camera_io_w_mb(u32 data, void __iomem *addr);
u32 msm_camera_io_r(void __iomem *addr);
u32 msm_camera_io_r_mb(void __iomem *addr);
+void msm_camera_io_memcpy_toio(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len);
void msm_camera_io_dump(void __iomem *addr, int size, int enable);
void msm_camera_io_memcpy(void __iomem *dest_addr,
void __iomem *src_addr, u32 len);
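
The __iomem annotations in msm_camera_io_util let sparse ("make C=1") verify that MMIO pointers are only touched through accessors such as writel_relaxed(), never dereferenced directly. A small sketch of a properly annotated copy loop, hypothetical names:

	#include <linux/io.h>
	#include <linux/types.h>

	static void demo_memcpy_toio(void __iomem *dst, const u32 *src, u32 words)
	{
		u32 __iomem *d = (u32 __iomem *)dst;	/* keep the address-space tag */
		u32 i;

		for (i = 0; i < words; i++)
			writel_relaxed(src[i], d + i);	/* accessor, not a dereference */
	}
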
diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
index 7a4acf6ec815..420083f019cf 100644
--- a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
@@ -745,9 +745,13 @@ static int msm_fd_s_fmt_vid_out(struct file *file,
static int msm_fd_reqbufs(struct file *file,
void *fh, struct v4l2_requestbuffers *req)
{
+ int ret;
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
- return vb2_reqbufs(&ctx->vb2_q, req);
+ mutex_lock(&ctx->fd_device->recovery_lock);
+ ret = vb2_reqbufs(&ctx->vb2_q, req);
+ mutex_unlock(&ctx->fd_device->recovery_lock);
+ return ret;
}
/*
@@ -759,9 +763,14 @@ static int msm_fd_reqbufs(struct file *file,
static int msm_fd_qbuf(struct file *file, void *fh,
struct v4l2_buffer *pb)
{
+ int ret;
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
- return vb2_qbuf(&ctx->vb2_q, pb);
+ mutex_lock(&ctx->fd_device->recovery_lock);
+ ret = vb2_qbuf(&ctx->vb2_q, pb);
+ mutex_unlock(&ctx->fd_device->recovery_lock);
+ return ret;
+
}
/*
@@ -773,9 +782,13 @@ static int msm_fd_qbuf(struct file *file, void *fh,
static int msm_fd_dqbuf(struct file *file,
void *fh, struct v4l2_buffer *pb)
{
+ int ret;
struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
- return vb2_dqbuf(&ctx->vb2_q, pb, file->f_flags & O_NONBLOCK);
+ mutex_lock(&ctx->fd_device->recovery_lock);
+ ret = vb2_dqbuf(&ctx->vb2_q, pb, file->f_flags & O_NONBLOCK);
+ mutex_unlock(&ctx->fd_device->recovery_lock);
+ return ret;
}
/*
diff --git a/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c
index 2133f9391433..585865b12387 100644
--- a/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c
@@ -76,7 +76,7 @@ static int msm_buf_check_head_sanity(struct msm_isp_bufq *bufq)
return rc;
}
-struct msm_isp_bufq *msm_isp_get_bufq(
+static struct msm_isp_bufq *msm_isp_get_bufq(
struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle)
{
@@ -161,7 +161,7 @@ static int msm_isp_free_bufq_handle(struct msm_isp_buf_mgr *buf_mgr,
/* Set everything except lock to 0 */
bufq->bufq_handle = 0;
- bufq->bufs = 0;
+ bufq->bufs = NULL;
bufq->vfe_id = 0;
bufq->output_id = 0;
bufq->num_bufs = 0;
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.c b/drivers/media/platform/msm/ais/isp/msm_isp.c
index 97c0f779cf73..d62b830535a3 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp.c
@@ -49,9 +49,6 @@ MODULE_DEVICE_TABLE(of, msm_vfe_dt_match);
#define OVERFLOW_BUFFER_LENGTH 64
static char stat_line[OVERFLOW_LENGTH];
-struct msm_isp_statistics stats;
-struct msm_isp_ub_info ub_info;
-
static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
struct msm_isp_bw_req_info *isp_req_hist);
@@ -107,8 +104,8 @@ static int vfe_debugfs_statistics_open(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t vfe_debugfs_statistics_read(struct file *t_file, char *t_char,
- size_t t_size_t, loff_t *t_loff_t)
+static ssize_t vfe_debugfs_statistics_read(struct file *t_file,
+ char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
{
int i;
uint64_t *ptr;
@@ -132,7 +129,7 @@ static ssize_t vfe_debugfs_statistics_read(struct file *t_file, char *t_char,
}
static ssize_t vfe_debugfs_statistics_write(struct file *t_file,
- const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+ const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
{
struct vfe_device *vfe_dev = (struct vfe_device *)
t_file->private_data;
@@ -149,7 +146,7 @@ static int bw_history_open(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t bw_history_read(struct file *t_file, char *t_char,
+static ssize_t bw_history_read(struct file *t_file, char __user *t_char,
size_t t_size_t, loff_t *t_loff_t)
{
int i;
@@ -194,7 +191,7 @@ static ssize_t bw_history_read(struct file *t_file, char *t_char,
}
static ssize_t bw_history_write(struct file *t_file,
- const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+ const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
{
struct msm_isp_bw_req_info *isp_req_hist =
(struct msm_isp_bw_req_info *) t_file->private_data;
@@ -210,7 +207,7 @@ static int ub_info_open(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t ub_info_read(struct file *t_file, char *t_char,
+static ssize_t ub_info_read(struct file *t_file, char __user *t_char,
size_t t_size_t, loff_t *t_loff_t)
{
int i;
@@ -241,7 +238,7 @@ static ssize_t ub_info_read(struct file *t_file, char *t_char,
}
static ssize_t ub_info_write(struct file *t_file,
- const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+ const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
{
struct vfe_device *vfe_dev =
(struct vfe_device *) t_file->private_data;
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c
index 8991433b2c67..d63282f80aca 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c
@@ -2708,7 +2708,6 @@ struct msm_vfe_hardware_info vfe47_hw_info = {
.process_camif_irq = msm_vfe47_process_input_irq,
.process_reset_irq = msm_vfe47_process_reset_irq,
.process_halt_irq = msm_vfe47_process_halt_irq,
- .process_reset_irq = msm_vfe47_process_reset_irq,
.process_reg_update = msm_vfe47_process_reg_update,
.process_axi_irq = msm_isp_process_axi_irq,
.process_stats_irq = msm_isp_process_stats_irq,
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.h b/drivers/media/platform/msm/ais/isp/msm_isp47.h
index b29fca61ce7c..9af0acd3656a 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp47.h
+++ b/drivers/media/platform/msm/ais/isp/msm_isp47.h
@@ -30,6 +30,8 @@ enum msm_vfe47_stats_comp_idx {
extern struct msm_vfe_hardware_info vfe47_hw_info;
+uint32_t msm_vfe47_ub_reg_offset(struct vfe_device *vfe_dev, int wm_idx);
+uint32_t msm_vfe47_get_ub_size(struct vfe_device *vfe_dev);
void msm_vfe47_read_irq_status(struct vfe_device *vfe_dev,
uint32_t *irq_status0, uint32_t *irq_status1);
void msm_vfe47_read_irq_status_and_clear(struct vfe_device *vfe_dev,
@@ -70,6 +72,8 @@ int32_t msm_vfe47_cfg_io_format(struct vfe_device *vfe_dev,
enum msm_vfe_axi_stream_src stream_src, uint32_t io_format);
int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev,
void *arg);
+int msm_vfe47_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
+ void *arg);
void msm_vfe47_cfg_fetch_engine(struct vfe_device *vfe_dev,
struct msm_vfe_pix_cfg *pix_cfg);
void msm_vfe47_cfg_testgen(struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h
index 0396fc4680f1..5ed89161b7f3 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h
@@ -27,6 +27,9 @@ int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
void msm_isp_axi_destroy_stream(
struct msm_vfe_axi_shared_data *axi_data, int stream_idx);
+int msm_isp_axi_get_num_planes(uint32_t output_format,
+ struct msm_vfe_axi_stream *stream_info);
+
int msm_isp_validate_axi_request(
struct msm_vfe_axi_shared_data *axi_data,
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
@@ -36,21 +39,34 @@ void msm_isp_axi_reserve_wm(
struct msm_vfe_axi_shared_data *axi_data,
struct msm_vfe_axi_stream *stream_info);
+void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info);
+
void msm_isp_axi_reserve_comp_mask(
struct msm_vfe_axi_shared_data *axi_data,
struct msm_vfe_axi_stream *stream_info);
+void msm_isp_axi_free_comp_mask(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info);
+
int msm_isp_axi_check_stream_state(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd);
+void msm_isp_check_for_output_error(struct vfe_device *vfe_dev,
+ struct msm_isp_timestamp *ts, struct msm_isp_sof_info *sof_info);
+
int msm_isp_calculate_framedrop(
struct msm_vfe_axi_shared_data *axi_data,
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
+void msm_isp_calculate_bandwidth(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info);
void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info);
int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_start_avtimer(void);
void msm_isp_get_avtimer_ts(struct msm_isp_timestamp *time_stamp);
int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
index 3b877b4cb994..6e89544161ee 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
@@ -863,6 +863,12 @@ int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
if (vfe_dev->stats_data.num_active_stream == 0)
vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
+ if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+ pr_err("%s invalid num_streams %d\n", __func__,
+ stream_cfg_cmd->num_streams);
+ return -EINVAL;
+ }
+
if (stream_cfg_cmd->enable) {
msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
@@ -891,7 +897,7 @@ int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
&update_cmd->update_info[i];
/* check array reference bounds */
if (STATS_IDX(update_info->stream_handle)
- > vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
pr_err("%s: stats idx %d out of bound!", __func__,
STATS_IDX(update_info->stream_handle));
return -EINVAL;
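
The comparison tightens from > to >= because an index equal to num_stats_type already points one element past the end of the per-stats array. A tiny sketch of the correct bound, hypothetical names:

	#include <linux/errno.h>
	#include <linux/types.h>

	static int demo_lookup(const u32 *arr, u32 n, u32 idx, u32 *out)
	{
		if (idx >= n)		/* idx == n is already one past the end of arr[n] */
			return -EINVAL;
		*out = arr[idx];
		return 0;
	}
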
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h
index 707901bc6271..ae438a675542 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.h
@@ -19,6 +19,8 @@
void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
uint32_t pingpong_status, struct msm_isp_timestamp *ts);
+int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_request_cmd *stream_req_cmd);
void msm_isp_stats_stream_update(struct vfe_device *vfe_dev);
int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg);
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_util.c
index 0353ab27cf19..9e5317eb2920 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_util.c
@@ -512,7 +512,7 @@ static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
return rc;
}
-int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
+static int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0;
struct msm_vfe_input_cfg *input_cfg = arg;
@@ -542,7 +542,7 @@ int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
return rc;
}
-int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg)
+static int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0;
struct msm_vfe_camif_cfg *camif_cfg = arg;
@@ -579,7 +579,7 @@ int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg)
}
-int msm_isp_operation_cfg(struct vfe_device *vfe_dev, void *arg)
+static int msm_isp_operation_cfg(struct vfe_device *vfe_dev, void *arg)
{
struct msm_vfe_operation_cfg *op_cfg = arg;
@@ -1233,14 +1233,16 @@ static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
case VFE_WRITE: {
msm_camera_io_memcpy(vfe_dev->vfe_base +
reg_cfg_cmd->u.rw_info.reg_offset,
- cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+ (void __iomem *)
+ (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4),
reg_cfg_cmd->u.rw_info.len);
break;
}
case VFE_WRITE_MB: {
msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
reg_cfg_cmd->u.rw_info.reg_offset,
- cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+ (void __iomem *)
+ (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4),
reg_cfg_cmd->u.rw_info.len);
break;
}
@@ -2295,12 +2297,12 @@ int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
}
#ifdef CONFIG_MSM_AVTIMER
-void msm_isp_end_avtimer(void)
+static void msm_isp_end_avtimer(void)
{
avcs_core_disable_power_collapse(0);
}
#else
-void msm_isp_end_avtimer(void)
+static void msm_isp_end_avtimer(void)
{
pr_err("AV Timer is not supported\n");
}
@@ -2408,7 +2410,7 @@ void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
}
}
-void msm_isp_dump_irq_debug(void)
+static void msm_isp_dump_irq_debug(void)
{
uint32_t index, count, i;
diff --git a/drivers/media/platform/msm/ais/msm.c b/drivers/media/platform/msm/ais/msm.c
index e8859b7db5cb..3e2c13b9cbbe 100644
--- a/drivers/media/platform/msm/ais/msm.c
+++ b/drivers/media/platform/msm/ais/msm.c
@@ -48,10 +48,10 @@ bool is_daemon_status = true;
/* config node event queue */
static struct v4l2_fh *msm_eventq;
-spinlock_t msm_eventq_lock;
+static spinlock_t msm_eventq_lock;
static struct pid *msm_pid;
-spinlock_t msm_pid_lock;
+static spinlock_t msm_pid_lock;
/*
* It takes 20 bytes + NULL character to write the
@@ -62,7 +62,7 @@ spinlock_t msm_pid_lock;
#define msm_dequeue(queue, type, member) ({ \
unsigned long flags; \
struct msm_queue_head *__q = (queue); \
- type *node = 0; \
+ type *node = NULL; \
spin_lock_irqsave(&__q->lock, flags); \
if (!list_empty(&__q->list)) { \
__q->len--; \
@@ -78,7 +78,7 @@ spinlock_t msm_pid_lock;
#define msm_delete_sd_entry(queue, type, member, q_node) ({ \
unsigned long flags; \
struct msm_queue_head *__q = (queue); \
- type *node = 0; \
+ type *node = NULL; \
spin_lock_irqsave(&__q->lock, flags); \
if (!list_empty(&__q->list)) { \
list_for_each_entry(node, &__q->list, member) \
@@ -95,7 +95,7 @@ spinlock_t msm_pid_lock;
#define msm_delete_entry(queue, type, member, q_node) ({ \
unsigned long flags; \
struct msm_queue_head *__q = (queue); \
- type *node = 0; \
+ type *node = NULL; \
spin_lock_irqsave(&__q->lock, flags); \
if (!list_empty(&__q->list)) { \
list_for_each_entry(node, &__q->list, member) \
@@ -131,7 +131,7 @@ typedef int (*msm_queue_func)(void *d1, void *d2);
#define msm_queue_traverse_action(queue, type, member, func, data) do {\
unsigned long flags; \
struct msm_queue_head *__q = (queue); \
- type *node = 0; \
+ type *node = NULL; \
msm_queue_func __f = (func); \
spin_lock_irqsave(&__q->lock, flags); \
if (!list_empty(&__q->list)) { \
@@ -147,7 +147,7 @@ typedef int (*msm_queue_find_func)(void *d1, void *d2);
#define msm_queue_find(queue, type, member, func, data) ({\
unsigned long flags; \
struct msm_queue_head *__q = (queue); \
- type *node = 0; \
+ type *node = NULL; \
typeof(node) __ret = NULL; \
msm_queue_find_func __f = (func); \
spin_lock_irqsave(&__q->lock, flags); \
@@ -1119,7 +1119,7 @@ long msm_copy_camera_private_ioctl_args(unsigned long arg,
return -EIO;
if (copy_from_user(&up_ioctl,
- (struct msm_camera_private_ioctl_arg *)arg,
+ (void __user *)arg,
sizeof(struct msm_camera_private_ioctl_arg)))
return -EFAULT;
diff --git a/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c
index 073b91a6d2d9..66751b1f0657 100644
--- a/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c
+++ b/drivers/media/platform/msm/ais/msm_buf_mgr/msm_generic_buf_mgr.c
@@ -51,7 +51,7 @@ static int32_t msm_buf_mngr_hdl_cont_get_buf(struct msm_buf_mngr_device *dev,
}
static int32_t msm_buf_mngr_get_buf(struct msm_buf_mngr_device *dev,
- void __user *argp)
+ void *argp)
{
unsigned long flags;
int32_t rc = 0;
@@ -465,7 +465,7 @@ static int msm_generic_buf_mngr_close(struct v4l2_subdev *sd,
return rc;
}
-int msm_cam_buf_mgr_ops(unsigned int cmd, void *argp)
+static int msm_cam_buf_mgr_ops(unsigned int cmd, void *argp)
{
int rc = 0;
@@ -531,7 +531,7 @@ static long msm_buf_mngr_subdev_ioctl(struct v4l2_subdev *sd,
{
int32_t rc = 0;
struct msm_buf_mngr_device *buf_mngr_dev = v4l2_get_subdevdata(sd);
- void __user *argp = (void __user *)arg;
+ void *argp = arg;
if (!buf_mngr_dev) {
pr_err("%s buf manager device NULL\n", __func__);
@@ -557,13 +557,13 @@ static long msm_buf_mngr_subdev_ioctl(struct v4l2_subdev *sd,
MSM_CAM_GET_IOCTL_ARG_PTR(&tmp, &k_ioctl.ioctl_ptr,
sizeof(tmp));
- if (copy_from_user(&buf_info, tmp,
+ if (copy_from_user(&buf_info, (void __user *)tmp,
sizeof(struct msm_buf_mngr_info))) {
return -EFAULT;
}
k_ioctl.ioctl_ptr = (uintptr_t)&buf_info;
- argp = &k_ioctl;
+ argp = (void *)&k_ioctl;
rc = msm_cam_buf_mgr_ops(cmd, argp);
}
break;
diff --git a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c
index 280bf4ebb596..2c1c0a34389a 100644
--- a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c
@@ -41,7 +41,7 @@ static int msm_vb2_queue_setup(struct vb2_queue *q,
return 0;
}
-int msm_vb2_buf_init(struct vb2_buffer *vb)
+static int msm_vb2_buf_init(struct vb2_buffer *vb)
{
struct msm_stream *stream;
struct msm_vb2_buffer *msm_vb2_buf;
diff --git a/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c
index 8df56fe526fe..1adb380f335f 100644
--- a/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c
@@ -522,7 +522,7 @@ static int32_t msm_actuator_piezo_move_focus(
CDBG("Enter\n");
if (copy_from_user(&ringing_params_kernel,
- &(move_params->ringing_params[0]),
+ (void __user *)&(move_params->ringing_params[0]),
sizeof(struct damping_params_t))) {
pr_err("copy_from_user failed\n");
return -EFAULT;
@@ -612,7 +612,7 @@ static int32_t msm_actuator_move_focus(
return -EFAULT;
}
if (copy_from_user(ringing_params_kernel,
- &(move_params->ringing_params[0]),
+ (void __user *)&(move_params->ringing_params[0]),
(sizeof(struct damping_params_t))*(a_ctrl->region_size))) {
pr_err("copy_from_user failed\n");
/* Free the allocated memory for damping parameters */
@@ -732,7 +732,7 @@ static int32_t msm_actuator_bivcm_move_focus(
return -EFAULT;
}
if (copy_from_user(ringing_params_kernel,
- &(move_params->ringing_params[0]),
+ (void __user *)&(move_params->ringing_params[0]),
(sizeof(struct damping_params_t))*(a_ctrl->region_size))) {
pr_err("copy_from_user failed\n");
/* Free the allocated memory for damping parameters */
@@ -1289,7 +1289,7 @@ static int32_t msm_actuator_set_param(struct msm_actuator_ctrl_t *a_ctrl,
a_ctrl->total_steps = set_info->af_tuning_params.total_steps;
if (copy_from_user(&a_ctrl->region_params,
- (void *)set_info->af_tuning_params.region_params,
+ (void __user *)set_info->af_tuning_params.region_params,
a_ctrl->region_size * sizeof(struct region_params_t)))
return -EFAULT;
@@ -1332,7 +1332,7 @@ static int32_t msm_actuator_set_param(struct msm_actuator_ctrl_t *a_ctrl,
}
if (copy_from_user(&a_ctrl->reg_tbl,
- (void *)set_info->actuator_params.reg_tbl_params,
+ (void __user *)set_info->actuator_params.reg_tbl_params,
a_ctrl->reg_tbl_size *
sizeof(struct msm_actuator_reg_params_t))) {
kfree(a_ctrl->i2c_reg_tbl);
@@ -1354,7 +1354,8 @@ static int32_t msm_actuator_set_param(struct msm_actuator_ctrl_t *a_ctrl,
return -EFAULT;
}
if (copy_from_user(init_settings,
- (void *)set_info->actuator_params.init_settings,
+ (void __user *)
+ set_info->actuator_params.init_settings,
set_info->actuator_params.init_setting_size *
sizeof(struct reg_settings_t))) {
kfree(init_settings);
@@ -1411,7 +1412,7 @@ static int msm_actuator_init(struct msm_actuator_ctrl_t *a_ctrl)
}
static int32_t msm_actuator_config(struct msm_actuator_ctrl_t *a_ctrl,
- void __user *argp)
+ void *argp)
{
struct msm_actuator_cfg_data *cdata =
(struct msm_actuator_cfg_data *)argp;
@@ -1571,7 +1572,7 @@ static long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
{
int rc;
struct msm_actuator_ctrl_t *a_ctrl = v4l2_get_subdevdata(sd);
- void __user *argp = (void __user *)arg;
+ void *argp = arg;
CDBG("Enter\n");
CDBG("%s:%d a_ctrl %pK argp %pK\n", __func__, __LINE__, a_ctrl, argp);
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h
index f88c0ef82499..f55e6c344ef1 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_0_hwreg.h
@@ -15,9 +15,9 @@
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v2_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+static uint8_t csid_lane_assign_v2_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
-struct csid_reg_parms_t csid_v2_0 = {
+static struct csid_reg_parms_t csid_v2_0 = {
/* MIPI CSID registers */
0x0,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h
index e2bb6cd499ff..9ba3555ff01f 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_2_2_hwreg.h
@@ -15,9 +15,9 @@
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v2_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+static uint8_t csid_lane_assign_v2_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
-struct csid_reg_parms_t csid_v2_2 = {
+static struct csid_reg_parms_t csid_v2_2 = {
/* MIPI CSID registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h
index 440f869692f7..c75c4167453c 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_0_hwreg.h
@@ -15,9 +15,9 @@
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v3_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+static uint8_t csid_lane_assign_v3_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
-struct csid_reg_parms_t csid_v3_0 = {
+static struct csid_reg_parms_t csid_v3_0 = {
/* MIPI CSID registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h
index dde47046b679..dc71f39a38f1 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_1_hwreg.h
@@ -15,9 +15,9 @@
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v3_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+static uint8_t csid_lane_assign_v3_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
-struct csid_reg_parms_t csid_v3_1 = {
+static struct csid_reg_parms_t csid_v3_1 = {
/* MIPI CSID registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h
index 5241a90fbc86..00085dbf94a0 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_2_hwreg.h
@@ -15,9 +15,9 @@
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v3_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+static uint8_t csid_lane_assign_v3_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
-struct csid_reg_parms_t csid_v3_2 = {
+static struct csid_reg_parms_t csid_v3_2 = {
/* MIPI CSID registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h
index 0e8ff6c0986d..1d465b66b33f 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_1_hwreg.h
@@ -14,9 +14,9 @@
#define MSM_CSID_3_4_1_HWREG_H
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v3_4_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+static uint8_t csid_lane_assign_v3_4_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
-struct csid_reg_parms_t csid_v3_4_1 = {
+static struct csid_reg_parms_t csid_v3_4_1 = {
/* MIPI CSID registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h
index 651526cb3db8..d78e68e090e7 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_2_hwreg.h
@@ -15,8 +15,8 @@
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v3_4_2[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
-struct csid_reg_parms_t csid_v3_4_2 = {
+static uint8_t csid_lane_assign_v3_4_2[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+static struct csid_reg_parms_t csid_v3_4_2 = {
/* MIPI CSID registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h
index fff29fc9d4c4..bbf4b287ffe4 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_4_3_hwreg.h
@@ -15,8 +15,8 @@
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v3_4_3[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
-struct csid_reg_parms_t csid_v3_4_3 = {
+static uint8_t csid_lane_assign_v3_4_3[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+static struct csid_reg_parms_t csid_v3_4_3 = {
/* MIPI CSID registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h
index f7d7d3548c4b..534ef3f5533c 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_1_hwreg.h
@@ -15,9 +15,9 @@
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v3_5_1[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+static uint8_t csid_lane_assign_v3_5_1[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
-struct csid_reg_parms_t csid_v3_5_1 = {
+static struct csid_reg_parms_t csid_v3_5_1 = {
/* MIPI CSID registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h
index b423b6e510a0..392d902d3e0c 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h
@@ -15,9 +15,9 @@
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v3_5[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+static uint8_t csid_lane_assign_v3_5[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
-struct csid_reg_parms_t csid_v3_5 = {
+static struct csid_reg_parms_t csid_v3_5 = {
/* MIPI CSID registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h
index b95a774ca737..6722974f889b 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_6_0_hwreg.h
@@ -15,8 +15,8 @@
#include <sensor/csid/msm_csid.h>
-uint8_t csid_lane_assign_v3_6_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
-struct csid_reg_parms_t csid_v3_6_0 = {
+static uint8_t csid_lane_assign_v3_6_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+static struct csid_reg_parms_t csid_v3_6_0 = {
/* MIPI CSID registers */
0x0,
0x4,
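
The long run of csid and csiphy hwreg hunks adds static to table definitions that live in headers; without it, two translation units including the same header collide at link time with multiple-definition errors (each includer now carries its own private copy of the table instead). A one-line sketch of the shape being fixed, hypothetical names:

	/* demo_hwreg.h -- a definition (not just a declaration) placed in a
	 * header must be static, or any two .c files including it will fail
	 * to link with a multiple-definition error.
	 */
	static const u8 demo_lane_assign[5] = {0, 1, 2, 3, 4};
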
diff --git a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
index 331ba939adfa..2b3eefa65606 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
@@ -708,7 +708,7 @@ static int msm_csid_release(struct csid_device *csid_dev)
return 0;
}
-static int32_t msm_csid_cmd(struct csid_device *csid_dev, void __user *arg)
+static int32_t msm_csid_cmd(struct csid_device *csid_dev, void *arg)
{
int rc = 0;
struct csid_cfg_data *cdata = (struct csid_cfg_data *)arg;
@@ -728,7 +728,7 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void __user *arg)
case CSID_TESTMODE_CFG: {
csid_dev->is_testmode = 1;
if (copy_from_user(&csid_dev->testmode_params,
- (void *)cdata->cfg.csid_testmode_params,
+ (void __user *)cdata->cfg.csid_testmode_params,
sizeof(struct msm_camera_csid_testmode_parms))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -741,7 +741,7 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void __user *arg)
int i = 0;
if (copy_from_user(&csid_params,
- (void *)cdata->cfg.csid_params,
+ (void __user *)cdata->cfg.csid_params,
sizeof(struct msm_camera_csid_params))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -790,7 +790,7 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void __user *arg)
int i = 0;
if (copy_from_user(&csid_params,
- (void *)cdata->cfg.csid_params,
+ (void __user *)cdata->cfg.csid_params,
sizeof(struct msm_camera_csid_params))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -892,7 +892,7 @@ static long msm_csid_subdev_ioctl(struct v4l2_subdev *sd,
#ifdef CONFIG_COMPAT
-static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void __user *arg)
+static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void *arg)
{
int rc = 0;
struct csid_cfg_data32 *arg32 = (struct csid_cfg_data32 *) (arg);
@@ -913,7 +913,8 @@ static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void __user *arg)
case CSID_TESTMODE_CFG: {
csid_dev->is_testmode = 1;
if (copy_from_user(&csid_dev->testmode_params,
- (void *)compat_ptr(arg32->cfg.csid_testmode_params),
+ (void __user *)
+ compat_ptr(arg32->cfg.csid_testmode_params),
sizeof(struct msm_camera_csid_testmode_parms))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -926,7 +927,7 @@ static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void __user *arg)
struct msm_camera_csid_params32 csid_params32;
if (copy_from_user(&csid_params32,
- (void *)compat_ptr(arg32->cfg.csid_params),
+ (void __user *)compat_ptr(arg32->cfg.csid_params),
sizeof(struct msm_camera_csid_params32))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -975,7 +976,7 @@ static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void __user *arg)
struct msm_camera_csid_params32 csid_params32;
if (copy_from_user(&csid_params32,
- (void *)compat_ptr(arg32->cfg.csid_params),
+ (void __user *)compat_ptr(arg32->cfg.csid_params),
sizeof(struct msm_camera_csid_params32))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -EFAULT;
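
The msm_csid hunks tag every copy_from_user() source with __user, letting sparse check that userspace memory crosses the kernel boundary only through the proper accessors. A sketch of the annotated fetch, hypothetical names:

	#include <linux/uaccess.h>

	static int demo_fetch(u64 uptr, void *dst, size_t len)
	{
		/* the source argument must carry the __user tag; sparse then
		 * verifies user memory is only read via copy_from_user() */
		if (copy_from_user(dst, (void __user *)(uintptr_t)uptr, len))
			return -EFAULT;
		return 0;
	}
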
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h
index 618926fa8341..3b377de66a2c 100644
--- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h
@@ -15,7 +15,7 @@
#include <sensor/csiphy/msm_csiphy.h>
-struct csiphy_reg_parms_t csiphy_v2_0 = {
+static struct csiphy_reg_parms_t csiphy_v2_0 = {
/* MIPI CSI PHY registers */
0x17C,
0x0,
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h
index 867aec2e0103..71b07299c342 100644
--- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h
@@ -15,7 +15,7 @@
#include <sensor/csiphy/msm_csiphy.h>
-struct csiphy_reg_parms_t csiphy_v2_2 = {
+static struct csiphy_reg_parms_t csiphy_v2_2 = {
/* MIPI CSI PHY registers */
0x17C,
0x0,
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h
index 69efdcc71499..8846fde0f6ed 100644
--- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h
@@ -15,7 +15,7 @@
#include <sensor/csiphy/msm_csiphy.h>
-struct csiphy_reg_parms_t csiphy_v3_0 = {
+static struct csiphy_reg_parms_t csiphy_v3_0 = {
/* MIPI CSI PHY registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h
index 7fc74a366a6c..044e16ef3848 100644
--- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h
@@ -15,7 +15,7 @@
#include <sensor/csiphy/msm_csiphy.h>
-struct csiphy_reg_parms_t csiphy_v3_1 = {
+static struct csiphy_reg_parms_t csiphy_v3_1 = {
/* MIPI CSI PHY registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h
index cdf62d46ee7d..c01f0540dfd2 100644
--- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h
@@ -15,7 +15,7 @@
#include <sensor/csiphy/msm_csiphy.h>
-struct csiphy_reg_parms_t csiphy_v3_2 = {
+static struct csiphy_reg_parms_t csiphy_v3_2 = {
/* MIPI CSI PHY registers */
0x0,
0x4,
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h
index 5af1ded189a6..78ac19993fee 100644
--- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h
@@ -18,14 +18,14 @@
#include <sensor/csiphy/msm_csiphy.h>
-struct csiphy_reg_parms_t csiphy_v3_4_2_1 = {
+static struct csiphy_reg_parms_t csiphy_v3_4_2_1 = {
.mipi_csiphy_interrupt_status0_addr = 0x8B0,
.mipi_csiphy_interrupt_clear0_addr = 0x858,
.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
.combo_clk_mask = 0x10,
};
-struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_1_3ph = {
+static struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_1_3ph = {
/* MIPI CSI PHY registers */
{0x814, 0x0},
{0x818, 0x1},
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h
index d85dd1ec3a48..e6072e747a63 100644
--- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h
@@ -18,14 +18,14 @@
#include <sensor/csiphy/msm_csiphy.h>
-struct csiphy_reg_parms_t csiphy_v3_4_2 = {
+static struct csiphy_reg_parms_t csiphy_v3_4_2 = {
.mipi_csiphy_interrupt_status0_addr = 0x8B0,
.mipi_csiphy_interrupt_clear0_addr = 0x858,
.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
.combo_clk_mask = 0x10,
};
-struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_3ph = {
+static struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_3ph = {
/* MIPI CSI PHY registers */
{0x814, 0x0},
{0x818, 0x1},
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
index 99b725a75c8f..bc70697cce5c 100644
--- a/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
@@ -18,14 +18,14 @@
#include <sensor/csiphy/msm_csiphy.h>
-struct csiphy_reg_parms_t csiphy_v3_5 = {
+static struct csiphy_reg_parms_t csiphy_v3_5 = {
.mipi_csiphy_interrupt_status0_addr = 0x8B0,
.mipi_csiphy_interrupt_clear0_addr = 0x858,
.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
.combo_clk_mask = 0x10,
};
-struct csiphy_reg_3ph_parms_t csiphy_v3_5_3ph = {
+static struct csiphy_reg_3ph_parms_t csiphy_v3_5_3ph = {
/* MIPI CSI PHY registers */
{0x814, 0x0},
{0x818, 0x1},
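The csiphy hwreg header hunks above are one repeated fix: register-parameter tables defined in headers gain the static qualifier, so each .c file that includes the header gets a private copy instead of emitting a global definition, avoiding multiple-definition link errors and sparse noise. A minimal sketch of the pattern, with a hypothetical table name:

/* example_hwreg.h -- illustration only; names are hypothetical */
struct example_reg_parms_t {
	unsigned int status0_addr;
	unsigned int clear0_addr;
};

/* 'static' keeps the table private to each translation unit that
 * includes this header, so no global symbol is emitted. */
static struct example_reg_parms_t example_v1_0 = {
	.status0_addr = 0x8B0,
	.clear0_addr  = 0x858,
};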
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c
index d146cc3d28a5..c3b087f61888 100644
--- a/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c
@@ -164,7 +164,7 @@ static int msm_csiphy_3phase_lane_config(
mipi_csiphy_3ph_lnn_ctrl1.data,
csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
mipi_csiphy_3ph_lnn_ctrl1.addr + 0x200*i);
- msm_camera_io_w(((csiphy_params->settle_cnt >> 8) & 0xff),
+ msm_camera_io_w(0,
csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
mipi_csiphy_3ph_lnn_ctrl2.addr + 0x200*i);
msm_camera_io_w((csiphy_params->settle_cnt & 0xff),
@@ -648,7 +648,7 @@ static int msm_csiphy_lane_config(struct csiphy_device *csiphy_dev,
return rc;
}
-void msm_csiphy_disable_irq(
+static void msm_csiphy_disable_irq(
struct csiphy_device *csiphy_dev)
{
void __iomem *csiphybase;
@@ -1207,7 +1207,7 @@ static int32_t msm_csiphy_cmd(struct csiphy_device *csiphy_dev, void *arg)
break;
case CSIPHY_CFG:
if (copy_from_user(&csiphy_params,
- (void *)cdata->cfg.csiphy_params,
+ (void __user *)cdata->cfg.csiphy_params,
sizeof(struct msm_camera_csiphy_params))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -EFAULT;
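Most copy_from_user() changes in this series are the same annotation fix: the userspace source pointer is cast to void __user * so sparse address-space checking passes, while handler parameters that actually receive kernel pointers (the subdev ioctl arg) drop the __user qualifier. A hedged sketch of the annotated copy, with hypothetical struct names:

#include <linux/uaccess.h>

struct example_params { unsigned int settle_cnt; };

struct example_cfg {
	void *params;	/* carries a userspace pointer through an ioctl union */
};

static int example_copy_params(struct example_cfg *cfg,
			       struct example_params *out)
{
	/* The cast tells sparse this really is a __user pointer. */
	if (copy_from_user(out, (void __user *)cfg->params, sizeof(*out)))
		return -EFAULT;
	return 0;
}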
diff --git a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
index b97156cbd486..6af589e5c230 100644
--- a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
@@ -54,7 +54,7 @@ static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
.i2c_poll = msm_camera_cci_i2c_poll,
};
-void msm_torch_brightness_set(struct led_classdev *led_cdev,
+static void msm_torch_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
if (!torch_trigger) {
@@ -202,7 +202,7 @@ static int32_t msm_flash_i2c_init(
}
if (copy_from_user(power_setting_array32,
- (void *)flash_init_info->power_setting_array,
+ (void __user *)flash_init_info->power_setting_array,
sizeof(struct msm_sensor_power_setting_array32))) {
pr_err("%s copy_from_user failed %d\n",
__func__, __LINE__);
@@ -248,7 +248,7 @@ static int32_t msm_flash_i2c_init(
} else
#endif
if (copy_from_user(&flash_ctrl->power_setting_array,
- (void *)flash_init_info->power_setting_array,
+ (void __user *)flash_init_info->power_setting_array,
sizeof(struct msm_sensor_power_setting_array))) {
pr_err("%s copy_from_user failed %d\n", __func__, __LINE__);
return -EFAULT;
@@ -298,7 +298,8 @@ static int32_t msm_flash_i2c_init(
goto msm_flash_i2c_init_fail;
}
- if (copy_from_user(settings, (void *)flash_init_info->settings,
+ if (copy_from_user(settings,
+ (void __user *)flash_init_info->settings,
sizeof(struct msm_camera_i2c_reg_setting_array))) {
kfree(settings);
pr_err("%s copy_from_user failed %d\n",
@@ -414,7 +415,7 @@ static int32_t msm_flash_i2c_write_setting_array(
if (!settings)
return -ENOMEM;
- if (copy_from_user(settings, (void *)flash_data->cfg.settings,
+ if (copy_from_user(settings, (void __user *)flash_data->cfg.settings,
sizeof(struct msm_camera_i2c_reg_setting_array))) {
kfree(settings);
pr_err("%s copy_from_user failed %d\n", __func__, __LINE__);
@@ -626,7 +627,7 @@ static int32_t msm_flash_release(
}
static int32_t msm_flash_config(struct msm_flash_ctrl_t *flash_ctrl,
- void __user *argp)
+ void *argp)
{
int32_t rc = 0;
struct msm_flash_cfg_data_t *flash_data =
@@ -701,7 +702,7 @@ static long msm_flash_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
struct msm_flash_ctrl_t *fctrl = NULL;
- void __user *argp = (void __user *)arg;
+ void *argp = arg;
CDBG("Enter\n");
@@ -1038,7 +1039,8 @@ static long msm_flash_subdev_do_ioctl(
case CFG_FLASH_INIT:
flash_data.cfg.flash_init_info = &flash_init_info;
if (copy_from_user(&flash_init_info32,
- (void *)compat_ptr(u32->cfg.flash_init_info),
+ (void __user *)
+ compat_ptr(u32->cfg.flash_init_info),
sizeof(struct msm_flash_init_info_t32))) {
pr_err("%s copy_from_user failed %d\n",
__func__, __LINE__);
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c
index 955be342e8cf..8f2fd0f9e24d 100644
--- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_cci_i2c.c
@@ -23,7 +23,7 @@ int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client,
enum msm_camera_i2c_data_type data_type)
{
int32_t rc = -EFAULT;
- unsigned char buf[client->addr_type+data_type];
+ unsigned char *buf = NULL;
struct msm_camera_cci_ctrl cci_ctrl;
if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
@@ -33,6 +33,11 @@ int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client,
&& data_type != MSM_CAMERA_I2C_WORD_DATA))
return rc;
+ buf = kzalloc((uint32_t)client->addr_type + (uint32_t)data_type,
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
cci_ctrl.cmd = MSM_CCI_I2C_READ;
cci_ctrl.cci_info = client->cci_client;
cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
@@ -42,6 +47,8 @@ int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client,
rc = v4l2_subdev_call(client->cci_client->cci_subdev,
core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
if (rc < 0) {
+ kfree(buf);
+ buf = NULL;
pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
return rc;
}
@@ -51,6 +58,8 @@ int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client,
else
*data = buf[0] << 8 | buf[1];
+ kfree(buf);
+ buf = NULL;
S_I2C_DBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
return rc;
}
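This read path drops a variable-length array on the kernel stack (unsigned char buf[client->addr_type + data_type]) in favor of a kzalloc()'d buffer freed on every exit path; the QUP I2C and SPI hunks below make the same conversion. The shape of the fix, as a minimal sketch with a hypothetical transfer helper:

static int example_read(unsigned int addr_len, unsigned int data_len)
{
	unsigned char *buf;
	int rc;

	/* Heap allocation replaces the stack VLA; the sizes come from
	 * validated enum values, so the sum stays small. */
	buf = kzalloc(addr_len + data_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = example_do_transfer(buf, addr_len + data_len); /* hypothetical */

	kfree(buf);	/* freed on success and failure alike */
	return rc;
}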
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c
index 071600ed5221..66300e3f7359 100644
--- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.c
@@ -685,7 +685,7 @@ ERROR2:
kfree(array);
ERROR1:
kfree(ps);
- power_setting_size = 0;
+ power_setting_size = NULL;
return rc;
}
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h
index a29ef21274c2..fdeeb4aebf00 100644
--- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_dt_util.h
@@ -62,6 +62,9 @@ int msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
int msm_camera_pinctrl_init
(struct msm_pinctrl_info *sensor_pctrl, struct device *dev);
+int msm_cam_sensor_handle_reg_gpio(int seq_val,
+ struct msm_camera_gpio_conf *gconf, int val);
+
int32_t msm_sensor_driver_get_gpio_data(
struct msm_camera_gpio_conf **gpio_conf,
struct device_node *of_node);
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c
index 9098b23dbc67..449951f5ffad 100644
--- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_qup_i2c.c
@@ -88,7 +88,8 @@ int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
return rc;
}
- buf = kzalloc(client->addr_type+data_type, GFP_KERNEL);
+ buf = kzalloc((uint32_t)client->addr_type + (uint32_t)data_type,
+ GFP_KERNEL);
if (!buf) {
S_I2C_DBG("%s:%d no memory\n", __func__, __LINE__);
return -ENOMEM;
@@ -179,7 +180,7 @@ int32_t msm_camera_qup_i2c_write(struct msm_camera_i2c_client *client,
enum msm_camera_i2c_data_type data_type)
{
int32_t rc = -EFAULT;
- unsigned char buf[client->addr_type+data_type];
+ unsigned char *buf = NULL;
uint8_t len = 0;
if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
@@ -188,6 +189,11 @@ int32_t msm_camera_qup_i2c_write(struct msm_camera_i2c_client *client,
&& data_type != MSM_CAMERA_I2C_WORD_DATA))
return rc;
+ buf = kzalloc((uint32_t)client->addr_type + (uint32_t)data_type,
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
S_I2C_DBG("%s reg addr = 0x%x data type: %d\n",
__func__, addr, data_type);
if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
@@ -219,6 +225,9 @@ int32_t msm_camera_qup_i2c_write(struct msm_camera_i2c_client *client,
rc = msm_camera_qup_i2c_txdata(client, buf, len);
if (rc < 0)
S_I2C_DBG("%s fail\n", __func__);
+
+ kfree(buf);
+ buf = NULL;
return rc;
}
@@ -226,7 +235,7 @@ int32_t msm_camera_qup_i2c_write_seq(struct msm_camera_i2c_client *client,
uint32_t addr, uint8_t *data, uint32_t num_byte)
{
int32_t rc = -EFAULT;
- unsigned char buf[client->addr_type+num_byte];
+ unsigned char *buf = NULL;
uint8_t len = 0, i = 0;
if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
@@ -234,6 +243,10 @@ int32_t msm_camera_qup_i2c_write_seq(struct msm_camera_i2c_client *client,
|| num_byte == 0)
return rc;
+ buf = kzalloc(client->addr_type+num_byte, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
S_I2C_DBG("%s reg addr = 0x%x num bytes: %d\n",
__func__, addr, num_byte);
if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
@@ -263,6 +276,9 @@ int32_t msm_camera_qup_i2c_write_seq(struct msm_camera_i2c_client *client,
rc = msm_camera_qup_i2c_txdata(client, buf, len+num_byte);
if (rc < 0)
S_I2C_DBG("%s fail\n", __func__);
+
+ kfree(buf);
+ buf = NULL;
return rc;
}
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c
index cd277f0ca0da..d0e27bcc4aac 100644
--- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.c
@@ -513,7 +513,7 @@ int32_t msm_camera_spi_write(struct msm_camera_i2c_client *client,
&client->spi_client->cmd_tbl.page_program;
uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
uint16_t len = 0;
- char buf[data_type];
+ char *buf = NULL;
char *tx;
int rc = -EINVAL;
@@ -524,10 +524,13 @@ int32_t msm_camera_spi_write(struct msm_camera_i2c_client *client,
&& data_type != MSM_CAMERA_I2C_WORD_DATA))
return rc;
S_I2C_DBG("Data: 0x%x\n", data);
+ buf = kzalloc(data_type, GFP_KERNEL);
+ if (!buf)
+ goto NOMEM;
len = header_len + (uint8_t)data_type;
tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
if (!tx)
- goto NOMEM;
+ goto FREEBUF;
if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
buf[0] = data;
SPIDBG("Byte %d: 0x%x\n", len, buf[0]);
@@ -540,6 +543,8 @@ int32_t msm_camera_spi_write(struct msm_camera_i2c_client *client,
if (rc < 0)
goto ERROR;
goto OUT;
+FREEBUF:
+ kfree(buf);
NOMEM:
pr_err("%s: memory allocation failed\n", __func__);
return -ENOMEM;
@@ -547,6 +552,7 @@ ERROR:
pr_err("%s: error write\n", __func__);
OUT:
kfree(tx);
+ kfree(buf);
return rc;
}
int32_t msm_camera_spi_write_table(struct msm_camera_i2c_client *client,
@@ -585,7 +591,7 @@ int32_t msm_camera_spi_write_table(struct msm_camera_i2c_client *client,
client->addr_type = client_addr_type;
return rc;
}
-uint32_t msm_get_burst_size(struct msm_camera_i2c_reg_array *reg_setting,
+static uint32_t msm_get_burst_size(struct msm_camera_i2c_reg_array *reg_setting,
uint32_t reg_size, uint32_t index, uint16_t burst_addr)
{
uint32_t i;
@@ -601,7 +607,7 @@ uint32_t msm_get_burst_size(struct msm_camera_i2c_reg_array *reg_setting,
}
#ifdef SPI_DYNAMIC_ALLOC
-int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client,
+static int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client,
struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
struct msm_camera_burst_info *info,
enum msm_camera_i2c_data_type data_type)
@@ -677,7 +683,7 @@ fail:
return rc;
}
#else /* SPI_DYNAMIC_ALLOC */
-int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client,
+static int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client,
struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
struct msm_camera_burst_info *info,
enum msm_camera_i2c_data_type data_type)
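With two heap buffers in msm_camera_spi_write(), the unwind labels are ordered so each failure frees exactly what was already allocated: FREEBUF releases buf when the tx allocation fails, and the shared OUT path releases both. The staged-unwind idiom in a generic form:

static int example_two_buffers(size_t n1, size_t n2)
{
	char *a, *b;
	int rc = 0;

	a = kzalloc(n1, GFP_KERNEL);
	if (!a)
		return -ENOMEM;		/* nothing allocated yet */

	b = kmalloc(n2, GFP_KERNEL);
	if (!b) {
		kfree(a);		/* unwind only the first buffer */
		return -ENOMEM;
	}

	/* ... use a and b ... */

	kfree(b);
	kfree(a);
	return rc;
}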
diff --git a/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h
index 28aa184ce630..9f87db1dbbfa 100644
--- a/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h
+++ b/drivers/media/platform/msm/ais/sensor/io/msm_camera_spi.h
@@ -83,6 +83,14 @@ uint16_t msm_camera_spi_get_hlen(struct msm_camera_spi_inst *inst)
return sizeof(inst->opcode) + inst->addr_len + inst->dummy_len;
}
+int32_t msm_camera_spi_tx_helper(struct msm_camera_i2c_client *client,
+ struct msm_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+ uint32_t num_byte, char *tx, char *rx);
+
+int32_t msm_camera_spi_tx_read(struct msm_camera_i2c_client *client,
+ struct msm_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+ uint32_t num_byte, char *tx, char *rx);
+
int32_t msm_camera_spi_read(struct msm_camera_i2c_client *client,
uint32_t addr, uint16_t *data,
enum msm_camera_i2c_data_type data_type);
diff --git a/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c b/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c
index bfb960ea862a..68ab4003b666 100644
--- a/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c
+++ b/drivers/media/platform/msm/ais/sensor/ir_cut/msm_ir_cut.c
@@ -282,7 +282,7 @@ static int32_t msm_ir_cut_handle_init(
}
static int32_t msm_ir_cut_config(struct msm_ir_cut_ctrl_t *ir_cut_ctrl,
- void __user *argp)
+ void *argp)
{
int32_t rc = -EINVAL;
struct msm_ir_cut_cfg_data_t *ir_cut_data =
@@ -327,7 +327,7 @@ static long msm_ir_cut_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
struct msm_ir_cut_ctrl_t *fctrl = NULL;
- void __user *argp = (void __user *)arg;
+ void *argp = arg;
CDBG("Enter\n");
diff --git a/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c b/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c
index 803bce440ee1..9e200071f9eb 100644
--- a/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c
+++ b/drivers/media/platform/msm/ais/sensor/ir_led/msm_ir_led.c
@@ -196,7 +196,7 @@ static int32_t msm_ir_led_handle_init(
}
static int32_t msm_ir_led_config(struct msm_ir_led_ctrl_t *ir_led_ctrl,
- void __user *argp)
+ void *argp)
{
int32_t rc = -EINVAL;
struct msm_ir_led_cfg_data_t *ir_led_data =
@@ -241,7 +241,7 @@ static long msm_ir_led_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
struct msm_ir_led_ctrl_t *fctrl = NULL;
- void __user *argp = (void __user *)arg;
+ void *argp = arg;
struct msm_ir_led_cfg_data_t ir_led_data = {0};
if (!sd) {
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor.c b/drivers/media/platform/msm/ais/sensor/msm_sensor.c
index c671ea71d2a7..a276b03e5294 100644
--- a/drivers/media/platform/msm/ais/sensor/msm_sensor.c
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor.c
@@ -343,7 +343,7 @@ static long msm_sensor_subdev_ioctl(struct v4l2_subdev *sd,
{
int rc = 0;
struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(sd);
- void __user *argp = (void __user *)arg;
+ void *argp = arg;
if (!s_ctrl) {
pr_err("%s s_ctrl NULL\n", __func__);
@@ -421,7 +421,7 @@ long msm_sensor_subdev_fops_ioctl(struct file *file,
}
static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
- void __user *argp)
+ void *argp)
{
struct sensorb_cfg_data32 *cdata = (struct sensorb_cfg_data32 *)argp;
int32_t rc = 0;
@@ -498,7 +498,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
}
if (copy_from_user(&conf_array32,
- (void *)compat_ptr(cdata->cfg.setting),
+ (void __user *)compat_ptr(cdata->cfg.setting),
sizeof(struct msm_camera_i2c_reg_setting32))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -525,7 +525,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
break;
}
if (copy_from_user(reg_setting,
- (void *)(conf_array.reg_setting),
+ (void __user *)(conf_array.reg_setting),
conf_array.size *
sizeof(struct msm_camera_i2c_reg_array))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
@@ -571,7 +571,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
(struct msm_camera_i2c_read_config *)
compat_ptr(cdata->cfg.setting);
- if (copy_from_user(&read_config, read_config_ptr,
+ if (copy_from_user(&read_config, (void __user *)read_config_ptr,
sizeof(struct msm_camera_i2c_read_config))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -640,7 +640,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
goto DONE;
if (copy_from_user(&write_config32,
- (void *)compat_ptr(cdata->cfg.setting),
+ (void __user *)compat_ptr(cdata->cfg.setting),
sizeof(
struct msm_camera_i2c_array_write_config32))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
@@ -682,7 +682,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
break;
}
if (copy_from_user(reg_setting,
- (void *)(write_config.conf_array.reg_setting),
+ (void __user *)(write_config.conf_array.reg_setting),
write_config.conf_array.size *
sizeof(struct msm_camera_i2c_reg_array))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
@@ -753,7 +753,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
}
if (copy_from_user(&conf_array32,
- (void *)compat_ptr(cdata->cfg.setting),
+ (void __user *)compat_ptr(cdata->cfg.setting),
sizeof(struct msm_camera_i2c_seq_reg_setting32))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -780,7 +780,8 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
rc = -ENOMEM;
break;
}
- if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ if (copy_from_user(reg_setting,
+ (void __user *)conf_array.reg_setting,
conf_array.size *
sizeof(struct msm_camera_i2c_seq_reg_array))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
@@ -863,7 +864,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
goto DONE;
if (copy_from_user(&stop_setting32,
- (void *)compat_ptr((cdata->cfg.setting)),
+ (void __user *)compat_ptr((cdata->cfg.setting)),
sizeof(struct msm_camera_i2c_reg_setting32))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -890,7 +891,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
break;
}
if (copy_from_user(stop_setting->reg_setting,
- (void *)reg_setting,
+ (void __user *)reg_setting,
stop_setting->size *
sizeof(struct msm_camera_i2c_reg_array))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
@@ -949,7 +950,7 @@ DONE:
}
#endif
-int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
+int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
{
struct sensorb_cfg_data *cdata = (struct sensorb_cfg_data *)argp;
int32_t rc = 0;
@@ -1026,7 +1027,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
}
if (copy_from_user(&conf_array,
- (void *)cdata->cfg.setting,
+ (void __user *)cdata->cfg.setting,
sizeof(struct msm_camera_i2c_reg_setting))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -1046,7 +1047,8 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
rc = -ENOMEM;
break;
}
- if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ if (copy_from_user(reg_setting,
+ (void __user *)conf_array.reg_setting,
conf_array.size *
sizeof(struct msm_camera_i2c_reg_array))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
@@ -1089,7 +1091,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
read_config_ptr =
(struct msm_camera_i2c_read_config *)cdata->cfg.setting;
- if (copy_from_user(&read_config, read_config_ptr,
+ if (copy_from_user(&read_config, (void __user *)read_config_ptr,
sizeof(struct msm_camera_i2c_read_config))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -1153,7 +1155,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
goto DONE;
if (copy_from_user(&write_config,
- (void *)cdata->cfg.setting,
+ (void __user *)cdata->cfg.setting,
sizeof(struct msm_camera_i2c_array_write_config))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -1178,7 +1180,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
break;
}
if (copy_from_user(reg_setting,
- (void *)(write_config.conf_array.reg_setting),
+ (void __user *)(write_config.conf_array.reg_setting),
write_config.conf_array.size *
sizeof(struct msm_camera_i2c_reg_array))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
@@ -1243,7 +1245,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
}
if (copy_from_user(&conf_array,
- (void *)cdata->cfg.setting,
+ (void __user *)cdata->cfg.setting,
sizeof(struct msm_camera_i2c_seq_reg_setting))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -1265,7 +1267,8 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
rc = -ENOMEM;
break;
}
- if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ if (copy_from_user(reg_setting,
+ (void __user *)conf_array.reg_setting,
conf_array.size *
sizeof(struct msm_camera_i2c_seq_reg_array))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
@@ -1349,7 +1352,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
goto DONE;
if (copy_from_user(stop_setting,
- (void *)cdata->cfg.setting,
+ (void __user *)cdata->cfg.setting,
sizeof(struct msm_camera_i2c_reg_setting))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -1371,7 +1374,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
break;
}
if (copy_from_user(stop_setting->reg_setting,
- (void *)reg_setting,
+ (void __user *)reg_setting,
stop_setting->size *
sizeof(struct msm_camera_i2c_reg_array))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor.h b/drivers/media/platform/msm/ais/sensor/msm_sensor.h
index 060383b05170..eacd3b05420c 100644
--- a/drivers/media/platform/msm/ais/sensor/msm_sensor.h
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor.h
@@ -94,7 +94,7 @@ struct msm_sensor_ctrl_t {
struct msm_sensor_init_t s_init;
};
-int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp);
+int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp);
int msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl);
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c
index 80c15717325c..c02972e5e993 100644
--- a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c
@@ -385,7 +385,7 @@ static int32_t msm_sensor_get_pw_settings_compat(
pr_err("failed: no memory ps32");
return -ENOMEM;
}
- if (copy_from_user(ps32, (void *)us_ps, sizeof(*ps32) * size)) {
+ if (copy_from_user(ps32, (void __user *)us_ps, sizeof(*ps32) * size)) {
pr_err("failed: copy_from_user");
kfree(ps32);
return -EFAULT;
@@ -413,22 +413,18 @@ static int32_t msm_sensor_create_pd_settings(void *setting,
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
- int i = 0;
- struct msm_sensor_power_setting32 *power_setting_iter =
- (struct msm_sensor_power_setting32 *)compat_ptr((
- (struct msm_camera_sensor_slave_info32 *)setting)->
- power_setting_array.power_setting);
-
- for (i = 0; i < size_down; i++) {
- pd[i].config_val = power_setting_iter[i].config_val;
- pd[i].delay = power_setting_iter[i].delay;
- pd[i].seq_type = power_setting_iter[i].seq_type;
- pd[i].seq_val = power_setting_iter[i].seq_val;
+ rc = msm_sensor_get_pw_settings_compat(
+ pd, pu, size_down);
+ if (rc < 0) {
+ pr_err("failed");
+ return -EFAULT;
}
} else
#endif
{
- if (copy_from_user(pd, (void *)pu, sizeof(*pd) * size_down)) {
+ if (copy_from_user(pd,
+ (void __user *)pu,
+ sizeof(*pd) * size_down)) {
pr_err("failed: copy_from_user");
return -EFAULT;
}
@@ -480,7 +476,8 @@ static int32_t msm_sensor_get_power_down_settings(void *setting,
}
} else
#endif
- if (copy_from_user(pd, (void *)slave_info->power_setting_array.
+ if (copy_from_user(pd,
+ (void __user *)slave_info->power_setting_array.
power_down_setting, sizeof(*pd) * size_down)) {
pr_err("failed: copy_from_user");
kfree(pd);
@@ -546,7 +543,8 @@ static int32_t msm_sensor_get_power_up_settings(void *setting,
#endif
{
if (copy_from_user(pu,
- (void *)slave_info->power_setting_array.power_setting,
+ (void __user *)
+ slave_info->power_setting_array.power_setting,
sizeof(*pu) * size)) {
pr_err("failed: copy_from_user");
kfree(pu);
@@ -659,7 +657,7 @@ int32_t msm_sensor_driver_probe(void *setting,
rc = -ENOMEM;
goto free_slave_info;
}
- if (copy_from_user((void *)slave_info32, setting,
+ if (copy_from_user((void *)slave_info32, (void __user *)setting,
sizeof(*slave_info32))) {
pr_err("failed: copy_from_user");
rc = -EFAULT;
@@ -710,7 +708,7 @@ int32_t msm_sensor_driver_probe(void *setting,
#endif
{
if (copy_from_user(slave_info,
- (void *)setting, sizeof(*slave_info))) {
+ (void __user *)setting, sizeof(*slave_info))) {
pr_err("failed: copy_from_user");
rc = -EFAULT;
goto free_slave_info;
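The open-coded loop that widened each 32-bit power setting is replaced with a call to msm_sensor_get_pw_settings_compat(), so the compat conversion lives in one place; the camera_v2 copy of this file gets the identical cleanup below. A sketch of what such a widening helper typically does, following the fields the removed loop touched:

static int example_get_pw_settings_compat(struct msm_sensor_power_setting *pd,
					  void __user *us_ps, int size)
{
	int i;
	struct msm_sensor_power_setting32 *ps32;

	ps32 = kcalloc(size, sizeof(*ps32), GFP_KERNEL);
	if (!ps32)
		return -ENOMEM;

	if (copy_from_user(ps32, us_ps, sizeof(*ps32) * size)) {
		kfree(ps32);
		return -EFAULT;
	}

	for (i = 0; i < size; i++) {	/* widen field by field */
		pd[i].config_val = ps32[i].config_val;
		pd[i].delay = ps32[i].delay;
		pd[i].seq_type = ps32[i].seq_type;
		pd[i].seq_val = ps32[i].seq_val;
	}

	kfree(ps32);
	return 0;
}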
diff --git a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
index f3147b127438..28a5402a4359 100644
--- a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
+++ b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
@@ -382,7 +382,7 @@ static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl,
return -EFAULT;
}
if (copy_from_user(settings,
- (void *)set_info->ois_params.settings,
+ (void __user *)set_info->ois_params.settings,
set_info->ois_params.setting_size *
sizeof(struct reg_settings_ois_t))) {
kfree(settings);
@@ -407,7 +407,7 @@ static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl,
static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl,
- void __user *argp)
+ void *argp)
{
struct msm_ois_cfg_data *cdata =
(struct msm_ois_cfg_data *)argp;
@@ -449,7 +449,7 @@ static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl,
} else
#endif
if (copy_from_user(&conf_array,
- (void *)cdata->cfg.settings,
+ (void __user *)cdata->cfg.settings,
sizeof(struct msm_camera_i2c_seq_reg_setting))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
rc = -EFAULT;
@@ -470,7 +470,8 @@ static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl,
rc = -ENOMEM;
break;
}
- if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ if (copy_from_user(reg_setting,
+ (void __user *)conf_array.reg_setting,
conf_array.size *
sizeof(struct msm_camera_i2c_seq_reg_array))) {
pr_err("%s:%d failed\n", __func__, __LINE__);
@@ -495,7 +496,7 @@ static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl,
}
static int32_t msm_ois_config_download(struct msm_ois_ctrl_t *o_ctrl,
- void __user *argp)
+ void *argp)
{
struct msm_ois_cfg_download_data *cdata =
(struct msm_ois_cfg_download_data *)argp;
@@ -606,7 +607,7 @@ static long msm_ois_subdev_ioctl(struct v4l2_subdev *sd,
{
int rc;
struct msm_ois_ctrl_t *o_ctrl = v4l2_get_subdevdata(sd);
- void __user *argp = (void __user *)arg;
+ void *argp = arg;
CDBG("Enter\n");
CDBG("%s:%d o_ctrl %pK argp %pK\n", __func__, __LINE__, o_ctrl, argp);
@@ -805,7 +806,7 @@ static long msm_ois_subdev_do_ioctl(
break;
case CFG_OIS_I2C_WRITE_SEQ_TABLE:
if (copy_from_user(&settings32,
- (void *)compat_ptr(u32->cfg.settings),
+ (void __user *)compat_ptr(u32->cfg.settings),
sizeof(
struct msm_camera_i2c_seq_reg_setting32))) {
pr_err("copy_from_user failed\n");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 891e528f75f1..a41d7dba490e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -469,17 +469,11 @@ static int32_t msm_sensor_create_pd_settings(void *setting,
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
- int i = 0;
- struct msm_sensor_power_setting32 *power_setting_iter =
- (struct msm_sensor_power_setting32 *)compat_ptr((
- (struct msm_camera_sensor_slave_info32 *)setting)->
- power_setting_array.power_setting);
-
- for (i = 0; i < size_down; i++) {
- pd[i].config_val = power_setting_iter[i].config_val;
- pd[i].delay = power_setting_iter[i].delay;
- pd[i].seq_type = power_setting_iter[i].seq_type;
- pd[i].seq_val = power_setting_iter[i].seq_val;
+ rc = msm_sensor_get_pw_settings_compat(
+ pd, pu, size_down);
+ if (rc < 0) {
+ pr_err("failed");
+ return -EFAULT;
}
} else
#endif
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 7388dab92c34..037c6f3b12ab 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1832,10 +1832,10 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
break;
}
- case HAL_PARAM_VENC_H264_GENERATE_AUDNAL:
+ case HAL_PARAM_VENC_GENERATE_AUDNAL:
{
create_pkt_enable(pkt->rg_property_data,
- HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL,
+ HFI_PROPERTY_PARAM_VENC_GENERATE_AUDNAL,
((struct hal_enable *)pdata)->enable);
pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
break;
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index a4cd4e34c7d0..c5816511e056 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -861,14 +861,14 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.step = 1,
},
{
- .id = V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER,
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER,
.name = "H264 AU Delimiter",
.type = V4L2_CTRL_TYPE_BOOLEAN,
- .minimum = V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED,
- .maximum = V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_ENABLED,
+ .minimum = V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED,
+ .maximum = V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_ENABLED,
.step = 1,
.default_value =
- V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED,
+ V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED,
},
{
.id = V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL,
@@ -3317,14 +3317,14 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
pdata = &vui_timing_info;
break;
}
- case V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER:
- property_id = HAL_PARAM_VENC_H264_GENERATE_AUDNAL;
+ case V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER:
+ property_id = HAL_PARAM_VENC_GENERATE_AUDNAL;
switch (ctrl->val) {
- case V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_DISABLED:
+ case V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED:
enable.enable = 0;
break;
- case V4L2_MPEG_VIDC_VIDEO_H264_AU_DELIMITER_ENABLED:
+ case V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_ENABLED:
enable.enable = 1;
break;
default:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index f3d50c6a1e0c..7caf61cb6799 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -531,13 +531,60 @@ static inline void save_v4l2_buffer(struct v4l2_buffer *b,
}
}
+static int __map_and_update_binfo(struct msm_vidc_inst *inst,
+ struct buffer_info *binfo,
+ struct v4l2_buffer *b, int i)
+{
+ int rc = 0;
+ struct msm_smem *same_fd_handle = NULL;
+
+ same_fd_handle = get_same_fd_buffer(
+ inst, b->m.planes[i].reserved[0]);
+
+ if (same_fd_handle) {
+ binfo->device_addr[i] =
+ same_fd_handle->device_addr + binfo->buff_off[i];
+ b->m.planes[i].m.userptr = binfo->device_addr[i];
+ binfo->handle[i] = same_fd_handle;
+ } else {
+ binfo->handle[i] = map_buffer(inst, &b->m.planes[i],
+ get_hal_buffer_type(inst, b));
+ if (!binfo->handle[i])
+ rc = -EINVAL;
+
+ binfo->mapped[i] = true;
+ binfo->device_addr[i] = binfo->handle[i]->device_addr +
+ binfo->buff_off[i];
+ b->m.planes[i].m.userptr = binfo->device_addr[i];
+ }
+
+ return rc;
+}
+
+static int __handle_fw_referenced_buffers(struct msm_vidc_inst *inst,
+ struct buffer_info *binfo,
+ struct v4l2_buffer *b)
+{
+ int i = 0, rc = 0;
+
+ if (EXTRADATA_IDX(b->length)) {
+ i = EXTRADATA_IDX(b->length);
+ if (b->m.planes[i].length)
+ rc = __map_and_update_binfo(inst, binfo, b, i);
+ }
+
+ if (rc)
+ dprintk(VIDC_ERR, "%s: Failed to map extradata\n", __func__);
+
+ return rc;
+}
+
int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
{
struct buffer_info *binfo = NULL;
struct buffer_info *temp = NULL, *iterator = NULL;
int plane = 0;
int i = 0, rc = 0;
- struct msm_smem *same_fd_handle = NULL;
if (!b || !inst) {
dprintk(VIDC_ERR, "%s: invalid input\n", __func__);
@@ -613,33 +660,17 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
rc = 0;
goto exit;
} else if (rc == 2) {
- rc = -EEXIST;
+ rc = __handle_fw_referenced_buffers(inst, temp, b);
+ if (!rc)
+ rc = -EEXIST;
goto exit;
}
- same_fd_handle = get_same_fd_buffer(
- inst, b->m.planes[i].reserved[0]);
-
populate_buf_info(binfo, b, i);
- if (same_fd_handle) {
- binfo->device_addr[i] =
- same_fd_handle->device_addr + binfo->buff_off[i];
- b->m.planes[i].m.userptr = binfo->device_addr[i];
- binfo->mapped[i] = false;
- binfo->handle[i] = same_fd_handle;
- } else {
- binfo->handle[i] = map_buffer(inst, &b->m.planes[i],
- get_hal_buffer_type(inst, b));
- if (!binfo->handle[i]) {
- rc = -EINVAL;
- goto exit;
- }
- binfo->mapped[i] = true;
- binfo->device_addr[i] = binfo->handle[i]->device_addr +
- binfo->buff_off[i];
- b->m.planes[i].m.userptr = binfo->device_addr[i];
- }
+ rc = __map_and_update_binfo(inst, binfo, b, i);
+ if (rc)
+ goto exit;
/* We maintain one ref count for all planes*/
if (!i && is_dynamic_output_buffer_mode(b, inst)) {
@@ -727,6 +758,7 @@ int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
temp->handle[i] = 0;
temp->device_addr[i] = 0;
temp->uvaddr[i] = 0;
+ temp->mapped[i] = false;
}
}
if (!keep_node) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
index 3e269576c126..9bc313adb10a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
@@ -407,8 +407,10 @@ static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst)
if (dcvs->etb_counter < total_input_buf) {
dcvs->etb_counter++;
- if (dcvs->etb_counter != total_input_buf)
- return rc;
+ if (dcvs->etb_counter != total_input_buf) {
+ return msm_comm_scale_clocks_load(core, dcvs->load,
+ LOAD_CALC_NO_QUIRKS);
+ }
}
dprintk(VIDC_PROF,
@@ -425,7 +427,7 @@ static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst)
}
if (fw_pending_bufs >= DCVS_ENC_HIGH_THR &&
- dcvs->load <= dcvs->load_low) {
+ dcvs->load < dcvs->load_high) {
dcvs->load = dcvs->load_high;
dcvs->prev_freq_increased = true;
} else {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 820c8685a75b..6cc5f9f50ba1 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -201,7 +201,7 @@ enum hal_property {
HAL_PARAM_VENC_H264_ENTROPY_CABAC_MODEL,
HAL_CONFIG_VENC_MAX_BITRATE,
HAL_PARAM_VENC_H264_VUI_TIMING_INFO,
- HAL_PARAM_VENC_H264_GENERATE_AUDNAL,
+ HAL_PARAM_VENC_GENERATE_AUDNAL,
HAL_PARAM_VENC_MAX_NUM_B_FRAMES,
HAL_PARAM_BUFFER_ALLOC_MODE,
HAL_PARAM_VDEC_FRAME_ASSEMBLY,
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index bb9958b0a819..31af06cd88ef 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -340,7 +340,7 @@ struct hfi_buffer_info {
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014)
#define HFI_PROPERTY_PARAM_VENC_H264_PPS_ID \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x015)
-#define HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL \
+#define HFI_PROPERTY_PARAM_VENC_GENERATE_AUDNAL \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x016)
#define HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x017)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2a4abf736d89..d86795bf9453 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -551,7 +551,7 @@ config VEXPRESS_SYSCFG
config UID_SYS_STATS
bool "Per-UID statistics"
- depends on PROFILING
+ depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING
help
Per UID based cpu time statistics exported to /proc/uid_cputime
Per UID based io statistics exported to /proc/uid_io
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 783337d22f36..10a02934bfc0 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -158,11 +158,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
/* Do this outside the status_mutex to avoid a circular dependency with
* the locking in cxl_mmap_fault() */
- if (copy_from_user(&work, uwork,
- sizeof(struct cxl_ioctl_start_work))) {
- rc = -EFAULT;
- goto out;
- }
+ if (copy_from_user(&work, uwork, sizeof(work)))
+ return -EFAULT;
mutex_lock(&ctx->status_mutex);
if (ctx->status != OPENED) {
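Two small hardenings are combined in the cxl hunk: the user copy happens before status_mutex is taken (the retained comment explains the circular-dependency risk with the locking in cxl_mmap_fault()), and sizeof(work) replaces the spelled-out struct type so the size always tracks the variable. The general shape, with hypothetical names:

static long example_start_work(struct example_ctx *ctx,
			       struct example_work __user *uwork)
{
	struct example_work work;

	/* Copy first: faulting in uwork while holding the mutex could
	 * recurse into a fault handler that needs the same lock. */
	if (copy_from_user(&work, uwork, sizeof(work)))
		return -EFAULT;

	mutex_lock(&ctx->status_mutex);
	/* ... check ctx->status, then consume 'work' ... */
	mutex_unlock(&ctx->status_mutex);
	return 0;
}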
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 500185546599..7cdcd69cecf4 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -4379,9 +4379,9 @@ int qseecom_start_app(struct qseecom_handle **handle,
return -EINVAL;
}
- if (strlen(app_name) >= MAX_APP_NAME_SIZE) {
+ if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
pr_err("The app_name (%s) with length %zu is not valid\n",
- app_name, strlen(app_name));
+ app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
return -EINVAL;
}
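strnlen() bounds the scan at MAX_APP_NAME_SIZE, so an unterminated app_name can no longer send strlen() past the caller's buffer; a result equal to the bound means the name is either too long or missing its terminator, and both cases are rejected. The check in a self-contained form:

#define EXAMPLE_NAME_MAX 64	/* stands in for MAX_APP_NAME_SIZE */

static int example_validate_name(const char *name)
{
	/* strnlen() reads at most EXAMPLE_NAME_MAX bytes; hitting the
	 * bound means "too long or not NUL-terminated". */
	if (strnlen(name, EXAMPLE_NAME_MAX) == EXAMPLE_NAME_MAX)
		return -EINVAL;
	return 0;
}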
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 091370f4ea40..3c9d311106cd 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -94,7 +94,7 @@ static struct uid_entry *find_or_register_uid(uid_t uid)
static int uid_cputime_show(struct seq_file *m, void *v)
{
- struct uid_entry *uid_entry;
+ struct uid_entry *uid_entry = NULL;
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
cputime_t utime;
@@ -112,7 +112,8 @@ static int uid_cputime_show(struct seq_file *m, void *v)
read_lock(&tasklist_lock);
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
- uid_entry = find_or_register_uid(uid);
+ if (!uid_entry || uid_entry->uid != uid)
+ uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
read_unlock(&tasklist_lock);
rt_mutex_unlock(&uid_lock);
@@ -251,7 +252,7 @@ static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
static void update_io_stats_all_locked(void)
{
- struct uid_entry *uid_entry;
+ struct uid_entry *uid_entry = NULL;
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
unsigned long bkt;
@@ -264,7 +265,8 @@ static void update_io_stats_all_locked(void)
rcu_read_lock();
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
- uid_entry = find_or_register_uid(uid);
+ if (!uid_entry || uid_entry->uid != uid)
+ uid_entry = find_or_register_uid(uid);
if (!uid_entry)
continue;
add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
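Both uid_sys_stats loops walk every thread in the system, and consecutive threads usually share a UID, so the previous lookup result is reused until the UID actually changes; find_or_register_uid() now runs once per UID transition instead of once per thread. The memoization in isolation, assuming the iteration keeps one process's threads adjacent:

	struct uid_entry *uid_entry = NULL;
	uid_t uid;

	do_each_thread(temp, task) {
		uid = from_kuid_munged(user_ns, task_uid(task));
		/* Reuse the cached entry while the UID repeats. */
		if (!uid_entry || uid_entry->uid != uid)
			uid_entry = find_or_register_uid(uid);
		if (!uid_entry)
			continue;
		/* ... accumulate this task's stats into uid_entry ... */
	} while_each_thread(temp, task);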
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 3b423b0ad8e7..f280744578e4 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -156,7 +156,8 @@ static const struct sdhci_ops sdhci_iproc_ops = {
};
static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
- .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
.ops = &sdhci_iproc_ops,
};
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index feb9cbe1f068..d908c3fed7c9 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2366,21 +2366,6 @@ out:
return ret;
}
-/*
- * Reset vreg by ensuring it is off during probe. A call
- * to enable vreg is needed to balance disable vreg
- */
-static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
-{
- int ret;
-
- ret = sdhci_msm_setup_vreg(pdata, 1, true);
- if (ret)
- return ret;
- ret = sdhci_msm_setup_vreg(pdata, 0, true);
- return ret;
-}
-
/* This init function should be called only once for each SDHC slot */
static int sdhci_msm_vreg_init(struct device *dev,
struct sdhci_msm_pltfm_data *pdata,
@@ -2415,7 +2400,7 @@ static int sdhci_msm_vreg_init(struct device *dev,
if (ret)
goto vdd_reg_deinit;
}
- ret = sdhci_msm_vreg_reset(pdata);
+
if (ret)
dev_err(dev, "vreg reset failed (%d)\n", ret);
goto out;
@@ -2592,7 +2577,9 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
io_level = REQ_IO_HIGH;
}
if (irq_status & CORE_PWRCTL_BUS_OFF) {
- ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
+ if (msm_host->pltfm_init_done)
+ ret = sdhci_msm_setup_vreg(msm_host->pdata,
+ false, false);
if (!ret) {
ret = sdhci_msm_setup_pins(msm_host->pdata, false);
ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
@@ -4668,6 +4655,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto vreg_deinit;
}
+ msm_host->pltfm_init_done = true;
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index b63b4df3ded3..79949c2c537f 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -226,6 +226,7 @@ struct sdhci_msm_host {
bool mci_removed;
const struct sdhci_msm_offset *offset;
bool core_3_0v_support;
+ bool pltfm_init_done;
};
extern char *saved_command_line;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index c82ab87fcbe8..e5911ccb2148 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1949,7 +1949,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+ return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d147610a06f..090e00650601 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2714,10 +2714,14 @@ static int cxgb_up(struct adapter *adap)
if (err)
goto irq_err;
}
+
+ mutex_lock(&uld_mutex);
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE;
+ mutex_unlock(&uld_mutex);
+
notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
update_clip(adap);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 8a1d9fffd7d6..26255862d1cf 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5260,9 +5260,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
struct be_adapter *adapter = netdev_priv(dev);
u8 l4_hdr = 0;
- /* The code below restricts offload features for some tunneled packets.
+ /* The code below restricts offload features for some tunneled and
+ * Q-in-Q packets.
* Offload features for normal (non tunnel) packets are unchanged.
*/
+ features = vlan_features_check(skb, features);
if (!skb->encapsulation ||
!(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
return features;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ff665493ca97..52f2230062e7 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -713,6 +713,8 @@ static int ethoc_open(struct net_device *dev)
if (ret)
return ret;
+ napi_enable(&priv->napi);
+
ethoc_init_ring(priv, dev->mem_start);
ethoc_reset(priv);
@@ -725,7 +727,6 @@ static int ethoc_open(struct net_device *dev)
}
phy_start(priv->phy);
- napi_enable(&priv->napi);
if (netif_msg_ifup(priv)) {
dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 0240552b50f3..d2701c53ed68 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -203,34 +203,6 @@ static int marvell_config_aneg(struct phy_device *phydev)
{
int err;
- /* The Marvell PHY has an errata which requires
- * that certain registers get written in order
- * to restart autonegotiation */
- err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1d, 0x1f);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0x200c);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1d, 0x5);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0x100);
- if (err < 0)
- return err;
-
err = marvell_set_polarity(phydev, phydev->mdix);
if (err < 0)
return err;
@@ -264,6 +236,42 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0;
}
+static int m88e1101_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ /* This Marvell PHY has an errata which requires
+ * that certain registers get written in order
+ * to restart autonegotiation
+ */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1d, 0x1f);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0x200c);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1d, 0x5);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0x100);
+ if (err < 0)
+ return err;
+
+ return marvell_config_aneg(phydev);
+}
+
#ifdef CONFIG_OF_MDIO
/*
* Set and/or override some configuration registers based on the
@@ -993,7 +1001,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1101",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &marvell_config_aneg,
+ .config_aneg = &m88e1101_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
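The errata register writes move out of the shared marvell_config_aneg() into a new m88e1101_config_aneg() that applies the workaround and then delegates to the common path; only the 88E1101 driver entry points at the wrapper, so other Marvell PHYs stop receiving writes meant for one part. The delegation shape, with hypothetical helper names:

static int example_model_config_aneg(struct phy_device *phydev)
{
	/* Model-specific quirk first, then the shared implementation. */
	int err = example_apply_errata(phydev);		/* hypothetical */

	if (err < 0)
		return err;
	return example_common_config_aneg(phydev);	/* hypothetical */
}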
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index c6f5d9a6bec6..582d8f0c6266 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -730,6 +730,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0e2a19e58923..7f7c87762bc6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1415,6 +1415,7 @@ static const struct net_device_ops virtnet_netdev = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = virtnet_busy_poll,
#endif
+ .ndo_features_check = passthru_features_check,
};
static void virtnet_config_changed_work(struct work_struct *work)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 590750ab6564..9a986ccd42e5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -77,6 +77,8 @@ static const u8 all_zeros_mac[ETH_ALEN];
static int vxlan_sock_add(struct vxlan_dev *vxlan);
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
+
/* per-network namespace private data for this module */
struct vxlan_net {
struct list_head vxlan_list;
@@ -1052,6 +1054,8 @@ static void __vxlan_sock_release(struct vxlan_sock *vs)
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
+ vxlan_vs_del_dev(vxlan);
+
__vxlan_sock_release(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
__vxlan_sock_release(vxlan->vn6_sock);
@@ -2255,6 +2259,15 @@ static void vxlan_cleanup(unsigned long arg)
mod_timer(&vxlan->age_timer, next_timer);
}
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+ spin_lock(&vn->sock_lock);
+ hlist_del_init_rcu(&vxlan->hlist);
+ spin_unlock(&vn->sock_lock);
+}
+
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -3028,12 +3041,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
-
- spin_lock(&vn->sock_lock);
- if (!hlist_unhashed(&vxlan->hlist))
- hlist_del_rcu(&vxlan->hlist);
- spin_unlock(&vn->sock_lock);
gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 313eb43cbf2c..d451430cfba7 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -796,7 +796,7 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
if (QCA_REV_WCN3990(ar)) {
time_left = wait_for_completion_timeout(&ar->peer_delete_done,
- 5 * HZ);
+ 50 * HZ);
if (time_left == 0) {
ath10k_warn(ar, "Timeout in receiving peer delete response\n");
@@ -7752,6 +7752,81 @@ static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
},
};
+static const struct ieee80211_iface_limit ath10k_wcn3990_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_wcn3990_qcs_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_wcn3990_if_limit_ibss[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC),
+ },
+};
+
+static struct ieee80211_iface_combination ath10k_wcn3990_qcs_if_comb[] = {
+ {
+ .limits = ath10k_wcn3990_if_limit,
+ .num_different_channels = 1,
+ .max_interfaces = 4,
+ .n_limits = ARRAY_SIZE(ath10k_wcn3990_if_limit),
+ },
+ {
+ .limits = ath10k_wcn3990_qcs_if_limit,
+ .num_different_channels = 2,
+ .max_interfaces = 4,
+ .n_limits = ARRAY_SIZE(ath10k_wcn3990_qcs_if_limit),
+ },
+ {
+ .limits = ath10k_wcn3990_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_wcn3990_if_limit_ibss),
+ },
+};
+
static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
{
.max = 1,
@@ -7983,6 +8058,14 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ if (QCA_REV_WCN3990(ar)) {
+ ar->hw->wiphy->iface_combinations =
+ ath10k_wcn3990_qcs_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_wcn3990_qcs_if_comb);
+ break;
+ }
if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
ar->hw->wiphy->iface_combinations =
ath10k_tlv_qcs_if_comb;
@@ -7993,7 +8076,6 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_tlv_if_comb);
}
- ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index e07b120f791f..63bb7686b811 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -960,6 +960,9 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
len, true);
+ if (len < sizeof(struct ieee80211_mgmt))
+ return -EINVAL;
+
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index d3691c64d47a..6eefb9e61ec4 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -805,8 +805,12 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
struct wireless_dev *wdev = wil_to_wdev(wil);
struct cfg80211_mgmt_tx_params params;
int rc;
- void *frame = kmalloc(len, GFP_KERNEL);
+ void *frame;
+ if (!len)
+ return -EINVAL;
+
+ frame = kmalloc(len, GFP_KERNEL);
if (!frame)
return -ENOMEM;
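Both wil6210 changes validate the user-supplied length before allocating: mgmt-tx frames shorter than a full struct ieee80211_mgmt are rejected, and the debugfs write path refuses len == 0 rather than passing it to kmalloc() (which returns the non-NULL ZERO_SIZE_PTR, so the !frame check alone would not catch it). A sketch of the debugfs side:

static ssize_t example_write(const char __user *buf, size_t len)
{
	void *frame;

	if (!len)	/* kmalloc(0) yields ZERO_SIZE_PTR, not NULL */
		return -EINVAL;

	frame = kmalloc(len, GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	/* ... copy_from_user(frame, buf, len) and transmit ... */

	kfree(frame);
	return len;
}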
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1f445f357da1..888e9cfef51a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx_skbs[id] = skb;
ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
- BUG_ON((signed short)ref < 0);
+ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
queue->grant_rx_ref[id] = ref;
page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -437,7 +437,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
- BUG_ON((signed short)ref < 0);
+ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
gfn, GNTMAP_readonly);
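The old BUG_ON((signed short)ref < 0) truncated the 32-bit grant reference to 16 bits, so any valid ref at or above 0x8000 would crash the kernel; the replacement treats only genuine errno-range values as errors and merely warns. A small compilable demonstration, with IS_ERR_VALUE() re-created locally:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	unsigned int ref = 0x8001;             /* valid grant ref above 2^15 */
	unsigned int err = (unsigned int)-12;  /* -ENOMEM smuggled in a ref */

	printf("old check on valid ref: %d\n", (signed short)ref < 0);  /* 1: bogus */
	printf("new check on valid ref: %d\n",
	       IS_ERR_VALUE((unsigned long)(int)ref));                  /* 0 */
	printf("new check on error:     %d\n",
	       IS_ERR_VALUE((unsigned long)(int)err));                  /* 1 */
	return 0;
}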
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 2f11c6dd7e05..ddb2388c5006 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016 , The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -67,6 +67,7 @@ static char *debugfs_buf;
static u32 debugfs_buf_size;
static u32 debugfs_buf_used;
static int wraparound;
+static struct mutex sps_debugfs_lock;
struct dentry *dent;
struct dentry *dfile_info;
@@ -85,6 +86,7 @@ static struct sps_bam *phy2bam(phys_addr_t phys_addr);
/* record debug info for debugfs */
void sps_debugfs_record(const char *msg)
{
+ mutex_lock(&sps_debugfs_lock);
if (debugfs_record_enabled) {
if (debugfs_buf_used + MAX_MSG_LEN >= debugfs_buf_size) {
debugfs_buf_used = 0;
@@ -98,6 +100,7 @@ void sps_debugfs_record(const char *msg)
debugfs_buf_size - debugfs_buf_used,
"\n**** end line of sps log ****\n\n");
}
+ mutex_unlock(&sps_debugfs_lock);
}
/* read the recorded debug info to userspace */
@@ -107,6 +110,7 @@ static ssize_t sps_read_info(struct file *file, char __user *ubuf,
int ret = 0;
int size;
+ mutex_lock(&sps_debugfs_lock);
if (debugfs_record_enabled) {
if (wraparound)
size = debugfs_buf_size - MAX_MSG_LEN;
@@ -116,6 +120,7 @@ static ssize_t sps_read_info(struct file *file, char __user *ubuf,
ret = simple_read_from_buffer(ubuf, count, ppos,
debugfs_buf, size);
}
+ mutex_unlock(&sps_debugfs_lock);
return ret;
}
@@ -161,11 +166,13 @@ static ssize_t sps_set_info(struct file *file, const char __user *buf,
new_buf_size = buf_size_kb * SZ_1K;
+ mutex_lock(&sps_debugfs_lock);
if (debugfs_record_enabled) {
if (debugfs_buf_size == new_buf_size) {
/* need do nothing */
pr_info("sps:debugfs: input buffer size "
"is the same as before.\n");
+ mutex_unlock(&sps_debugfs_lock);
return count;
} else {
/* release the current buffer */
@@ -185,12 +192,14 @@ static ssize_t sps_set_info(struct file *file, const char __user *buf,
if (!debugfs_buf) {
debugfs_buf_size = 0;
pr_err("sps:fail to allocate memory for debug_fs.\n");
+ mutex_unlock(&sps_debugfs_lock);
return -ENOMEM;
}
debugfs_buf_used = 0;
wraparound = false;
debugfs_record_enabled = true;
+ mutex_unlock(&sps_debugfs_lock);
return count;
}
@@ -239,6 +248,7 @@ static ssize_t sps_set_logging_option(struct file *file, const char __user *buf,
return count;
}
+ mutex_lock(&sps_debugfs_lock);
if (((option == 0) || (option == 2)) &&
((logging_option == 1) || (logging_option == 3))) {
debugfs_record_enabled = false;
@@ -250,6 +260,7 @@ static ssize_t sps_set_logging_option(struct file *file, const char __user *buf,
}
logging_option = option;
+ mutex_unlock(&sps_debugfs_lock);
return count;
}
@@ -587,6 +598,8 @@ static void sps_debugfs_init(void)
goto bam_log_level_err;
}
+ mutex_init(&sps_debugfs_lock);
+
return;
bam_log_level_err:
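All three debugfs paths above (record, read, resize) now take sps_debugfs_lock around the shared buffer. A pthread-based userspace sketch of the same single-lock pattern, assuming a toy wraparound log:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 256

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
static char log_buf[BUF_SZ];
static size_t log_used;

static void log_record(const char *msg)
{
	pthread_mutex_lock(&log_lock);
	size_t n = strlen(msg);

	if (log_used + n + 1 >= BUF_SZ)
		log_used = 0;		/* wrap around, like the driver */
	memcpy(log_buf + log_used, msg, n + 1);
	log_used += n;
	pthread_mutex_unlock(&log_lock);
}

static void *writer(void *arg)
{
	for (int i = 0; i < 1000; i++)
		log_record(arg);
	return NULL;
}

int main(void)	/* build with: cc -pthread */
{
	pthread_t a, b;

	pthread_create(&a, NULL, writer, (void *)"tick ");
	pthread_create(&b, NULL, writer, (void *)"tock ");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("used=%zu\n", log_used);	/* no torn updates under the lock */
	return 0;
}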
diff --git a/drivers/platform/msm/sps/spsi.h b/drivers/platform/msm/sps/spsi.h
index 1b4ca69bee16..38ee76be13c1 100644
--- a/drivers/platform/msm/sps/spsi.h
+++ b/drivers/platform/msm/sps/spsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -145,11 +145,6 @@ extern u8 print_limit_option;
pr_info(msg, ##args); \
} \
} while (0)
-#define SPS_DEBUGFS(msg, args...) do { \
- char buf[MAX_MSG_LEN]; \
- snprintf(buf, MAX_MSG_LEN, msg"\n", ##args); \
- sps_debugfs_record(buf); \
- } while (0)
#define SPS_ERR(dev, msg, args...) do { \
if (logging_option != 1) { \
if (unlikely(print_limit_option > 2)) \
@@ -157,8 +152,6 @@ extern u8 print_limit_option;
else \
pr_err(msg, ##args); \
} \
- if (unlikely(debugfs_record_enabled)) \
- SPS_DEBUGFS(msg, ##args); \
SPS_IPC(3, dev, msg, args); \
} while (0)
#define SPS_INFO(dev, msg, args...) do { \
@@ -168,8 +161,6 @@ extern u8 print_limit_option;
else \
pr_info(msg, ##args); \
} \
- if (unlikely(debugfs_record_enabled)) \
- SPS_DEBUGFS(msg, ##args); \
SPS_IPC(3, dev, msg, args); \
} while (0)
#define SPS_DBG(dev, msg, args...) do { \
@@ -181,8 +172,6 @@ extern u8 print_limit_option;
pr_info(msg, ##args); \
} else \
pr_debug(msg, ##args); \
- if (unlikely(debugfs_record_enabled)) \
- SPS_DEBUGFS(msg, ##args); \
if (dev) { \
if ((dev)->ipc_loglevel <= 0) \
SPS_IPC(0, dev, msg, args); \
@@ -197,8 +186,6 @@ extern u8 print_limit_option;
pr_info(msg, ##args); \
} else \
pr_debug(msg, ##args); \
- if (unlikely(debugfs_record_enabled)) \
- SPS_DEBUGFS(msg, ##args); \
if (dev) { \
if ((dev)->ipc_loglevel <= 1) \
SPS_IPC(1, dev, msg, args); \
@@ -213,8 +200,6 @@ extern u8 print_limit_option;
pr_info(msg, ##args); \
} else \
pr_debug(msg, ##args); \
- if (unlikely(debugfs_record_enabled)) \
- SPS_DEBUGFS(msg, ##args); \
if (dev) { \
if ((dev)->ipc_loglevel <= 2) \
SPS_IPC(2, dev, msg, args); \
@@ -229,8 +214,6 @@ extern u8 print_limit_option;
pr_info(msg, ##args); \
} else \
pr_debug(msg, ##args); \
- if (unlikely(debugfs_record_enabled)) \
- SPS_DEBUGFS(msg, ##args); \
if (dev) { \
if ((dev)->ipc_loglevel <= 3) \
SPS_IPC(3, dev, msg, args); \
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index e3c85ab705c4..e03681ec13cc 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -3136,6 +3136,7 @@ static int fg_psy_set_property(struct power_supply *psy,
pval->intval);
return -EINVAL;
}
+ break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
rc = fg_set_constant_chg_voltage(chip, pval->intval);
break;
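The one-line fix above adds the break that keeps a rejected CHARGE_FULL write from falling through into the CONSTANT_CHARGE_VOLTAGE handler. A tiny compilable illustration of that fall-through class of bug:

#include <stdio.h>

static void handle(int prop, int fixed)
{
	switch (prop) {
	case 0:
		printf("  validate charge_full\n");
		if (fixed)
			break;		/* the added break */
		/* fall through when the break is missing */
	case 1:
		printf("  set constant charge voltage\n");
		break;
	}
}

int main(void)
{
	printf("buggy:\n");
	handle(0, 0);	/* runs both handlers */
	printf("fixed:\n");
	handle(0, 1);	/* runs only the first */
	return 0;
}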
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 803851d2b3ef..49cd7aa6fdac 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -266,6 +266,10 @@ module_param_named(
debug_mask, __debug_mask, int, S_IRUSR | S_IWUSR
);
+static int __weak_chg_icl_ua = 500000;
+module_param_named(
+ weak_chg_icl_ua, __weak_chg_icl_ua, int, S_IRUSR | S_IWUSR);
+
#define MICRO_1P5A 1500000
#define MICRO_P1A 100000
#define OTG_DEFAULT_DEGLITCH_TIME_MS 50
@@ -2109,7 +2113,7 @@ static struct smb_irq_info smb2_irqs[] = {
[SWITCH_POWER_OK_IRQ] = {
.name = "switcher-power-ok",
.handler = smblib_handle_switcher_power_ok,
- .storm_data = {true, 1000, 3},
+ .storm_data = {true, 1000, 8},
},
};
@@ -2303,6 +2307,7 @@ static int smb2_probe(struct platform_device *pdev)
chg->dev = &pdev->dev;
chg->param = v1_params;
chg->debug_mask = &__debug_mask;
+ chg->weak_chg_icl_ua = &__weak_chg_icl_ua;
chg->mode = PARALLEL_MASTER;
chg->irq_info = smb2_irqs;
chg->name = "PMI";
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index ef62631f6d98..22944f122c44 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -630,8 +630,29 @@ int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
static void smblib_uusb_removal(struct smb_charger *chg)
{
int rc;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
cancel_delayed_work_sync(&chg->pl_enable_work);
+
+ if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+ rc = regulator_disable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
+ rc);
+ }
+
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata, WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
@@ -3127,6 +3148,8 @@ void smblib_usb_plugin_hard_reset_locked(struct smb_charger *chg)
int rc;
u8 stat;
bool vbus_rising;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
@@ -3136,10 +3159,23 @@ void smblib_usb_plugin_hard_reset_locked(struct smb_charger *chg)
vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
- if (vbus_rising)
+ if (vbus_rising) {
smblib_cc2_sink_removal_exit(chg);
- else
+ } else {
smblib_cc2_sink_removal_enter(chg);
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata,
+ WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER,
+ false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
+ }
power_supply_changed(chg->usb_psy);
smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n",
@@ -3152,6 +3188,8 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
int rc;
u8 stat;
bool vbus_rising;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
@@ -3188,8 +3226,18 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
schedule_delayed_work(&chg->pl_enable_work,
msecs_to_jiffies(PL_DELAY_MS));
} else {
- if (chg->wa_flags & BOOST_BACK_WA)
- vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata,
+ WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER,
+ false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
@@ -3568,9 +3616,30 @@ static void typec_sink_removal(struct smb_charger *chg)
static void smblib_handle_typec_removal(struct smb_charger *chg)
{
int rc;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
chg->cc2_detach_wa_active = false;
+ if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+ rc = regulator_disable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
+ rc);
+ }
+
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata, WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
+
/* reset APSD voters */
vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
@@ -3791,10 +3860,23 @@ irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data)
return IRQ_HANDLED;
}
+static void smblib_bb_removal_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ bb_removal_work.work);
+
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+ vote(chg->awake_votable, BOOST_BACK_VOTER, false, 0);
+}
+
+#define BOOST_BACK_UNVOTE_DELAY_MS 750
+#define BOOST_BACK_STORM_COUNT 3
+#define WEAK_CHG_STORM_COUNT 8
irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
+ struct storm_watch *wdata = &irq_data->storm_data;
int rc, usb_icl;
u8 stat;
@@ -3816,8 +3898,32 @@ irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
return IRQ_HANDLED;
if (is_storming(&irq_data->storm_data)) {
- smblib_err(chg, "Reverse boost detected: voting 0mA to suspend input\n");
- vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0);
+ /* This could be a weak charger; reduce the ICL */
+ if (!is_client_vote_enabled(chg->usb_icl_votable,
+ WEAK_CHARGER_VOTER)) {
+ smblib_err(chg,
+ "Weak charger detected: voting %dmA ICL\n",
+ *chg->weak_chg_icl_ua / 1000);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ true, *chg->weak_chg_icl_ua);
+ /*
+ * Reset the storm data and set the storm threshold
+ * to BOOST_BACK_STORM_COUNT for reverse-boost detection.
+ */
+ update_storm_count(wdata, BOOST_BACK_STORM_COUNT);
+ } else {
+ smblib_err(chg,
+ "Reverse boost detected: voting 0mA to suspend input\n");
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0);
+ vote(chg->awake_votable, BOOST_BACK_VOTER, true, 0);
+ /*
+ * Remove the boost-back vote after a delay, to avoid
+ * permanently suspending the input if the boost-back
+ * condition is unintentionally hit.
+ */
+ schedule_delayed_work(&chg->bb_removal_work,
+ msecs_to_jiffies(BOOST_BACK_UNVOTE_DELAY_MS));
+ }
}
return IRQ_HANDLED;
@@ -4472,6 +4578,7 @@ int smblib_init(struct smb_charger *chg)
INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work);
INIT_DELAYED_WORK(&chg->uusb_otg_work, smblib_uusb_otg_work);
+ INIT_DELAYED_WORK(&chg->bb_removal_work, smblib_bb_removal_work);
chg->fake_capacity = -EINVAL;
chg->fake_input_current_limited = -EINVAL;
@@ -4527,6 +4634,7 @@ int smblib_deinit(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->pl_enable_work);
cancel_work_sync(&chg->legacy_detection_work);
cancel_delayed_work_sync(&chg->uusb_otg_work);
+ cancel_delayed_work_sync(&chg->bb_removal_work);
power_supply_unreg_notifier(&chg->nb);
smblib_destroy_votables(chg);
qcom_batt_deinit();
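The smb-lib changes implement a two-stage response to switcher-power-ok storms: a first storm is treated as a weak charger (cap the ICL at weak_chg_icl_ua and retune the storm threshold), and a second storm at the reduced current is treated as reverse boost (suspend input, then unvote after 750 ms). A hedged sketch of that decision flow; the vote/update helpers are print stubs, not the kernel voter API:

#include <stdbool.h>
#include <stdio.h>

#define WEAK_CHG_STORM_COUNT	8
#define BOOST_BACK_STORM_COUNT	3

static bool weak_charger_voted;		/* stand-in for is_client_vote_enabled() */
static int  weak_chg_icl_ua = 500000;	/* module param default */

static void vote_icl(const char *voter, bool en, int ua)
{
	printf("vote %s %s %d uA\n", voter, en ? "on" : "off", ua);
}

static void on_power_ok_storm(void)
{
	if (!weak_charger_voted) {
		/* First storm: assume a weak charger, cap the input
		 * current and tighten the threshold for the next round. */
		vote_icl("WEAK_CHARGER_VOTER", true, weak_chg_icl_ua);
		weak_charger_voted = true;
		/* update_storm_count(wdata, BOOST_BACK_STORM_COUNT); */
	} else {
		/* Still storming at the reduced ICL: treat as reverse
		 * boost, suspend input, schedule the 750 ms unvote. */
		vote_icl("BOOST_BACK_VOTER", true, 0);
		/* schedule_delayed_work(&bb_removal_work, 750 ms); */
	}
}

int main(void)
{
	on_power_ok_storm();	/* weak-charger path */
	on_power_ok_storm();	/* reverse-boost path */
	return 0;
}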
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 8ba0a1dfe19f..c97b84c34084 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -64,9 +64,12 @@ enum print_reason {
#define BATT_PROFILE_VOTER "BATT_PROFILE_VOTER"
#define OTG_DELAY_VOTER "OTG_DELAY_VOTER"
#define USBIN_I_VOTER "USBIN_I_VOTER"
+#define WEAK_CHARGER_VOTER "WEAK_CHARGER_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
+#define BOOST_BACK_STORM_COUNT 3
+#define WEAK_CHG_STORM_COUNT 8
enum smb_mode {
PARALLEL_MASTER = 0,
@@ -230,6 +233,7 @@ struct smb_charger {
struct smb_chg_freq chg_freq;
int smb_version;
int otg_delay_ms;
+ int *weak_chg_icl_ua;
/* locks */
struct mutex lock;
@@ -292,6 +296,7 @@ struct smb_charger {
struct delayed_work pl_enable_work;
struct work_struct legacy_detection_work;
struct delayed_work uusb_otg_work;
+ struct delayed_work bb_removal_work;
/* cached status */
int voltage_min_uv;
diff --git a/drivers/power/supply/qcom/storm-watch.c b/drivers/power/supply/qcom/storm-watch.c
index 5275079c53e0..21ac669f2ec9 100644
--- a/drivers/power/supply/qcom/storm-watch.c
+++ b/drivers/power/supply/qcom/storm-watch.c
@@ -64,3 +64,13 @@ void reset_storm_count(struct storm_watch *data)
data->storm_count = 0;
mutex_unlock(&data->storm_lock);
}
+
+void update_storm_count(struct storm_watch *data, int max_count)
+{
+ if (!data)
+ return;
+
+ mutex_lock(&data->storm_lock);
+ data->max_storm_count = max_count;
+ mutex_unlock(&data->storm_lock);
+}
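update_storm_count() only retunes max_storm_count under storm_lock; the detection itself counts interrupts that arrive within one period of each other. A compilable approximation of that counting scheme (the real is_storming() uses kernel time helpers and storm_lock):

#include <stdbool.h>
#include <stdio.h>

struct storm_watch {
	long last_kt_ms;	/* timestamp of previous event */
	long period_ms;		/* events closer than this count toward a storm */
	int  storm_count;
	int  max_storm_count;
};

static bool is_storming(struct storm_watch *w, long now_ms)
{
	if (now_ms - w->last_kt_ms < w->period_ms)
		w->storm_count++;
	else
		w->storm_count = 0;
	w->last_kt_ms = now_ms;

	if (w->storm_count >= w->max_storm_count) {
		w->storm_count = 0;
		return true;
	}
	return false;
}

static void update_storm_count(struct storm_watch *w, int max_count)
{
	w->max_storm_count = max_count;	/* kernel version holds storm_lock */
}

int main(void)
{
	struct storm_watch w = { .period_ms = 1000, .max_storm_count = 8 };
	long t = 0;
	int i;

	for (i = 0; i < 10; i++, t += 100)	/* 10 rapid-fire events */
		if (is_storming(&w, t))
			printf("storm at t=%ld ms\n", t);

	update_storm_count(&w, 3);		/* retune threshold at runtime */
	for (i = 0; i < 4; i++, t += 100)
		if (is_storming(&w, t))
			printf("storm (threshold 3) at t=%ld ms\n", t);
	return 0;
}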
diff --git a/drivers/power/supply/qcom/storm-watch.h b/drivers/power/supply/qcom/storm-watch.h
index ff05c4a661c3..5275d73613d4 100644
--- a/drivers/power/supply/qcom/storm-watch.h
+++ b/drivers/power/supply/qcom/storm-watch.h
@@ -37,4 +37,5 @@ struct storm_watch {
bool is_storming(struct storm_watch *data);
void reset_storm_count(struct storm_watch *data);
+void update_storm_count(struct storm_watch *data, int max_count);
#endif
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 1766a20ebcb1..741f3ee81cfe 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -717,6 +717,7 @@ enum qeth_discipline_id {
};
struct qeth_discipline {
+ const struct device_type *devtype;
void (*start_poll)(struct ccw_device *, int, unsigned long);
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
@@ -881,6 +882,9 @@ extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
extern const struct attribute_group *qeth_generic_attr_groups[];
extern const struct attribute_group *qeth_osn_attr_groups[];
+extern const struct attribute_group qeth_device_attr_group;
+extern const struct attribute_group qeth_device_blkt_group;
+extern const struct device_type qeth_generic_devtype;
extern struct workqueue_struct *qeth_wq;
int qeth_card_hw_is_reachable(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 31ac53fa5cee..d10bf3da8e5f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5449,10 +5449,12 @@ void qeth_core_free_discipline(struct qeth_card *card)
card->discipline = NULL;
}
-static const struct device_type qeth_generic_devtype = {
+const struct device_type qeth_generic_devtype = {
.name = "qeth_generic",
.groups = qeth_generic_attr_groups,
};
+EXPORT_SYMBOL_GPL(qeth_generic_devtype);
+
static const struct device_type qeth_osn_devtype = {
.name = "qeth_osn",
.groups = qeth_osn_attr_groups,
@@ -5578,23 +5580,22 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_card;
}
- if (card->info.type == QETH_CARD_TYPE_OSN)
- gdev->dev.type = &qeth_osn_devtype;
- else
- gdev->dev.type = &qeth_generic_devtype;
-
switch (card->info.type) {
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
if (rc)
goto err_card;
+
+ gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
+ ? card->discipline->devtype
+ : &qeth_osn_devtype;
rc = card->discipline->setup(card->gdev);
if (rc)
goto err_disc;
- case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSX:
+ break;
default:
+ gdev->dev.type = &qeth_generic_devtype;
break;
}
@@ -5650,8 +5651,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
if (rc)
goto err;
rc = card->discipline->setup(card->gdev);
- if (rc)
+ if (rc) {
+ qeth_core_free_discipline(card);
goto err;
+ }
}
rc = card->discipline->set_online(gdev);
err:
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index e6e5b9671bf2..fa844b0ff847 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -409,12 +409,16 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
if (card->options.layer2 == newdis)
goto out;
- else {
- card->info.mac_bits = 0;
- if (card->discipline) {
- card->discipline->remove(card->gdev);
- qeth_core_free_discipline(card);
- }
+ if (card->info.type == QETH_CARD_TYPE_OSM) {
+ /* fixed layer, can't switch */
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ card->info.mac_bits = 0;
+ if (card->discipline) {
+ card->discipline->remove(card->gdev);
+ qeth_core_free_discipline(card);
}
rc = qeth_core_load_discipline(card, newdis);
@@ -422,6 +426,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
goto out;
rc = card->discipline->setup(card->gdev);
+ if (rc)
+ qeth_core_free_discipline(card);
out:
mutex_unlock(&card->discipline_mutex);
return rc ? rc : count;
@@ -699,10 +705,11 @@ static struct attribute *qeth_blkt_device_attrs[] = {
&dev_attr_inter_jumbo.attr,
NULL,
};
-static struct attribute_group qeth_device_blkt_group = {
+const struct attribute_group qeth_device_blkt_group = {
.name = "blkt",
.attrs = qeth_blkt_device_attrs,
};
+EXPORT_SYMBOL_GPL(qeth_device_blkt_group);
static struct attribute *qeth_device_attrs[] = {
&dev_attr_state.attr,
@@ -722,9 +729,10 @@ static struct attribute *qeth_device_attrs[] = {
&dev_attr_switch_attrs.attr,
NULL,
};
-static struct attribute_group qeth_device_attr_group = {
+const struct attribute_group qeth_device_attr_group = {
.attrs = qeth_device_attrs,
};
+EXPORT_SYMBOL_GPL(qeth_device_attr_group);
const struct attribute_group *qeth_generic_attr_groups[] = {
&qeth_device_attr_group,
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 0767556404bd..eb87bf97d38a 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -8,6 +8,8 @@
#include "qeth_core.h"
+extern const struct attribute_group *qeth_l2_attr_groups[];
+
int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index df036b872b05..bf1e0e39334d 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1027,11 +1027,21 @@ static int qeth_l2_stop(struct net_device *dev)
return 0;
}
+static const struct device_type qeth_l2_devtype = {
+ .name = "qeth_layer2",
+ .groups = qeth_l2_attr_groups,
+};
+
static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
- qeth_l2_create_device_attributes(&gdev->dev);
+ if (gdev->dev.type == &qeth_generic_devtype) {
+ rc = qeth_l2_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
+ }
INIT_LIST_HEAD(&card->vid_list);
hash_init(card->mac_htable);
card->options.layer2 = 1;
@@ -1043,7 +1053,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- qeth_l2_remove_device_attributes(&cgdev->dev);
+ if (cgdev->dev.type == &qeth_generic_devtype)
+ qeth_l2_remove_device_attributes(&cgdev->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -1101,7 +1112,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
case QETH_CARD_TYPE_OSN:
card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
ether_setup);
- card->dev->flags |= IFF_NOARP;
break;
default:
card->dev = alloc_etherdev(0);
@@ -1114,9 +1124,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->mtu = card->info.initial_mtu;
card->dev->netdev_ops = &qeth_l2_netdev_ops;
- card->dev->ethtool_ops =
- (card->info.type != QETH_CARD_TYPE_OSN) ?
- &qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
+ if (card->info.type == QETH_CARD_TYPE_OSN) {
+ card->dev->ethtool_ops = &qeth_l2_osn_ops;
+ card->dev->flags |= IFF_NOARP;
+ } else {
+ card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
+ }
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
@@ -1429,6 +1442,7 @@ static int qeth_l2_control_event(struct qeth_card *card,
}
struct qeth_discipline qeth_l2_discipline = {
+ .devtype = &qeth_l2_devtype,
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 692db49e3d2a..a48ed9e7e168 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -272,3 +272,11 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
} else
qeth_bridgeport_an_set(card, 0);
}
+
+const struct attribute_group *qeth_l2_attr_groups[] = {
+ &qeth_device_attr_group,
+ &qeth_device_blkt_group,
+ /* l2 specific, see l2_{create,remove}_device_attributes(): */
+ &qeth_l2_bridgeport_attr_group,
+ NULL,
+};
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index cc4d3c3d8cc5..285fe0b2c753 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3227,8 +3227,11 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
- qeth_l3_create_device_attributes(&gdev->dev);
+ rc = qeth_l3_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
card->options.layer2 = 0;
card->info.hwtrap = 0;
return 0;
@@ -3519,6 +3522,7 @@ static int qeth_l3_control_event(struct qeth_card *card,
}
struct qeth_discipline qeth_l3_discipline = {
+ .devtype = &qeth_generic_devtype,
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8a5fbdb45cfd..e333029e4b6c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4452,6 +4452,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
unsigned long flags;
+ unsigned int sector_sz;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4510,6 +4511,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+
+ /* In case of bogus firmware or a misbehaving device, we could
+ * end up with an unaligned partial completion. Force alignment
+ * here so scsi-ml does not need to handle this misbehavior.
+ */
+ sector_sz = scmd->device->sector_size;
+ if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
+ xfer_cnt % sector_sz)) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
+ xfer_cnt, sector_sz);
+ xfer_cnt = round_down(xfer_cnt, sector_sz);
+ }
+
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
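The mpt3sas fix rounds a bogus transfer count down to a whole number of sectors so the computed residual stays sector-aligned. A compilable demonstration; the round_down() form below is only valid for power-of-two sector sizes, which the kernel macro assumes as well:

#include <stdio.h>

/* Round x down to a multiple of y; y must be a power of two. */
#define round_down(x, y) ((x) & ~((y) - 1u))

int main(void)
{
	unsigned int bufflen   = 8192;	/* bytes the command asked for */
	unsigned int xfer_cnt  = 4600;	/* bogus: not sector aligned */
	unsigned int sector_sz = 512;

	if (sector_sz && xfer_cnt % sector_sz)
		xfer_cnt = round_down(xfer_cnt, sector_sz);

	printf("xfer=%u resid=%u\n", xfer_cnt, bufflen - xfer_cnt);
	/* -> xfer=4096 resid=4096: both now whole sectors */
	return 0;
}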
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3588a56aabb4..5cbf20ab94aa 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2311,10 +2311,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (mem_only) {
if (pci_enable_device_mem(pdev))
- goto probe_out;
+ return ret;
} else {
if (pci_enable_device(pdev))
- goto probe_out;
+ return ret;
}
/* This may fail but that's ok */
@@ -2324,7 +2324,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009,
"Unable to allocate memory for ha.\n");
- goto probe_out;
+ goto disable_device;
}
ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha);
@@ -2923,7 +2923,7 @@ iospace_config_failed:
kfree(ha);
ha = NULL;
-probe_out:
+disable_device:
pci_disable_device(pdev);
return ret;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 544a71e7c242..a2f2cc0c2c51 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -653,7 +653,7 @@ static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
unsigned int tag, const char *str)
{
struct ufshcd_lrb *lrbp;
- char *cmd_type;
+ char *cmd_type = NULL;
u8 opcode = 0;
u8 cmd_id = 0, idn = 0;
sector_t lba = -1;
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index ee0fe1622645..f4ffc7df1e9c 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -2884,7 +2884,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
struct channel_ctx *ctx = (struct channel_ctx *)handle;
uint32_t riid;
int ret = 0;
- struct glink_core_tx_pkt *tx_info;
+ struct glink_core_tx_pkt *tx_info = NULL;
size_t intent_size;
bool is_atomic =
tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
@@ -2899,6 +2899,13 @@ static int glink_tx_common(void *handle, void *pkt_priv,
return ret;
rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
+ tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
+ is_atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!tx_info) {
+ GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
+ ret = -ENOMEM;
+ goto glink_tx_common_err;
+ }
if (!(vbuf_provider || pbuf_provider)) {
ret = -EINVAL;
goto glink_tx_common_err;
@@ -3018,14 +3025,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n",
__func__, riid, intent_size,
data ? data : iovec, size, current->pid);
- tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
- is_atomic ? GFP_ATOMIC : GFP_KERNEL);
- if (!tx_info) {
- GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
- ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
- ret = -ENOMEM;
- goto glink_tx_common_err;
- }
+
rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
INIT_LIST_HEAD(&tx_info->list_done);
INIT_LIST_HEAD(&tx_info->list_node);
@@ -3050,10 +3050,15 @@ static int glink_tx_common(void *handle, void *pkt_priv,
else
xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
+ rwref_read_put(&ctx->ch_state_lhb2);
+ glink_put_ch_ctx(ctx, false);
+ return ret;
+
glink_tx_common_err:
rwref_read_put(&ctx->ch_state_lhb2);
glink_tx_common_err_2:
glink_put_ch_ctx(ctx, false);
+ kfree(tx_info);
return ret;
}
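Moving the kzalloc() to the top of glink_tx_common() means no remote rx intent has been claimed yet when allocation fails, and the single kfree() at the common exit label is safe because tx_info starts out NULL. A sketch of the allocate-first error-path pattern in plain C:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { int id; };

static int tx_common(int bad_args)
{
	struct pkt *tx_info = NULL;
	int ret = 0;

	tx_info = calloc(1, sizeof(*tx_info));	/* allocate up front */
	if (!tx_info)
		return -ENOMEM;

	/* ... take channel reference, validate arguments ... */
	if (bad_args) {
		ret = -EINVAL;
		goto err;	/* nothing else acquired yet to undo */
	}

	printf("queued pkt %d\n", tx_info->id);
	free(tx_info);	/* the kernel instead hands it to the tx queue */
	return 0;

err:
	free(tx_info);	/* one label covers every failure path */
	return ret;
}

int main(void)
{
	printf("ok:  %d\n", tx_common(0));
	printf("bad: %d\n", tx_common(1));
	return 0;
}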
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 8385987e8888..99caa1c84dce 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -215,7 +215,6 @@ struct edge_info {
bool tx_blocked_signal_sent;
struct kthread_work kwork;
struct kthread_worker kworker;
- struct work_struct wakeup_work;
struct task_struct *task;
struct tasklet_struct tasklet;
struct srcu_struct use_ref;
@@ -816,6 +815,32 @@ static bool get_rx_fifo(struct edge_info *einfo)
}
/**
+ * tx_wakeup_worker() - wake up a thread blocked waiting for tx space
+ * @einfo: edge to process commands on.
+ */
+static void tx_wakeup_worker(struct edge_info *einfo)
+{
+ bool trigger_wakeup = false;
+ unsigned long flags;
+
+ if (einfo->in_ssr)
+ return;
+ if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
+ einfo->tx_resume_needed = false;
+ einfo->xprt_if.glink_core_if_ptr->tx_resume(
+ &einfo->xprt_if);
+ }
+ spin_lock_irqsave(&einfo->write_lock, flags);
+ if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting? */
+ einfo->tx_blocked_signal_sent = false;
+ trigger_wakeup = true;
+ }
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+ if (trigger_wakeup)
+ wake_up_all(&einfo->tx_blocked_queue);
+}
+
+/**
* __rx_worker() - process received commands on a specific edge
* @einfo: Edge to process commands on.
* @atomic_ctx: Indicates if the caller is in atomic context and requires any
@@ -865,7 +890,7 @@ static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
(waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting ?*/
- schedule_work(&einfo->wakeup_work);
+ tx_wakeup_worker(einfo);
/*
* Access to the fifo needs to be synchronized, however only the calls
@@ -1173,39 +1198,6 @@ static void rx_worker_atomic(unsigned long param)
}
/**
- * tx_wakeup_worker() - worker function to wakeup tx blocked thread
- * @work: kwork associated with the edge to process commands on.
- */
-static void tx_wakeup_worker(struct work_struct *work)
-{
- struct edge_info *einfo;
- bool trigger_wakeup = false;
- unsigned long flags;
- int rcu_id;
-
- einfo = container_of(work, struct edge_info, wakeup_work);
- rcu_id = srcu_read_lock(&einfo->use_ref);
- if (einfo->in_ssr) {
- srcu_read_unlock(&einfo->use_ref, rcu_id);
- return;
- }
- if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
- einfo->tx_resume_needed = false;
- einfo->xprt_if.glink_core_if_ptr->tx_resume(
- &einfo->xprt_if);
- }
- spin_lock_irqsave(&einfo->write_lock, flags);
- if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting ?*/
- einfo->tx_blocked_signal_sent = false;
- trigger_wakeup = true;
- }
- spin_unlock_irqrestore(&einfo->write_lock, flags);
- if (trigger_wakeup)
- wake_up_all(&einfo->tx_blocked_queue);
- srcu_read_unlock(&einfo->use_ref, rcu_id);
-}
-
-/**
* rx_worker() - worker function to process received commands
* @work: kwork associated with the edge to process commands on.
*/
@@ -2355,7 +2347,6 @@ static int glink_smem_native_probe(struct platform_device *pdev)
init_waitqueue_head(&einfo->tx_blocked_queue);
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
- INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
einfo->read_from_fifo = read_from_fifo;
einfo->write_to_fifo = write_to_fifo;
@@ -2457,7 +2448,6 @@ request_irq_fail:
reg_xprt_fail:
smem_alloc_fail:
flush_kthread_worker(&einfo->kworker);
- flush_work(&einfo->wakeup_work);
kthread_stop(einfo->task);
einfo->task = NULL;
tasklet_kill(&einfo->tasklet);
@@ -2546,7 +2536,6 @@ static int glink_rpm_native_probe(struct platform_device *pdev)
init_waitqueue_head(&einfo->tx_blocked_queue);
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
- INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
einfo->intentless = true;
einfo->read_from_fifo = memcpy32_fromio;
@@ -2707,7 +2696,6 @@ request_irq_fail:
reg_xprt_fail:
toc_init_fail:
flush_kthread_worker(&einfo->kworker);
- flush_work(&einfo->wakeup_work);
kthread_stop(einfo->task);
einfo->task = NULL;
tasklet_kill(&einfo->tasklet);
@@ -2839,7 +2827,6 @@ static int glink_mailbox_probe(struct platform_device *pdev)
init_waitqueue_head(&einfo->tx_blocked_queue);
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
- INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
einfo->read_from_fifo = read_from_fifo;
einfo->write_to_fifo = write_to_fifo;
@@ -2960,7 +2947,6 @@ request_irq_fail:
reg_xprt_fail:
smem_alloc_fail:
flush_kthread_worker(&einfo->kworker);
- flush_work(&einfo->wakeup_work);
kthread_stop(einfo->task);
einfo->task = NULL;
tasklet_kill(&einfo->tasklet);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 28f89bfac7c6..9690d3c64560 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -434,6 +434,7 @@ static struct icnss_priv {
bool is_wlan_mac_set;
struct icnss_wlan_mac_addr wlan_mac_addr;
bool bypass_s1_smmu;
+ struct mutex dev_lock;
} *penv;
#ifdef CONFIG_ICNSS_DEBUG
@@ -3939,12 +3940,14 @@ static int icnss_regread_show(struct seq_file *s, void *data)
{
struct icnss_priv *priv = s->private;
+ mutex_lock(&priv->dev_lock);
if (!priv->diag_reg_read_buf) {
seq_puts(s, "Usage: echo <mem_type> <offset> <data_len> > <debugfs>/icnss/reg_read\n");
if (!test_bit(ICNSS_FW_READY, &priv->state))
seq_puts(s, "Firmware is not ready yet!, wait for FW READY\n");
+ mutex_unlock(&priv->dev_lock);
return 0;
}
@@ -3958,6 +3961,7 @@ static int icnss_regread_show(struct seq_file *s, void *data)
priv->diag_reg_read_len = 0;
kfree(priv->diag_reg_read_buf);
priv->diag_reg_read_buf = NULL;
+ mutex_unlock(&priv->dev_lock);
return 0;
}
@@ -4018,18 +4022,22 @@ static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
return -EINVAL;
+ mutex_lock(&priv->dev_lock);
kfree(priv->diag_reg_read_buf);
priv->diag_reg_read_buf = NULL;
reg_buf = kzalloc(data_len, GFP_KERNEL);
- if (!reg_buf)
+ if (!reg_buf) {
+ mutex_unlock(&priv->dev_lock);
return -ENOMEM;
+ }
ret = wlfw_athdiag_read_send_sync_msg(priv, reg_offset,
mem_type, data_len,
reg_buf);
if (ret) {
kfree(reg_buf);
+ mutex_unlock(&priv->dev_lock);
return ret;
}
@@ -4037,6 +4045,7 @@ static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
priv->diag_reg_read_mem_type = mem_type;
priv->diag_reg_read_len = data_len;
priv->diag_reg_read_buf = reg_buf;
+ mutex_unlock(&priv->dev_lock);
return count;
}
@@ -4284,6 +4293,7 @@ static int icnss_probe(struct platform_device *pdev)
spin_lock_init(&priv->event_lock);
spin_lock_init(&priv->on_off_lock);
+ mutex_init(&priv->dev_lock);
priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
if (!priv->event_wq) {
diff --git a/drivers/soc/qcom/rpm_master_stat.c b/drivers/soc/qcom/rpm_master_stat.c
index 7bf18ffe6ad2..b8bf3a059677 100644
--- a/drivers/soc/qcom/rpm_master_stat.c
+++ b/drivers/soc/qcom/rpm_master_stat.c
@@ -50,6 +50,8 @@
#define GET_FIELD(a) ((strnstr(#a, ".", 80) + 1))
+static DEFINE_MUTEX(msm_rpm_master_stats_mutex);
+
struct msm_rpm_master_stats {
uint32_t active_cores;
uint32_t numshutdowns;
@@ -80,9 +82,11 @@ int msm_rpm_master_stats_file_close(struct inode *inode,
{
struct msm_rpm_master_stats_private_data *private = file->private_data;
+ mutex_lock(&msm_rpm_master_stats_mutex);
if (private->reg_base)
iounmap(private->reg_base);
kfree(file->private_data);
+ mutex_unlock(&msm_rpm_master_stats_mutex);
return 0;
}
@@ -95,15 +99,11 @@ static int msm_rpm_master_copy_stats(
static int master_cnt;
int count, j = 0;
char *buf;
- static DEFINE_MUTEX(msm_rpm_master_stats_mutex);
unsigned long active_cores;
- mutex_lock(&msm_rpm_master_stats_mutex);
-
/* Iterate possible number of masters */
if (master_cnt > prvdata->num_masters - 1) {
master_cnt = 0;
- mutex_unlock(&msm_rpm_master_stats_mutex);
return 0;
}
@@ -256,7 +256,6 @@ static int msm_rpm_master_copy_stats(
}
master_cnt++;
- mutex_unlock(&msm_rpm_master_stats_mutex);
return RPM_MASTERS_BUF_LEN - count;
}
@@ -265,25 +264,36 @@ static ssize_t msm_rpm_master_stats_file_read(struct file *file,
{
struct msm_rpm_master_stats_private_data *prvdata;
struct msm_rpm_master_stats_platform_data *pdata;
+ ssize_t ret;
+ mutex_lock(&msm_rpm_master_stats_mutex);
prvdata = file->private_data;
- if (!prvdata)
- return -EINVAL;
+ if (!prvdata) {
+ ret = -EINVAL;
+ goto exit;
+ }
pdata = prvdata->platform_data;
- if (!pdata)
- return -EINVAL;
+ if (!pdata) {
+ ret = -EINVAL;
+ goto exit;
+ }
- if (!bufu || count == 0)
- return -EINVAL;
+ if (!bufu || count == 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
if (*ppos <= pdata->phys_size) {
prvdata->len = msm_rpm_master_copy_stats(prvdata);
*ppos = 0;
}
- return simple_read_from_buffer(bufu, count, ppos,
+ ret = simple_read_from_buffer(bufu, count, ppos,
prvdata->buf, prvdata->len);
+exit:
+ mutex_unlock(&msm_rpm_master_stats_mutex);
+ return ret;
}
static int msm_rpm_master_stats_file_open(struct inode *inode,
@@ -291,15 +301,20 @@ static int msm_rpm_master_stats_file_open(struct inode *inode,
{
struct msm_rpm_master_stats_private_data *prvdata;
struct msm_rpm_master_stats_platform_data *pdata;
+ int ret = 0;
+ mutex_lock(&msm_rpm_master_stats_mutex);
pdata = inode->i_private;
file->private_data =
kzalloc(sizeof(struct msm_rpm_master_stats_private_data),
GFP_KERNEL);
- if (!file->private_data)
- return -ENOMEM;
+ if (!file->private_data) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
prvdata = file->private_data;
prvdata->reg_base = ioremap(pdata->phys_addr_base,
@@ -310,14 +325,17 @@ static int msm_rpm_master_stats_file_open(struct inode *inode,
pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
__func__, &pdata->phys_addr_base,
pdata->phys_size);
- return -EBUSY;
+ ret = -EBUSY;
+ goto exit;
}
prvdata->len = 0;
prvdata->num_masters = pdata->num_masters;
prvdata->master_names = pdata->masters;
prvdata->platform_data = pdata;
- return 0;
+exit:
+ mutex_unlock(&msm_rpm_master_stats_mutex);
+ return ret;
}
static const struct file_operations msm_rpm_master_stats_fops = {
diff --git a/drivers/soc/qcom/rpm_rail_stats.c b/drivers/soc/qcom/rpm_rail_stats.c
index 9ef96dc54eb6..728fb69bfbe2 100644
--- a/drivers/soc/qcom/rpm_rail_stats.c
+++ b/drivers/soc/qcom/rpm_rail_stats.c
@@ -46,6 +46,8 @@
#define NAMELEN (sizeof(uint32_t)+1)
+static DEFINE_MUTEX(msm_rpm_rail_stats_mutex);
+
struct msm_rpm_rail_stats_platform_data {
phys_addr_t phys_addr_base;
u32 phys_size;
@@ -80,9 +82,11 @@ int msm_rpm_rail_stats_file_close(struct inode *inode, struct file *file)
{
struct msm_rpm_rail_stats_private_data *private = file->private_data;
+ mutex_lock(&msm_rpm_rail_stats_mutex);
if (private->reg_base)
iounmap(private->reg_base);
kfree(file->private_data);
+ mutex_unlock(&msm_rpm_rail_stats_mutex);
return 0;
}
@@ -154,18 +158,26 @@ static int msm_rpm_rail_stats_copy(
static ssize_t msm_rpm_rail_stats_file_read(struct file *file,
char __user *bufu, size_t count, loff_t *ppos)
{
- struct msm_rpm_rail_stats_private_data *prvdata =
- file->private_data;
+ struct msm_rpm_rail_stats_private_data *prvdata;
struct msm_rpm_rail_stats_platform_data *pdata;
+ ssize_t ret;
- if (!prvdata)
- return -EINVAL;
+ mutex_lock(&msm_rpm_rail_stats_mutex);
+ prvdata = file->private_data;
+ if (!prvdata) {
+ ret = -EINVAL;
+ goto exit;
+ }
- if (!prvdata->platform_data)
- return -EINVAL;
+ if (!prvdata->platform_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
- if (!bufu || count == 0)
- return -EINVAL;
+ if (!bufu || count == 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
pdata = prvdata->platform_data;
@@ -174,22 +186,32 @@ static ssize_t msm_rpm_rail_stats_file_read(struct file *file,
*ppos = 0;
}
- return simple_read_from_buffer(bufu, count, ppos,
+ ret = simple_read_from_buffer(bufu, count, ppos,
prvdata->buf, prvdata->len);
+exit:
+ mutex_unlock(&msm_rpm_rail_stats_mutex);
+ return ret;
}
static int msm_rpm_rail_stats_file_open(struct inode *inode,
struct file *file)
{
struct msm_rpm_rail_stats_private_data *prvdata;
- struct msm_rpm_rail_stats_platform_data *pdata = inode->i_private;
+ struct msm_rpm_rail_stats_platform_data *pdata;
+ int ret = 0;
+
+ mutex_lock(&msm_rpm_rail_stats_mutex);
+ pdata = inode->i_private;
file->private_data =
kzalloc(sizeof(struct msm_rpm_rail_stats_private_data),
GFP_KERNEL);
- if (!file->private_data)
- return -ENOMEM;
+ if (!file->private_data) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
prvdata = file->private_data;
prvdata->reg_base = ioremap(pdata->phys_addr_base,
@@ -200,12 +222,15 @@ static int msm_rpm_rail_stats_file_open(struct inode *inode,
pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
__func__, &pdata->phys_addr_base,
pdata->phys_size);
- return -EBUSY;
+ ret = -EBUSY;
+ goto exit;
}
prvdata->len = 0;
prvdata->platform_data = pdata;
- return 0;
+exit:
+ mutex_unlock(&msm_rpm_rail_stats_mutex);
+ return ret;
}
diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c
index 8f3094853ba3..b54af9eae8ec 100644
--- a/drivers/soc/qcom/rpm_stats.c
+++ b/drivers/soc/qcom/rpm_stats.c
@@ -31,6 +31,8 @@
#define GET_PDATA_OF_ATTR(attr) \
(container_of(attr, struct msm_rpmstats_kobj_attr, ka)->pd)
+static DEFINE_MUTEX(rpm_stats_mutex);
+
enum {
ID_COUNTER,
ID_ACCUM_TIME_SCLK,
@@ -220,6 +222,12 @@ static int msm_rpmstats_copy_stats(struct msm_rpmstats_private_data *pdata)
record.id = msm_rpmstats_read_register(pdata->reg_base,
pdata->read_idx, 1);
+ if (record.id >= ID_MAX) {
+ pr_err("%s: array out of bound error found.\n",
+ __func__);
+ return -EINVAL;
+ }
+
record.val = msm_rpmstats_read_register(pdata->reg_base,
pdata->read_idx, 2);
@@ -242,13 +250,20 @@ static ssize_t msm_rpmstats_file_read(struct file *file, char __user *bufu,
size_t count, loff_t *ppos)
{
struct msm_rpmstats_private_data *prvdata;
+ ssize_t ret;
+ mutex_lock(&rpm_stats_mutex);
prvdata = file->private_data;
- if (!prvdata)
- return -EINVAL;
- if (!bufu || count == 0)
- return -EINVAL;
+ if (!prvdata) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (!bufu || count == 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
if (prvdata->platform_data->version == 1) {
if (!prvdata->num_records)
@@ -263,22 +278,30 @@ static ssize_t msm_rpmstats_file_read(struct file *file, char __user *bufu,
prvdata->len = msm_rpmstats_copy_stats_v2(prvdata);
*ppos = 0;
}
- return simple_read_from_buffer(bufu, count, ppos,
+ ret = simple_read_from_buffer(bufu, count, ppos,
prvdata->buf, prvdata->len);
+exit:
+ mutex_unlock(&rpm_stats_mutex);
+ return ret;
}
static int msm_rpmstats_file_open(struct inode *inode, struct file *file)
{
struct msm_rpmstats_private_data *prvdata;
struct msm_rpmstats_platform_data *pdata;
+ int ret = 0;
+ mutex_lock(&rpm_stats_mutex);
pdata = inode->i_private;
file->private_data =
kmalloc(sizeof(struct msm_rpmstats_private_data), GFP_KERNEL);
- if (!file->private_data)
- return -ENOMEM;
+ if (!file->private_data) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
prvdata = file->private_data;
prvdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
@@ -289,24 +312,28 @@ static int msm_rpmstats_file_open(struct inode *inode, struct file *file)
pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
__func__, &pdata->phys_addr_base,
pdata->phys_size);
- return -EBUSY;
+ ret = -EBUSY;
+ goto exit;
}
prvdata->read_idx = prvdata->num_records = prvdata->len = 0;
prvdata->platform_data = pdata;
if (pdata->version == 2)
prvdata->num_records = 2;
-
- return 0;
+exit:
+ mutex_unlock(&rpm_stats_mutex);
+ return ret;
}
static int msm_rpmstats_file_close(struct inode *inode, struct file *file)
{
struct msm_rpmstats_private_data *private = file->private_data;
+ mutex_lock(&rpm_stats_mutex);
if (private->reg_base)
iounmap(private->reg_base);
kfree(file->private_data);
+ mutex_unlock(&rpm_stats_mutex);
return 0;
}
@@ -362,22 +389,26 @@ static ssize_t rpmstats_show(struct kobject *kobj,
{
struct msm_rpmstats_private_data *prvdata = NULL;
struct msm_rpmstats_platform_data *pdata = NULL;
+ ssize_t ret;
+ mutex_lock(&rpm_stats_mutex);
pdata = GET_PDATA_OF_ATTR(attr);
prvdata =
kmalloc(sizeof(*prvdata), GFP_KERNEL);
- if (!prvdata)
- return -ENOMEM;
+ if (!prvdata) {
+ ret = -ENOMEM;
+ goto kmalloc_fail;
+ }
prvdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
pdata->phys_size);
if (!prvdata->reg_base) {
- kfree(prvdata);
pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
__func__, &pdata->phys_addr_base,
pdata->phys_size);
- return -EBUSY;
+ ret = -EBUSY;
+ goto ioremap_fail;
}
prvdata->read_idx = prvdata->num_records = prvdata->len = 0;
@@ -399,23 +430,22 @@ static ssize_t rpmstats_show(struct kobject *kobj,
prvdata);
}
- return snprintf(buf, prvdata->len, prvdata->buf);
+ ret = snprintf(buf, prvdata->len, prvdata->buf);
+ iounmap(prvdata->reg_base);
+ioremap_fail:
+ kfree(prvdata);
+kmalloc_fail:
+ mutex_unlock(&rpm_stats_mutex);
+ return ret;
}
static int msm_rpmstats_create_sysfs(struct msm_rpmstats_platform_data *pd)
{
- struct kobject *module_kobj = NULL;
struct kobject *rpmstats_kobj = NULL;
struct msm_rpmstats_kobj_attr *rpms_ka = NULL;
int ret = 0;
- module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
- if (!module_kobj) {
- pr_err("%s: Cannot find module_kset\n", __func__);
- return -ENODEV;
- }
-
- rpmstats_kobj = kobject_create_and_add("rpmstats", module_kobj);
+ rpmstats_kobj = kobject_create_and_add("system_sleep", power_kobj);
if (!rpmstats_kobj) {
pr_err("%s: Cannot create rpmstats kobject\n", __func__);
ret = -ENOMEM;
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index 36804b988d62..31de6e5c173c 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -731,10 +731,6 @@ int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | scm_version_mask;
- pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
- x0, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->x5);
-
if (scm_version == SCM_ARMV8_64)
ret = __scm_call_armv8_64(x0, desc->arginfo, desc->args[0],
desc->args[1], desc->args[2],
@@ -746,9 +742,8 @@ int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
desc->x5, &desc->ret[0],
&desc->ret[1], &desc->ret[2]);
if (ret < 0)
- pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
- x0, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->x5, ret, desc->ret[0],
+ pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, ret, desc->ret[0],
desc->ret[1], desc->ret[2]);
if (arglen > N_REGISTER_ARGS)
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
index 34b46d25a823..2ef25e48ce50 100644
--- a/drivers/soc/qcom/smp2p_sleepstate.c
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -14,6 +14,8 @@
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/ipc_router.h>
#include "smp2p_private.h"
#define SET_DELAY (2 * HZ)
@@ -35,10 +37,14 @@ static int sleepstate_pm_notifier(struct notifier_block *nb,
switch (event) {
case PM_SUSPEND_PREPARE:
gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 0);
+ msleep(25); /* To be tuned based on SMP2P latencies */
+ msm_ipc_router_set_ws_allowed(true);
break;
case PM_POST_SUSPEND:
gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
+ msleep(25); /* To be tuned based on SMP2P latencies */
+ msm_ipc_router_set_ws_allowed(false);
break;
}
return NOTIFY_DONE;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 2fb1e974cc70..e11b1001d1f6 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -399,18 +399,10 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
struct lov_mds_md *lmmk = NULL;
int rc, lmm_size;
int lum_size;
- mm_segment_t seg;
if (!lsm)
return -ENODATA;
- /*
- * "Switch to kernel segment" to allow copying from kernel space by
- * copy_{to,from}_user().
- */
- seg = get_fs();
- set_fs(KERNEL_DS);
-
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3) */
lum_size = sizeof(struct lov_user_md_v1);
@@ -485,6 +477,5 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
obd_free_diskmd(exp, &lmmk);
out_set:
- set_fs(seg);
return rc;
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index af301414a9f3..60743bf27f37 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1154,15 +1154,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
if (cmd->unknown_data_length) {
cmd->data_length = size;
} else if (size != cmd->data_length) {
- pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+ pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:"
" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cmd->t_task_cdb[0]);
- if (cmd->data_direction == DMA_TO_DEVICE &&
- cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
- pr_err("Rejecting underflow/overflow WRITE data\n");
- return TCM_INVALID_CDB_FIELD;
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+ pr_err_ratelimited("Rejecting underflow/overflow"
+ " for WRITE data CDB\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /*
+ * Some fabric drivers like iscsi-target still expect to
+ * always reject overflow writes. Reject this case until
+ * full fabric driver level support for overflow writes
+ * is introduced tree-wide.
+ */
+ if (size > cmd->data_length) {
+ pr_err_ratelimited("Rejecting overflow for"
+ " WRITE control CDB\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
}
/*
* Reject READ_* or WRITE_* with overflow/underflow for
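The resulting WRITE policy: any length mismatch on a DATA CDB is rejected, an overflow on a control CDB is rejected (fabric drivers such as iscsi-target still expect that), and an underflow on a control CDB is allowed with the residual adjusted. A compilable sketch of that decision table:

#include <stdbool.h>
#include <stdio.h>

#define TCM_INVALID_CDB_FIELD 1

static int check_write(unsigned size, unsigned data_length, bool data_cdb)
{
	if (size == data_length)
		return 0;
	if (data_cdb)
		return TCM_INVALID_CDB_FIELD;	/* DATA CDB: no fuzz allowed */
	if (size > data_length)
		return TCM_INVALID_CDB_FIELD;	/* overflow control WRITE */
	return 0;				/* underflow: adjust residual */
}

int main(void)
{
	printf("data mismatch:     %d\n", check_write(512, 4096, true));
	printf("control overflow:  %d\n", check_write(8192, 4096, false));
	printf("control underflow: %d\n", check_write(512, 4096, false));
	return 0;
}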
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index c78406cb3325..2df1bf69e7c9 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3292,40 +3292,55 @@ static int qpnp_adc_tm_remove(struct platform_device *pdev)
static void qpnp_adc_tm_shutdown(struct platform_device *pdev)
{
struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev);
- int rc = 0;
+ int rc = 0, i = 0;
u8 reg_val = 0, status1 = 0, en_ctl1 = 0;
- /* Set measurement in single measurement mode */
- reg_val = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
- rc = qpnp_adc_tm_mode_select(chip, reg_val);
- if (rc < 0)
- pr_err("adc-tm single mode select failed\n");
+ if (!chip->adc_tm_hc) {
+ /* Set measurement in single measurement mode */
+ reg_val = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT;
+ rc = qpnp_adc_tm_mode_select(chip, reg_val);
+ if (rc < 0)
+ pr_err("adc-tm single mode select failed\n");
+ }
/* Disable bank */
rc = qpnp_adc_tm_disable(chip);
if (rc < 0)
pr_err("adc-tm disable failed\n");
- /* Check if a conversion is in progress */
- rc = qpnp_adc_tm_req_sts_check(chip);
- if (rc < 0)
- pr_err("adc-tm req_sts check failed\n");
+ if (chip->adc_tm_hc) {
+ for (i = 0; i < 8; i++) {
+ rc = qpnp_adc_tm_reg_update(chip,
+ QPNP_BTM_Mn_EN(i),
+ QPNP_BTM_Mn_MEAS_EN, false);
+ if (rc < 0)
+ pr_err("multi meas disable failed\n");
+ }
+ } else {
+ /* Check if a conversion is in progress */
+ rc = qpnp_adc_tm_req_sts_check(chip);
+ if (rc < 0)
+ pr_err("adc-tm req_sts check failed\n");
- /* Disable multimeasurement */
- reg_val = 0;
- rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN, reg_val, 1);
- if (rc < 0)
- pr_err("adc-tm multi-measurement mode disable failed\n");
+ /* Disable multimeasurement */
+ reg_val = 0;
+ rc = qpnp_adc_tm_write_reg(chip,
+ QPNP_ADC_TM_MULTI_MEAS_EN, reg_val, 1);
+ if (rc < 0)
+ pr_err("adc-tm multi-meas mode disable failed\n");
- rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1);
- if (rc < 0)
- pr_err("adc-tm status1 read failed\n");
+ rc = qpnp_adc_tm_read_reg(chip,
+ QPNP_ADC_TM_STATUS1, &status1, 1);
+ if (rc < 0)
+ pr_err("adc-tm status1 read failed\n");
- rc = qpnp_adc_tm_read_reg(chip, QPNP_EN_CTL1, &en_ctl1, 1);
- if (rc < 0)
- pr_err("adc-tm en_ctl1 read failed\n");
+ rc = qpnp_adc_tm_read_reg(chip,
+ QPNP_EN_CTL1, &en_ctl1, 1);
+ if (rc < 0)
+ pr_err("adc-tm en_ctl1 read failed\n");
- pr_debug("adc-tm status1=0%x, en_ctl1=0x%x\n", status1, en_ctl1);
+ pr_debug("status1=0%x, en_ctl1=0x%x\n", status1, en_ctl1);
+ }
}
static int qpnp_adc_tm_suspend_noirq(struct device *dev)
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 88246f7e435a..0f23dda60011 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1378,9 +1378,9 @@ static struct spi_driver ifx_spi_driver = {
static void __exit ifx_spi_exit(void)
{
/* unregister */
+ spi_unregister_driver(&ifx_spi_driver);
tty_unregister_driver(tty_drv);
put_tty_driver(tty_drv);
- spi_unregister_driver(&ifx_spi_driver);
unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 63a06ab6ba03..235e150d7b81 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1800,11 +1800,13 @@ static int sci_startup(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ sci_request_dma(port);
+
ret = sci_request_irq(s);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
+ sci_free_dma(port);
return ret;
-
- sci_request_dma(port);
+ }
spin_lock_irqsave(&port->lock, flags);
sci_start_tx(port);
@@ -1834,8 +1836,8 @@ static void sci_shutdown(struct uart_port *port)
}
#endif
- sci_free_dma(port);
sci_free_irq(s);
+ sci_free_dma(port);
}
static unsigned int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 7cef54334b12..1bb629ab8ecc 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2070,13 +2070,12 @@ retry_open:
if (tty) {
mutex_unlock(&tty_mutex);
retval = tty_lock_interruptible(tty);
+ tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
if (retval) {
if (retval == -EINTR)
retval = -ERESTARTSYS;
goto err_unref;
}
- /* safe to drop the kref from tty_driver_lookup_tty() */
- tty_kref_put(tty);
retval = tty_reopen(tty);
if (retval < 0) {
tty_unlock(tty);
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index d09293bc0e04..cff304abb619 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -24,10 +24,15 @@ EXPORT_SYMBOL(tty_lock);
int tty_lock_interruptible(struct tty_struct *tty)
{
+ int ret;
+
if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
return -EIO;
tty_kref_get(tty);
- return mutex_lock_interruptible(&tty->legacy_mutex);
+ ret = mutex_lock_interruptible(&tty->legacy_mutex);
+ if (ret)
+ tty_kref_put(tty);
+ return ret;
}
void __lockfunc tty_unlock(struct tty_struct *tty)
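With this change tty_lock_interruptible() owns the reference it takes: on a failed lock it drops the kref itself, so the caller holds a reference exactly when it holds the lock (and tty_io.c above can put its lookup kref unconditionally). A toy model of that get/put balance:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { int refs; bool locked; };

static void get(struct obj *o) { o->refs++; }
static void put(struct obj *o) { o->refs--; }

static int lock_interruptible(struct obj *o, bool interrupted)
{
	get(o);
	if (interrupted) {	/* mutex_lock_interruptible() failed */
		put(o);		/* the fix: balance our own get */
		return -EINTR;
	}
	o->locked = true;
	return 0;
}

int main(void)
{
	struct obj o = { .refs = 1 };	/* caller's own reference */

	lock_interruptible(&o, true);
	printf("after failed lock: refs=%d (no leak)\n", o.refs);
	lock_interruptible(&o, false);
	printf("after locked:      refs=%d (held with lock)\n", o.refs);
	return 0;
}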
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 58c8485a0715..923379972707 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -295,7 +295,8 @@ static int ci_role_show(struct seq_file *s, void *data)
{
struct ci_hdrc *ci = s->private;
- seq_printf(s, "%s\n", ci_role(ci)->name);
+ if (ci->role != CI_ROLE_END)
+ seq_printf(s, "%s\n", ci_role(ci)->name);
return 0;
}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index d8a045fc1fdb..aff086ca97e4 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1982,6 +1982,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
int ci_hdrc_gadget_init(struct ci_hdrc *ci)
{
struct ci_role_driver *rdrv;
+ int ret;
if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
return -ENXIO;
@@ -1994,7 +1995,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
rdrv->stop = udc_id_switch_for_host;
rdrv->irq = udc_irq;
rdrv->name = "gadget";
- ci->roles[CI_ROLE_GADGET] = rdrv;
- return udc_start(ci);
+ ret = udc_start(ci);
+ if (!ret)
+ ci->roles[CI_ROLE_GADGET] = rdrv;
+
+ return ret;
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 325cbc9c35d8..b1298f093f13 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -949,6 +949,14 @@ int usb_get_bos_descriptor(struct usb_device *dev)
dev->bos->ss_id =
(struct usb_ss_container_id_descriptor *)buffer;
break;
+ case USB_CAP_TYPE_CONFIG_SUMMARY:
+ /* one such desc per configuration */
+ if (!dev->bos->num_config_summary_desc)
+ dev->bos->config_summary =
+ (struct usb_config_summary_descriptor *)buffer;
+
+ dev->bos->num_config_summary_desc++;
+ break;
default:
break;
}
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 358ca8dd784f..72d1109f13eb 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -19,6 +19,8 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v3.h>
#include "usb.h"
static inline const char *plural(int n)
@@ -40,6 +42,36 @@ static int is_activesync(struct usb_interface_descriptor *desc)
&& desc->bInterfaceProtocol == 1;
}
+static int usb_audio_max_rev_config(struct usb_host_bos *bos)
+{
+ int desc_cnt, func_cnt, numfunc;
+ int num_cfg_desc;
+ struct usb_config_summary_descriptor *conf_summary;
+
+ if (!bos || !bos->config_summary)
+ goto done;
+
+ conf_summary = bos->config_summary;
+ num_cfg_desc = bos->num_config_summary_desc;
+
+ for (desc_cnt = 0; desc_cnt < num_cfg_desc; desc_cnt++) {
+ numfunc = conf_summary->bNumFunctions;
+ for (func_cnt = 0; func_cnt < numfunc; func_cnt++) {
+ /* look for BADD 3.0 */
+ if (conf_summary->cs_info[func_cnt].bClass ==
+ USB_CLASS_AUDIO &&
+ conf_summary->cs_info[func_cnt].bProtocol ==
+ UAC_VERSION_3 &&
+ conf_summary->cs_info[func_cnt].bSubClass !=
+ FULL_ADC_PROFILE)
+ return conf_summary->bConfigurationValue;
+ }
+ }
+
+done:
+ return -EINVAL;
+}
+
int usb_choose_configuration(struct usb_device *udev)
{
int i;
@@ -130,7 +162,6 @@ int usb_choose_configuration(struct usb_device *udev)
best = c;
break;
}
-
/* If all the remaining configs are vendor-specific,
* choose the first one. */
else if (!best)
@@ -143,7 +174,10 @@ int usb_choose_configuration(struct usb_device *udev)
insufficient_power, plural(insufficient_power));
if (best) {
- i = best->desc.bConfigurationValue;
+ /* choose usb audio class preferred config if available */
+ i = usb_audio_max_rev_config(udev->bos);
+ if (i < 0)
+ i = best->desc.bConfigurationValue;
dev_dbg(&udev->dev,
"configuration #%d chosen from %d choice%s\n",
i, num_configs, plural(num_configs));
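
usb_audio_max_rev_config() above scans the BOS config-summary descriptors for a configuration carrying a BADD (UAC3, non-full-profile) audio function, and usb_choose_configuration() then prefers that configuration over the heuristic pick. A hedged userspace model of the scan, with simplified stand-in structs and demo constant values (the real layouts come from the BOS parsing above and <linux/usb/audio-v3.h>):

#include <stdio.h>

#define USB_CLASS_AUDIO   0x01
#define UAC_VERSION_3     0x30
#define FULL_ADC_PROFILE  0x01	/* stand-in value for this demo */

struct cs_func { unsigned char bClass, bSubClass, bProtocol; };
struct cfg_summary {
	unsigned char bConfigurationValue;
	unsigned char bNumFunctions;
	struct cs_func cs_info[4];
};

/* Return the first config carrying a BADD (non-full-profile) UAC3
 * audio function, or -1 if none is found. */
static int audio_max_rev_config(const struct cfg_summary *cs, int n)
{
	for (int d = 0; d < n; d++)
		for (int f = 0; f < cs[d].bNumFunctions; f++)
			if (cs[d].cs_info[f].bClass == USB_CLASS_AUDIO &&
			    cs[d].cs_info[f].bProtocol == UAC_VERSION_3 &&
			    cs[d].cs_info[f].bSubClass != FULL_ADC_PROFILE)
				return cs[d].bConfigurationValue;
	return -1;
}

int main(void)
{
	struct cfg_summary cs[2] = {
		{ .bConfigurationValue = 1, .bNumFunctions = 1,
		  .cs_info = { { 0x03, 0x00, 0x00 } } },	/* HID only */
		{ .bConfigurationValue = 2, .bNumFunctions = 1,
		  .cs_info = { { USB_CLASS_AUDIO, 0x02, UAC_VERSION_3 } } },
	};

	printf("preferred config: %d\n", audio_max_rev_config(cs, 2)); /* 2 */
	return 0;
}
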
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 59d6ac67d072..9b7274821d0b 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -399,7 +399,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
- smp_wmb(); /* ensure the write of bh->state is complete */
+ /*
+ * Ensure the reading of thread_wakeup_needed
+ * and the writing of bh->state are completed
+ */
+ smp_mb();
/* Tell the main thread that something has happened */
common->thread_wakeup_needed = 1;
if (common->thread_task)
@@ -649,7 +653,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze)
}
__set_current_state(TASK_RUNNING);
common->thread_wakeup_needed = 0;
- smp_rmb(); /* ensure the latest bh->state is visible */
+
+ /*
+ * Ensure the writing of thread_wakeup_needed
+ * and the reading of bh->state are completed
+ */
+ smp_mb();
return rc;
}
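
The f_mass_storage.c change upgrades smp_wmb()/smp_rmb() to smp_mb() because the accesses being ordered are store-then-load pairs (write thread_wakeup_needed, then read bh->state, and the mirror image in the waker); only a full barrier orders a store against a later load. A single-threaded C11 sketch of the intended ordering, with seq_cst fences standing in for smp_mb():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int bh_state;		/* models bh->state */
static atomic_int wakeup_needed;	/* models thread_wakeup_needed */

static void waker(void)
{
	atomic_store_explicit(&bh_state, 1, memory_order_relaxed);
	/* Full barrier: the bh_state store must be ordered before the
	 * wakeup flag is touched, as smp_mb() guarantees. */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_store_explicit(&wakeup_needed, 1, memory_order_relaxed);
}

static void sleeper(void)
{
	if (atomic_load_explicit(&wakeup_needed, memory_order_relaxed)) {
		atomic_store_explicit(&wakeup_needed, 0, memory_order_relaxed);
		/* Full barrier again before looking at bh_state, so a
		 * stale value cannot be read after clearing the flag. */
		atomic_thread_fence(memory_order_seq_cst);
		printf("state=%d\n",
		       atomic_load_explicit(&bh_state, memory_order_relaxed));
	}
}

int main(void)
{
	waker();
	sleeper();	/* single-threaded demo; the ordering matters on SMP */
	return 0;
}
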
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index df2e6f783318..527de56f832f 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -335,8 +335,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
st->global_error = 1;
}
}
- st->va += PAGE_SIZE * nr;
- st->index += nr;
+ st->va += XEN_PAGE_SIZE * nr;
+ st->index += nr / XEN_PFN_PER_PAGE;
return 0;
}
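
The privcmd fix matters when the kernel page size exceeds Xen's fixed 4 KiB page: nr counts Xen pages, so the virtual address must advance by XEN_PAGE_SIZE per entry while the kernel-page index advances by nr / XEN_PFN_PER_PAGE. A worked example, assuming a 64 KiB kernel page:

#include <stdio.h>

#define XEN_PAGE_SIZE	4096UL
#define KERNEL_PAGE_SIZE 65536UL			/* assumed 64 KiB page */
#define XEN_PFN_PER_PAGE (KERNEL_PAGE_SIZE / XEN_PAGE_SIZE)	/* 16 */

int main(void)
{
	unsigned long va = 0x100000, index = 0;
	int nr = 32;	/* one batch: 32 Xen pages */

	va += XEN_PAGE_SIZE * nr;	/* 32 * 4K = 128K of VA */
	index += nr / XEN_PFN_PER_PAGE;	/* 32 / 16 = 2 kernel pages */

	printf("va=%#lx index=%lu\n", va, index);	/* va=0x120000 index=2 */
	return 0;
}
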
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2a2e370399ba..c36a03fa7678 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3854,6 +3854,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
info->space_info_kobj, "%s",
alloc_name(found->flags));
if (ret) {
+ percpu_counter_destroy(&found->total_bytes_pinned);
kfree(found);
return ret;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 353f4bae658c..d4a6eef31854 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2771,7 +2771,7 @@ static long btrfs_fallocate(struct file *file, int mode,
if (!ret)
ret = btrfs_prealloc_file_range(inode, mode,
range->start,
- range->len, 1 << inode->i_blkbits,
+ range->len, i_blocksize(inode),
offset + len, &alloc_hint);
list_del(&range->list);
kfree(range);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3cff6523f27d..863fa0f1972b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7318,8 +7318,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
int found = false;
void **pagep = NULL;
struct page *page = NULL;
- int start_idx;
- int end_idx;
+ unsigned long start_idx;
+ unsigned long end_idx;
start_idx = start >> PAGE_CACHE_SHIFT;
diff --git a/fs/buffer.c b/fs/buffer.c
index fc35dd27cc0f..14ce7b24f32a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2347,7 +2347,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
loff_t pos, loff_t *bytes)
{
struct inode *inode = mapping->host;
- unsigned blocksize = 1 << inode->i_blkbits;
+ unsigned int blocksize = i_blocksize(inode);
struct page *page;
void *fsdata;
pgoff_t index, curidx;
@@ -2427,8 +2427,8 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
get_block_t *get_block, loff_t *bytes)
{
struct inode *inode = mapping->host;
- unsigned blocksize = 1 << inode->i_blkbits;
- unsigned zerofrom;
+ unsigned int blocksize = i_blocksize(inode);
+ unsigned int zerofrom;
int err;
err = cont_expand_zero(file, mapping, pos, bytes);
@@ -2790,7 +2790,7 @@ int nobh_truncate_page(struct address_space *mapping,
struct buffer_head map_bh;
int err;
- blocksize = 1 << inode->i_blkbits;
+ blocksize = i_blocksize(inode);
length = offset & (blocksize - 1);
/* Block boundary? Nothing to do */
@@ -2868,7 +2868,7 @@ int block_truncate_page(struct address_space *mapping,
struct buffer_head *bh;
int err;
- blocksize = 1 << inode->i_blkbits;
+ blocksize = i_blocksize(inode);
length = offset & (blocksize - 1);
/* Block boundary? Nothing to do */
@@ -2980,7 +2980,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
struct inode *inode = mapping->host;
tmp.b_state = 0;
tmp.b_blocknr = 0;
- tmp.b_size = 1 << inode->i_blkbits;
+ tmp.b_size = i_blocksize(inode);
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index b7d218a168fb..c6a1ec110c01 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -697,7 +697,7 @@ static int ceph_writepages_start(struct address_space *mapping,
struct pagevec pvec;
int done = 0;
int rc = 0;
- unsigned wsize = 1 << inode->i_blkbits;
+ unsigned int wsize = i_blocksize(inode);
struct ceph_osd_request *req = NULL;
int do_sync = 0;
loff_t snap_size, i_size;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0f1517d0b969..7cdf8ad1bb5b 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -589,7 +589,7 @@ static int dio_set_defer_completion(struct dio *dio)
/*
* Call into the fs to map some more disk blocks. We record the current number
* of available blocks at sdio->blocks_available. These are in units of the
- * fs blocksize, (1 << inode->i_blkbits).
+ * fs blocksize, i_blocksize(inode).
*
* The fs is allowed to map lots of blocks at once. If it wants to do that,
* it uses the passed inode-relative block number as the file offset, as usual.
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 8a456f9b8a44..61d5bfc7318c 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4902,6 +4902,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+ if (ret >= 0)
+ ext4_update_inode_fsync_trans(handle, inode, 1);
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
@@ -5597,6 +5599,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
+ ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
@@ -5770,6 +5773,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
+ if (ret >= 0)
+ ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 0d24ebcd7c9e..8772bfc3415b 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -463,47 +463,27 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
(pgoff_t)num);
- if (nr_pages == 0) {
- if (whence == SEEK_DATA)
- break;
-
- BUG_ON(whence != SEEK_HOLE);
- /*
- * If this is the first time to go into the loop and
- * offset is not beyond the end offset, it will be a
- * hole at this offset
- */
- if (lastoff == startoff || lastoff < endoff)
- found = 1;
+ if (nr_pages == 0)
break;
- }
-
- /*
- * If this is the first time to go into the loop and
- * offset is smaller than the first page offset, it will be a
- * hole at this offset.
- */
- if (lastoff == startoff && whence == SEEK_HOLE &&
- lastoff < page_offset(pvec.pages[0])) {
- found = 1;
- break;
- }
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
struct buffer_head *bh, *head;
/*
- * If the current offset is not beyond the end of given
- * range, it will be a hole.
+ * If the current offset is smaller than the page offset,
+ * there is a hole at this offset.
*/
- if (lastoff < endoff && whence == SEEK_HOLE &&
- page->index > end) {
+ if (whence == SEEK_HOLE && lastoff < endoff &&
+ lastoff < page_offset(pvec.pages[i])) {
found = 1;
*offset = lastoff;
goto out;
}
+ if (page->index > end)
+ goto out;
+
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping)) {
@@ -543,20 +523,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
unlock_page(page);
}
- /*
- * The no. of pages is less than our desired, that would be a
- * hole in there.
- */
- if (nr_pages < num && whence == SEEK_HOLE) {
- found = 1;
- *offset = lastoff;
+ /* The number of pages is less than we asked for; we are done. */
+ if (nr_pages < num)
break;
- }
index = pvec.pages[i - 1]->index + 1;
pagevec_release(&pvec);
} while (index <= end);
+ if (whence == SEEK_HOLE && lastoff < endoff) {
+ found = 1;
+ *offset = lastoff;
+ }
out:
pagevec_release(&pvec);
return found;
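
The rewritten ext4 SEEK_HOLE scan reduces to one invariant: walking pages in offset order, a hole exists wherever the last processed offset falls short of the next page's start, or of the end of the range once the pages run out. A compact userspace model of that rule over a sorted list of existing page offsets (names and sizes are illustrative):

#include <stdio.h>

#define PAGE_SZ 4096L

/* Return the offset of the first hole in [start, end), given the
 * sorted start offsets of the pages that actually exist. */
static long find_hole(const long *pages, int n, long start, long end)
{
	long lastoff = start;

	for (int i = 0; i < n && pages[i] < end; i++) {
		if (lastoff < pages[i])		/* gap before this page */
			return lastoff;
		lastoff = pages[i] + PAGE_SZ;	/* page fully covered */
	}
	return lastoff < end ? lastoff : -1;	/* hole after the last page? */
}

int main(void)
{
	long pages[] = { 0, 4096, 12288 };	/* page at 8192 is missing */

	printf("hole at %ld\n", find_hole(pages, 3, 0, 16384)); /* 8192 */
	return 0;
}
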
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2892a799f6f8..521abc5c743d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2059,7 +2059,7 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
{
struct inode *inode = mpd->inode;
int err;
- ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
+ ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
>> inode->i_blkbits;
do {
@@ -3855,6 +3855,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
+ if (ret >= 0)
+ ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
out_dio:
@@ -5230,8 +5232,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
/* No extended attributes present */
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
- memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
- new_extra_isize);
+ memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+ EXT4_I(inode)->i_extra_isize, 0,
+ new_extra_isize - EXT4_I(inode)->i_extra_isize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
return 0;
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index c21826be1cb3..3a2594665b44 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -626,9 +626,6 @@ resizefs_out:
struct ext4_encryption_policy policy;
int err = 0;
- if (!ext4_has_feature_encrypt(sb))
- return -EOPNOTSUPP;
-
if (copy_from_user(&policy,
(struct ext4_encryption_policy __user *)arg,
sizeof(policy))) {
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 7861d801b048..05048fcfd602 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -187,7 +187,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
if (PageUptodate(page))
return 0;
- blocksize = 1 << inode->i_blkbits;
+ blocksize = i_blocksize(inode);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index f77b3258454a..7965957dd0e6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1623,6 +1623,10 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
+ struct f2fs_summary_block *s_sits =
+ CURSEG_I(sbi, CURSEG_COLD_DATA)->sum_blk;
+ struct f2fs_summary_block *s_nats =
+ CURSEG_I(sbi, CURSEG_HOT_DATA)->sum_blk;
int type = CURSEG_HOT_DATA;
int err;
@@ -1649,6 +1653,11 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
return err;
}
+ /* sanity check for summary blocks */
+ if (nats_in_cursum(s_nats) > NAT_JOURNAL_ENTRIES ||
+ sits_in_cursum(s_sits) > SIT_JOURNAL_ENTRIES)
+ return -EINVAL;
+
return 0;
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index ba5e702b19b4..5d3e745d33ae 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1078,6 +1078,8 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
unsigned int total, fsmeta;
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ unsigned int main_segs, blocks_per_seg;
+ int i;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -1089,6 +1091,22 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
if (unlikely(fsmeta >= total))
return 1;
+ main_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
+ blocks_per_seg = sbi->blocks_per_seg;
+
+ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
+ le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) {
+ return 1;
+ }
+ }
+ for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
+ if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
+ le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) {
+ return 1;
+ }
+ }
+
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 8f9176caf098..c8d58c5ac8ae 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -758,7 +758,7 @@ static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
sb->s_blocksize - offset : toread;
tmp_bh.b_state = 0;
- tmp_bh.b_size = 1 << inode->i_blkbits;
+ tmp_bh.b_size = i_blocksize(inode);
err = jfs_get_block(inode, blk, &tmp_bh, 0);
if (err)
return err;
@@ -798,7 +798,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type,
sb->s_blocksize - offset : towrite;
tmp_bh.b_state = 0;
- tmp_bh.b_size = 1 << inode->i_blkbits;
+ tmp_bh.b_size = i_blocksize(inode);
err = jfs_get_block(inode, blk, &tmp_bh, 1);
if (err)
goto out;
diff --git a/fs/mpage.c b/fs/mpage.c
index 0fd48fdcc1b1..f37bb01a333b 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -147,7 +147,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
SetPageUptodate(page);
return;
}
- create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+ create_empty_buffers(page, i_blocksize(inode), 0);
}
head = page_buffers(page);
page_bh = head;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 52ee0b73ab4a..5b21b1ca2341 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2421,6 +2421,20 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
}
EXPORT_SYMBOL_GPL(nfs_may_open);
+static int nfs_execute_ok(struct inode *inode, int mask)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ int ret;
+
+ if (mask & MAY_NOT_BLOCK)
+ ret = nfs_revalidate_inode_rcu(server, inode);
+ else
+ ret = nfs_revalidate_inode(server, inode);
+ if (ret == 0 && !execute_ok(inode))
+ ret = -EACCES;
+ return ret;
+}
+
int nfs_permission(struct inode *inode, int mask)
{
struct rpc_cred *cred;
@@ -2438,6 +2452,9 @@ int nfs_permission(struct inode *inode, int mask)
case S_IFLNK:
goto out;
case S_IFREG:
+ if ((mask & MAY_OPEN) &&
+ nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN))
+ return 0;
break;
case S_IFDIR:
/*
@@ -2470,8 +2487,8 @@ force_lookup:
res = PTR_ERR(cred);
}
out:
- if (!res && (mask & MAY_EXEC) && !execute_ok(inode))
- res = -EACCES;
+ if (!res && (mask & MAY_EXEC))
+ res = nfs_execute_ok(inode, mask);
dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n",
inode->i_sb->s_id, inode->i_ino, mask, res);
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index c29d9421bd5e..0976f8dad4ce 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -50,7 +50,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
{
struct nfsd4_layout_seg *seg = &args->lg_seg;
struct super_block *sb = inode->i_sb;
- u32 block_size = (1 << inode->i_blkbits);
+ u32 block_size = i_blocksize(inode);
struct pnfs_block_extent *bex;
struct iomap iomap;
u32 device_generation = 0;
@@ -151,7 +151,7 @@ nfsd4_block_proc_layoutcommit(struct inode *inode,
int error;
nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
- lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
+ lcp->lc_up_len, &iomaps, i_blocksize(inode));
if (nr_iomaps < 0)
return nfserrno(nr_iomaps);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 7d5351cd67fb..209dbfc50cd4 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1690,6 +1690,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
opdesc->op_get_currentstateid(cstate, &op->u);
op->status = opdesc->op_func(rqstp, cstate, &op->u);
+ /* Only from SEQUENCE */
+ if (cstate->status == nfserr_replay_cache) {
+ dprintk("%s NFS4.1 replay from cache\n", __func__);
+ status = op->status;
+ goto out;
+ }
if (!op->status) {
if (opdesc->op_set_currentstateid)
opdesc->op_set_currentstateid(cstate, &op->u);
@@ -1700,14 +1706,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
if (need_wrongsec_check(rqstp))
op->status = check_nfsd_access(current_fh->fh_export, rqstp);
}
-
encode_op:
- /* Only from SEQUENCE */
- if (cstate->status == nfserr_replay_cache) {
- dprintk("%s NFS4.1 replay from cache\n", __func__);
- status = op->status;
- goto out;
- }
if (op->status == nfserr_replay_me) {
op->replay = &cstate->replay_owner->so_replay;
nfsd4_encode_replay(&resp->xdr, op);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index c3e1cb481fe0..3f68a25f2169 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2753,9 +2753,16 @@ out_acl:
}
#endif /* CONFIG_NFSD_PNFS */
if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
- status = nfsd4_encode_bitmap(xdr, NFSD_SUPPATTR_EXCLCREAT_WORD0,
- NFSD_SUPPATTR_EXCLCREAT_WORD1,
- NFSD_SUPPATTR_EXCLCREAT_WORD2);
+ u32 supp[3];
+
+ supp[0] = nfsd_suppattrs0(minorversion);
+ supp[1] = nfsd_suppattrs1(minorversion);
+ supp[2] = nfsd_suppattrs2(minorversion);
+ supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
+ supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
+ supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
+
+ status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]);
if (status)
goto out;
}
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index a35ae35e6932..cd39b57288c2 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -55,7 +55,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
brelse(bh);
BUG();
}
- memset(bh->b_data, 0, 1 << inode->i_blkbits);
+ memset(bh->b_data, 0, i_blocksize(inode));
bh->b_bdev = inode->i_sb->s_bdev;
bh->b_blocknr = blocknr;
set_buffer_mapped(bh);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index ac2f64943ff4..00877ef0b120 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -55,7 +55,7 @@ void nilfs_inode_add_blocks(struct inode *inode, int n)
{
struct nilfs_root *root = NILFS_I(inode)->i_root;
- inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
+ inode_add_bytes(inode, i_blocksize(inode) * n);
if (root)
atomic64_add(n, &root->blocks_count);
}
@@ -64,7 +64,7 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
struct nilfs_root *root = NILFS_I(inode)->i_root;
- inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
+ inode_sub_bytes(inode, i_blocksize(inode) * n);
if (root)
atomic64_sub(n, &root->blocks_count);
}
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 1125f40233ff..612a2457243d 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -60,7 +60,7 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
set_buffer_mapped(bh);
kaddr = kmap_atomic(bh->b_page);
- memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
+ memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
if (init_block)
init_block(inode, bh, kaddr);
flush_dcache_page(bh->b_page);
@@ -503,7 +503,7 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
mi->mi_entry_size = entry_size;
- mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
+ mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 3b65adaae7e4..2f27c935bd57 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -719,7 +719,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
lock_page(page);
if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+ create_empty_buffers(page, i_blocksize(inode), 0);
unlock_page(page);
bh = head = page_buffers(page);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index e6795c7c76a8..e4184bd2a954 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1103,7 +1103,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
int ret = 0;
struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
unsigned int block_end, block_start;
- unsigned int bsize = 1 << inode->i_blkbits;
+ unsigned int bsize = i_blocksize(inode);
if (!page_has_buffers(page))
create_empty_buffers(page, bsize, 0);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 56dd3957cc91..1d738723a41a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -808,7 +808,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
/* We know that zero_from is block aligned */
for (block_start = zero_from; block_start < zero_to;
block_start = block_end) {
- block_end = block_start + (1 << inode->i_blkbits);
+ block_end = block_start + i_blocksize(inode);
/*
* block_start is block-aligned. Bump it by one to force
diff --git a/fs/pnode.c b/fs/pnode.c
index b5f97c605d98..e4e428d621e9 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -504,9 +504,14 @@ static struct mount *next_descendent(struct mount *root, struct mount *cur)
if (!IS_MNT_NEW(cur) && !list_empty(&cur->mnt_slave_list))
return first_slave(cur);
do {
- if (cur->mnt_slave.next != &cur->mnt_master->mnt_slave_list)
- return next_slave(cur);
- cur = cur->mnt_master;
+ struct mount *master = cur->mnt_master;
+
+ if (!master || cur->mnt_slave.next != &master->mnt_slave_list) {
+ struct mount *next = next_slave(cur);
+
+ return (next == root) ? NULL : next;
+ }
+ cur = master;
} while (cur != root);
return NULL;
}
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 8f5ccdf81c25..38187300a2b4 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -189,7 +189,7 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
int ret = 0;
th.t_trans_id = 0;
- blocksize = 1 << inode->i_blkbits;
+ blocksize = i_blocksize(inode);
if (logit) {
reiserfs_write_lock(s);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 3d8e7e671d5b..60ba35087d12 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -524,7 +524,7 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode,
* referenced in convert_tail_for_hole() that may be called from
* reiserfs_get_block()
*/
- bh_result->b_size = (1 << inode->i_blkbits);
+ bh_result->b_size = i_blocksize(inode);
ret = reiserfs_get_block(inode, iblock, bh_result,
create | GET_BLOCK_NO_DANGLE);
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 00ae21151f52..676e394e07be 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -199,7 +199,8 @@ static struct dentry *__sdcardfs_interpose(struct dentry *dentry,
ret_dentry = d_splice_alias(inode, dentry);
dentry = ret_dentry ?: dentry;
- update_derived_permission_lock(dentry);
+ if (!IS_ERR(dentry))
+ update_derived_permission_lock(dentry);
out:
return ret_dentry;
}
diff --git a/fs/stat.c b/fs/stat.c
index d4a61d8dc021..004dd77c3b93 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -31,7 +31,7 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
stat->ctime = inode->i_ctime;
- stat->blksize = (1 << inode->i_blkbits);
+ stat->blksize = i_blocksize(inode);
stat->blocks = inode->i_blocks;
}
@@ -454,6 +454,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes)
inode->i_bytes -= 512;
}
}
+EXPORT_SYMBOL(__inode_add_bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 566df9b5a6cb..7be3166ba553 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1206,7 +1206,7 @@ int udf_setsize(struct inode *inode, loff_t newsize)
{
int err;
struct udf_inode_info *iinfo;
- int bsize = 1 << inode->i_blkbits;
+ int bsize = i_blocksize(inode);
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index dc5fae601c24..637e17cb0edd 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -81,7 +81,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
ufs_error (sb, "ufs_free_fragments",
"bit already cleared for fragment %u", i);
}
-
+
+ inode_sub_bytes(inode, count << uspi->s_fshift);
fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
uspi->cs_total.cs_nffree += count;
fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -183,6 +184,7 @@ do_more:
ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
}
ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
+ inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, 1);
@@ -494,6 +496,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
return 0;
}
+static bool try_add_frags(struct inode *inode, unsigned frags)
+{
+ unsigned size = frags * i_blocksize(inode);
+ spin_lock(&inode->i_lock);
+ __inode_add_bytes(inode, size);
+ if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
+ __inode_sub_bytes(inode, size);
+ spin_unlock(&inode->i_lock);
+ return false;
+ }
+ spin_unlock(&inode->i_lock);
+ return true;
+}
+
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
unsigned oldcount, unsigned newcount)
{
@@ -530,6 +546,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
for (i = oldcount; i < newcount; i++)
if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
return 0;
+
+ if (!try_add_frags(inode, count))
+ return 0;
/*
* Block can be extended
*/
@@ -647,6 +666,7 @@ cg_found:
ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
i = uspi->s_fpb - count;
+ inode_sub_bytes(inode, i << uspi->s_fshift);
fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
uspi->cs_total.cs_nffree += i;
fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
@@ -657,6 +677,8 @@ cg_found:
result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
if (result == INVBLOCK)
return 0;
+ if (!try_add_frags(inode, count))
+ return 0;
for (i = 0; i < count; i++)
ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
@@ -716,6 +738,8 @@ norot:
return INVBLOCK;
ucpi->c_rotor = result;
gotit:
+ if (!try_add_frags(inode, uspi->s_fpb))
+ return 0;
blkno = ufs_fragstoblks(result);
ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
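
try_add_frags() detects i_blocks overflow by charging the bytes provisionally and checking whether the resulting count still fits UFS's 32-bit on-disk field: (u32)inode->i_blocks != inode->i_blocks is true exactly when bits above bit 31 are set. The check in isolation:

#include <stdint.h>
#include <stdio.h>

/* True once blocks no longer fits a 32-bit on-disk block count. */
static int overflows_u32(uint64_t blocks)
{
	return (uint32_t)blocks != blocks;
}

int main(void)
{
	printf("%d\n", overflows_u32(0xffffffffULL));	/* 0: still fits */
	printf("%d\n", overflows_u32(0x100000000ULL));	/* 1: overflowed */
	return 0;
}
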
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index a064cf44b143..1f69bb9b1e9d 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to,
p = ufs_get_direct_data_ptr(uspi, ufsi, block);
tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
- new_size, err, locked_page);
+ new_size - (lastfrag & uspi->s_fpbmask), err,
+ locked_page);
return tmp != 0;
}
@@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index,
goal += uspi->s_fpb;
}
tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
- goal, uspi->s_fpb, err, locked_page);
+ goal, nfrags, err, locked_page);
if (!tmp) {
*err = -ENOSPC;
@@ -402,7 +403,9 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
if (!create) {
phys64 = ufs_frag_map(inode, offsets, depth);
- goto out;
+ if (phys64)
+ map_bh(bh_result, sb, phys64 + frag);
+ return 0;
}
/* This code entered only while writing ....? */
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index f6390eec02ca..10f364490833 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -746,6 +746,23 @@ static void ufs_put_super(struct super_block *sb)
return;
}
+static u64 ufs_max_bytes(struct super_block *sb)
+{
+ struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+ int bits = uspi->s_apbshift;
+ u64 res;
+
+ if (bits > 21)
+ res = ~0ULL;
+ else
+ res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
+ (1LL << (3*bits));
+
+ if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
+ return MAX_LFS_FILESIZE;
+ return res << uspi->s_bshift;
+}
+
static int ufs_fill_super(struct super_block *sb, void *data, int silent)
{
struct ufs_sb_info * sbi;
@@ -1212,6 +1229,7 @@ magic_found:
"fast symlink size (%u)\n", uspi->s_maxsymlinklen);
uspi->s_maxsymlinklen = maxsymlen;
}
+ sb->s_maxbytes = ufs_max_bytes(sb);
sb->s_max_links = UFS_LINK_MAX;
inode = ufs_iget(sb, UFS_ROOTINO);
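
ufs_max_bytes() sums the direct blocks plus the single, double, and triple indirect trees: with 2^bits pointers per indirect block, that is UFS_NDADDR + 2^bits + 2^(2*bits) + 2^(3*bits) blocks, capped at MAX_LFS_FILESIZE. A worked computation under assumed geometry (12 direct pointers, 1 KiB blocks holding 256 pointers, so bits = 8):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const int UFS_NDADDR = 12;	/* direct pointers in the inode */
	const int bits = 8;		/* assumed: 256 pointers per block */
	const int bshift = 10;		/* assumed: 1 KiB blocks */

	uint64_t blocks = UFS_NDADDR + (1ULL << bits) +
			  (1ULL << (2 * bits)) + (1ULL << (3 * bits));

	/* 12 + 256 + 65536 + 16777216 = 16843020 blocks, roughly 16 GiB */
	printf("%llu blocks, %llu bytes\n",
	       (unsigned long long)blocks,
	       (unsigned long long)(blocks << bshift));
	return 0;
}
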
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 954175928240..3f9463f8cf2f 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -473,15 +473,19 @@ static inline unsigned _ubh_find_last_zero_bit_(
static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
+ u8 mask;
switch (uspi->s_fpb) {
case 8:
return (*ubh_get_addr (ubh, begin + block) == 0xff);
case 4:
- return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
+ mask = 0x0f << ((block & 0x01) << 2);
+ return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
case 2:
- return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
+ mask = 0x03 << ((block & 0x03) << 1);
+ return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
case 1:
- return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
+ mask = 0x01 << (block & 0x07);
+ return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
}
return 0;
}
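
The _ubh_isblockset_() fix replaces a whole-byte equality test with a masked compare, so fragments belonging to neighbouring blocks in the same bitmap byte no longer change the answer. The s_fpb == 4 case (two blocks per byte, nibble masks) in isolation:

#include <stdio.h>

/* Is block `block` fully set in this fragment-bitmap byte? (fpb == 4) */
static int isblockset4(unsigned char byte, unsigned block)
{
	unsigned char mask = 0x0f << ((block & 0x01) << 2);

	return (byte & mask) == mask;	/* old buggy test: byte == mask */
}

int main(void)
{
	/* 0xff: both nibbles set; the equality test against 0x0f said 0. */
	printf("%d\n", isblockset4(0xff, 0));	/* fixed: 1 */
	printf("%d\n", isblockset4(0xf0, 0));	/* low nibble clear: 0 */
	return 0;
}
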
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 119c2422aac7..75884aecf920 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -2179,8 +2179,10 @@ xfs_bmap_add_extent_delay_real(
}
temp = xfs_bmap_worst_indlen(bma->ip, temp);
temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
- diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
- (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+ diff = (int)(temp + temp2 -
+ (startblockval(PREV.br_startblock) -
+ (bma->cur ?
+ bma->cur->bc_private.b.allocated : 0)));
if (diff > 0) {
error = xfs_mod_fdblocks(bma->ip->i_mount,
-((int64_t)diff), false);
@@ -2232,7 +2234,6 @@ xfs_bmap_add_extent_delay_real(
temp = da_new;
if (bma->cur)
temp += bma->cur->bc_private.b.allocated;
- ASSERT(temp <= da_old);
if (temp < da_old)
xfs_mod_fdblocks(bma->ip->i_mount,
(int64_t)(da_old - temp), false);
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index af1bbee5586e..28bc5e78b110 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4064,7 +4064,7 @@ xfs_btree_change_owner(
xfs_btree_readahead_ptr(cur, ptr, 1);
/* save for the next iteration of the loop */
- lptr = *ptr;
+ xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
}
/* for each buffer in the level */
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 29e7e5dd5178..187b80267ff9 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -288,7 +288,7 @@ xfs_map_blocks(
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- ssize_t count = 1 << inode->i_blkbits;
+ ssize_t count = i_blocksize(inode);
xfs_fileoff_t offset_fsb, end_fsb;
int error = 0;
int bmapi_flags = XFS_BMAPI_ENTIRE;
@@ -921,7 +921,7 @@ xfs_aops_discard_page(
break;
}
next_buffer:
- offset += 1 << inode->i_blkbits;
+ offset += i_blocksize(inode);
} while ((bh = bh->b_this_page) != head);
@@ -1363,7 +1363,7 @@ xfs_map_trim_size(
offset + mapping_size >= i_size_read(inode)) {
/* limit mapping to block that spans EOF */
mapping_size = roundup_64(i_size_read(inode) - offset,
- 1 << inode->i_blkbits);
+ i_blocksize(inode));
}
if (mapping_size > LONG_MAX)
mapping_size = LONG_MAX;
@@ -1395,7 +1395,7 @@ __xfs_get_blocks(
return -EIO;
offset = (xfs_off_t)iblock << inode->i_blkbits;
- ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+ ASSERT(bh_result->b_size >= i_blocksize(inode));
size = bh_result->b_size;
if (!create && direct && offset >= i_size_read(inode))
@@ -1968,7 +1968,7 @@ xfs_vm_set_page_dirty(
if (offset < end_offset)
set_buffer_dirty(bh);
bh = bh->b_this_page;
- offset += 1 << inode->i_blkbits;
+ offset += i_blocksize(inode);
} while (bh != head);
}
/*
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index dd4824589470..234331227c0c 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -112,6 +112,7 @@ typedef struct attrlist_cursor_kern {
*========================================================================*/
+/* Return 0 on success, or -errno; other state communicated via *context */
typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
unsigned char *, int, int, unsigned char *);
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 4fa14820e2e2..c8be331a3196 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -108,16 +108,14 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
(int)sfe->namelen,
(int)sfe->valuelen,
&sfe->nameval[sfe->namelen]);
-
+ if (error)
+ return error;
/*
* Either search callback finished early or
* didn't fit it all in the buffer after all.
*/
if (context->seen_enough)
break;
-
- if (error)
- return error;
sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
}
trace_xfs_attr_list_sf_all(context);
@@ -581,7 +579,7 @@ xfs_attr_put_listent(
trace_xfs_attr_list_full(context);
alist->al_more = 1;
context->seen_enough = 1;
- return 1;
+ return 0;
}
aep = (attrlist_ent_t *)&context->alist[context->firstu];
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 832764ee035a..863e1bff403b 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -682,7 +682,7 @@ xfs_getbmap(
* extents.
*/
if (map[i].br_startblock == DELAYSTARTBLOCK &&
- map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+ map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
ASSERT((iflags & BMV_IF_DELALLOC) != 0);
if (map[i].br_startblock == HOLESTARTBLOCK &&
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 8146b0cf20ce..dcb70969ff1c 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -979,6 +979,8 @@ void
xfs_buf_unlock(
struct xfs_buf *bp)
{
+ ASSERT(xfs_buf_islocked(bp));
+
XB_CLEAR_OWNER(bp);
up(&bp->b_sema);
@@ -1713,6 +1715,28 @@ error:
}
/*
+ * Cancel a delayed write list.
+ *
+ * Remove each buffer from the list, clear the delwri queue flag and drop the
+ * associated buffer reference.
+ */
+void
+xfs_buf_delwri_cancel(
+ struct list_head *list)
+{
+ struct xfs_buf *bp;
+
+ while (!list_empty(list)) {
+ bp = list_first_entry(list, struct xfs_buf, b_list);
+
+ xfs_buf_lock(bp);
+ bp->b_flags &= ~_XBF_DELWRI_Q;
+ list_del_init(&bp->b_list);
+ xfs_buf_relse(bp);
+ }
+}
+
+/*
* Add a buffer to the delayed write list.
*
* This queues a buffer for writeout if it hasn't already been. Note that
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index c75721acd867..149bbd451731 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -304,6 +304,7 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
/* Delayed Write Buffer Routines */
+extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 642d55d10075..2fbf643fa10a 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -406,6 +406,7 @@ xfs_dir2_leaf_readbuf(
/*
* Do we need more readahead?
+ * Each loop iteration processes one full dir block; the last may be partial.
*/
blk_start_plug(&plug);
for (mip->ra_index = mip->ra_offset = i = 0;
@@ -416,7 +417,8 @@ xfs_dir2_leaf_readbuf(
* Read-ahead a contiguous directory block.
*/
if (i > mip->ra_current &&
- map[mip->ra_index].br_blockcount >= geo->fsbcount) {
+ (map[mip->ra_index].br_blockcount - mip->ra_offset) >=
+ geo->fsbcount) {
xfs_dir3_data_readahead(dp,
map[mip->ra_index].br_startoff + mip->ra_offset,
XFS_FSB_TO_DADDR(dp->i_mount,
@@ -437,14 +439,19 @@ xfs_dir2_leaf_readbuf(
}
/*
- * Advance offset through the mapping table.
+ * Advance offset through the mapping table, processing a full
+ * dir block even if it is fragmented into several extents.
+ * But stop if we have consumed all valid mappings, even if
+ * it's not yet a full directory block.
*/
- for (j = 0; j < geo->fsbcount; j += length ) {
+ for (j = 0;
+ j < geo->fsbcount && mip->ra_index < mip->map_valid;
+ j += length ) {
/*
* The rest of this extent but not more than a dir
* block.
*/
- length = min_t(int, geo->fsbcount,
+ length = min_t(int, geo->fsbcount - j,
map[mip->ra_index].br_blockcount -
mip->ra_offset);
mip->ra_offset += length;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index f5392ab2def1..3dd47307363f 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -947,7 +947,7 @@ xfs_file_fallocate(
if (error)
goto out_unlock;
} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
- unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
+ unsigned int blksize_mask = i_blocksize(inode) - 1;
if (offset & blksize_mask || len & blksize_mask) {
error = -EINVAL;
@@ -969,7 +969,7 @@ xfs_file_fallocate(
if (error)
goto out_unlock;
} else if (mode & FALLOC_FL_INSERT_RANGE) {
- unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
+ unsigned int blksize_mask = i_blocksize(inode) - 1;
new_size = i_size_read(inode) + len;
if (offset & blksize_mask || len & blksize_mask) {
@@ -1208,7 +1208,7 @@ xfs_find_get_desired_pgoff(
unsigned nr_pages;
unsigned int i;
- want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
want);
/*
@@ -1235,17 +1235,6 @@ xfs_find_get_desired_pgoff(
break;
}
- /*
- * At lease we found one page. If this is the first time we
- * step into the loop, and if the first page index offset is
- * greater than the given search offset, a hole was found.
- */
- if (type == HOLE_OFF && lastoff == startoff &&
- lastoff < page_offset(pvec.pages[0])) {
- found = true;
- break;
- }
-
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
loff_t b_offset;
@@ -1257,18 +1246,18 @@ xfs_find_get_desired_pgoff(
* file mapping. However, page->index will not change
* because we have a reference on the page.
*
- * Searching done if the page index is out of range.
- * If the current offset is not reaches the end of
- * the specified search range, there should be a hole
- * between them.
+ * If current page offset is beyond where we've ended,
+ * we've found a hole.
*/
- if (page->index > end) {
- if (type == HOLE_OFF && lastoff < endoff) {
- *offset = lastoff;
- found = true;
- }
+ if (type == HOLE_OFF && lastoff < endoff &&
+ lastoff < page_offset(pvec.pages[i])) {
+ found = true;
+ *offset = lastoff;
goto out;
}
+ /* Searching done if the page index is out of range. */
+ if (page->index > end)
+ goto out;
lock_page(page);
/*
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index d7a490f24ead..adbc1f59969a 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -210,14 +210,17 @@ xfs_iget_cache_hit(
error = inode_init_always(mp->m_super, inode);
if (error) {
+ bool wake;
/*
* Re-initializing the inode failed, and we are in deep
* trouble. Try to re-add it to the reclaim list.
*/
rcu_read_lock();
spin_lock(&ip->i_flags_lock);
-
+ wake = !!__xfs_iflags_test(ip, XFS_INEW);
ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+ if (wake)
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
trace_xfs_iget_reclaim_fail(ip);
goto out_error;
@@ -363,6 +366,22 @@ out_destroy:
return error;
}
+static void
+xfs_inew_wait(
+ struct xfs_inode *ip)
+{
+ wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
+ DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
+
+ do {
+ prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+ if (!xfs_iflags_test(ip, XFS_INEW))
+ break;
+ schedule();
+ } while (true);
+ finish_wait(wq, &wait.wait);
+}
+
/*
* Look up an inode by number in the given file system.
* The inode is looked up in the cache held in each AG.
@@ -467,9 +486,11 @@ out_error_or_again:
STATIC int
xfs_inode_ag_walk_grab(
- struct xfs_inode *ip)
+ struct xfs_inode *ip,
+ int flags)
{
struct inode *inode = VFS_I(ip);
+ bool newinos = !!(flags & XFS_AGITER_INEW_WAIT);
ASSERT(rcu_read_lock_held());
@@ -487,7 +508,8 @@ xfs_inode_ag_walk_grab(
goto out_unlock_noent;
/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
- if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+ if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
+ __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
goto out_unlock_noent;
spin_unlock(&ip->i_flags_lock);
@@ -515,7 +537,8 @@ xfs_inode_ag_walk(
void *args),
int flags,
void *args,
- int tag)
+ int tag,
+ int iter_flags)
{
uint32_t first_index;
int last_error = 0;
@@ -557,7 +580,7 @@ restart:
for (i = 0; i < nr_found; i++) {
struct xfs_inode *ip = batch[i];
- if (done || xfs_inode_ag_walk_grab(ip))
+ if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
batch[i] = NULL;
/*
@@ -585,6 +608,9 @@ restart:
for (i = 0; i < nr_found; i++) {
if (!batch[i])
continue;
+ if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
+ xfs_iflags_test(batch[i], XFS_INEW))
+ xfs_inew_wait(batch[i]);
error = execute(batch[i], flags, args);
IRELE(batch[i]);
if (error == -EAGAIN) {
@@ -637,12 +663,13 @@ xfs_eofblocks_worker(
}
int
-xfs_inode_ag_iterator(
+xfs_inode_ag_iterator_flags(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
void *args),
int flags,
- void *args)
+ void *args,
+ int iter_flags)
{
struct xfs_perag *pag;
int error = 0;
@@ -652,7 +679,8 @@ xfs_inode_ag_iterator(
ag = 0;
while ((pag = xfs_perag_get(mp, ag))) {
ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
+ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
+ iter_flags);
xfs_perag_put(pag);
if (error) {
last_error = error;
@@ -664,6 +692,17 @@ xfs_inode_ag_iterator(
}
int
+xfs_inode_ag_iterator(
+ struct xfs_mount *mp,
+ int (*execute)(struct xfs_inode *ip, int flags,
+ void *args),
+ int flags,
+ void *args)
+{
+ return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
+}
+
+int
xfs_inode_ag_iterator_tag(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
@@ -680,7 +719,8 @@ xfs_inode_ag_iterator_tag(
ag = 0;
while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
+ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
+ 0);
xfs_perag_put(pag);
if (error) {
last_error = error;
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 62f1f91c32cb..147a79212e63 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -48,6 +48,11 @@ struct xfs_eofblocks {
#define XFS_IGET_UNTRUSTED 0x2
#define XFS_IGET_DONTCACHE 0x4
+/*
+ * flags for AG inode iterator
+ */
+#define XFS_AGITER_INEW_WAIT 0x1 /* wait on new inodes */
+
int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
uint flags, uint lock_flags, xfs_inode_t **ipp);
@@ -72,6 +77,9 @@ void xfs_eofblocks_worker(struct work_struct *);
int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags, void *args),
int flags, void *args);
+int xfs_inode_ag_iterator_flags(struct xfs_mount *mp,
+ int (*execute)(struct xfs_inode *ip, int flags, void *args),
+ int flags, void *args, int iter_flags);
int xfs_inode_ag_iterator_tag(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags, void *args),
int flags, void *args, int tag);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index ca9e11989cbd..ae1a49845744 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -208,7 +208,8 @@ xfs_get_initial_prid(struct xfs_inode *dp)
#define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
#define XFS_ISTALE (1 << 1) /* inode has been staled */
#define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */
-#define XFS_INEW (1 << 3) /* inode has just been allocated */
+#define __XFS_INEW_BIT 3 /* inode has just been allocated */
+#define XFS_INEW (1 << __XFS_INEW_BIT)
#define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */
#define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */
#define __XFS_IFLOCK_BIT 7 /* inode is being flushed right now */
@@ -453,6 +454,7 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
xfs_iflags_clear(ip, XFS_INEW);
barrier();
unlock_new_inode(VFS_I(ip));
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
}
static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index d42738deec6d..e4a4f82ea13f 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -403,6 +403,7 @@ xfs_attrlist_by_handle(
{
int error = -ENOMEM;
attrlist_cursor_kern_t *cursor;
+ struct xfs_fsop_attrlist_handlereq __user *p = arg;
xfs_fsop_attrlist_handlereq_t al_hreq;
struct dentry *dentry;
char *kbuf;
@@ -435,6 +436,11 @@ xfs_attrlist_by_handle(
if (error)
goto out_kfree;
+ if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
+ error = -EFAULT;
+ goto out_kfree;
+ }
+
if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
error = -EFAULT;
@@ -1379,10 +1385,11 @@ xfs_ioc_getbmap(
unsigned int cmd,
void __user *arg)
{
- struct getbmapx bmx;
+ struct getbmapx bmx = { 0 };
int error;
- if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
+ /* struct getbmap is a strict subset of struct getbmapx. */
+ if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
return -EFAULT;
if (bmx.bmv_count < 2)
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 532ab79d38fe..572b64a135b3 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1355,12 +1355,7 @@ xfs_qm_quotacheck(
mp->m_qflags |= flags;
error_return:
- while (!list_empty(&buffer_list)) {
- struct xfs_buf *bp =
- list_first_entry(&buffer_list, struct xfs_buf, b_list);
- list_del_init(&bp->b_list);
- xfs_buf_relse(bp);
- }
+ xfs_buf_delwri_cancel(&buffer_list);
if (error) {
xfs_warn(mp,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 3640c6e896af..4d334440bd94 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -764,5 +764,6 @@ xfs_qm_dqrele_all_inodes(
uint flags)
{
ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
+ xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
+ XFS_AGITER_INEW_WAIT);
}
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 839b35ca21c6..9beaf192b4bb 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -180,7 +180,8 @@ xfs_xattr_put_listent(
arraytop = context->count + prefix_len + namelen + 1;
if (arraytop > context->firstu) {
context->count = -1; /* insufficient space */
- return 1;
+ context->seen_enough = 1;
+ return 0;
}
offset = (char *)context->alist + context->count;
strncpy(offset, xfs_xattr_prefix(flags), prefix_len);
@@ -222,12 +223,15 @@ list_one_attr(const char *name, const size_t len, void *data,
}
ssize_t
-xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
+xfs_vn_listxattr(
+ struct dentry *dentry,
+ char *data,
+ size_t size)
{
struct xfs_attr_list_context context;
struct attrlist_cursor_kern cursor = { 0 };
- struct inode *inode = d_inode(dentry);
- int error;
+ struct inode *inode = d_inode(dentry);
+ int error;
/*
* First read the regular on-disk attributes.
@@ -245,7 +249,9 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
else
context.put_listent = xfs_xattr_put_listent_sizes;
- xfs_attr_list_int(&context);
+ error = xfs_attr_list_int(&context);
+ if (error)
+ return error;
if (context.count < 0)
return -ERANGE;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ad2bcf647b9a..210ccc4ea44b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -340,6 +340,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
}
/**
+ * css_is_dying - test whether the specified css is dying
+ * @css: target css
+ *
+ * Test whether @css is in the process of offlining or already offline. In
+ * most cases, ->css_online() and ->css_offline() callbacks should be
+ * enough; however, the actual offline operations are RCU delayed and this
+ * test returns %true also when @css is scheduled to be offlined.
+ *
+ * This is useful, for example, when the use case requires synchronous
+ * behavior with respect to cgroup removal. cgroup removal schedules css
+ * offlining but the css can seem alive while the operation is being
+ * delayed. If the delay affects user visible semantics, this test can be
+ * used to resolve the situation.
+ */
+static inline bool css_is_dying(struct cgroup_subsys_state *css)
+{
+ return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+}
+
+/**
* css_put - put a css reference
* @css: target css
*
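
A hedged usage sketch for the new helper (kernel-style pseudocode, not a complete subsystem; some_attach is a hypothetical name): a controller that needs synchronous behaviour with respect to cgroup removal can refuse new work once the css is scheduled for offlining, instead of trusting liveness of the refcount alone.

#include <linux/cgroup.h>
#include <linux/errno.h>

/* Sketch: refuse new work against a cgroup already being removed. */
static int some_attach(struct cgroup_subsys_state *css)
{
	if (css_is_dying(css))
		return -ENODEV;	/* offlining scheduled; act as if gone */

	/* ... normal path: css still online from our point of view ... */
	return 0;
}
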
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index fe865e627528..5daa9e78584c 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -107,6 +107,22 @@ struct cpufreq_policy {
*/
struct rw_semaphore rwsem;
+
+ /*
+ * Fast switch flags:
+ * - fast_switch_possible should be set by the driver if it can
+ * guarantee that frequency can be changed on any CPU sharing the
+ * policy and that the change will affect all of the policy CPUs then.
+ * - fast_switch_enabled is to be set by governors that support fast
+ * frequency switching with the help of cpufreq_enable_fast_switch().
+ */
+ bool fast_switch_possible;
+ bool fast_switch_enabled;
+
+ /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
+ unsigned int cached_target_freq;
+ int cached_resolved_idx;
+
/* Synchronization for frequency transitions */
bool transition_ongoing; /* Tracks transition status */
spinlock_t transition_lock;
@@ -485,6 +501,8 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
+unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
@@ -516,8 +534,42 @@ extern struct cpufreq_governor cpufreq_gov_interactive;
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED)
extern struct cpufreq_governor cpufreq_gov_sched;
#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_sched)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL)
+extern struct cpufreq_governor cpufreq_gov_schedutil;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_schedutil)
#endif
+static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
+{
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+}
+
+/* Governor attribute set */
+struct gov_attr_set {
+ struct kobject kobj;
+ struct list_head policy_list;
+ struct mutex update_lock;
+ int usage_count;
+};
+
+/* sysfs ops for cpufreq governors */
+extern const struct sysfs_ops governor_sysfs_ops;
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
+
+/* Governor sysfs attribute */
+struct governor_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
+ ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
+ size_t count);
+};
+
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
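
The gov_attr_set / governor_attr pair lets a schedutil-style governor expose one shared tunables directory across the policies that use it. A hedged sketch of how a governor might wire up a single tunable through these hooks (my_tunables and rate_limit_us are illustrative names, not part of this patch):

#include <linux/cpufreq.h>
#include <linux/kernel.h>

/* Sketch only: a governor-private tunables object embedding the set. */
struct my_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

static inline struct my_tunables *to_tunables(struct gov_attr_set *set)
{
	return container_of(set, struct my_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *set, char *buf)
{
	return sprintf(buf, "%u\n", to_tunables(set)->rate_limit_us);
}

static ssize_t rate_limit_us_store(struct gov_attr_set *set, const char *buf,
				   size_t count)
{
	if (kstrtouint(buf, 10, &to_tunables(set)->rate_limit_us))
		return -EINVAL;
	return count;
}

static struct governor_attr rate_limit_us =
	__ATTR(rate_limit_us, 0644, rate_limit_us_show, rate_limit_us_store);

The governor would then call gov_attr_set_init() when the first policy starts and route its kobject's sysfs ops through governor_sysfs_ops, which dispatches show/store against the embedded gov_attr_set.
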
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 21a0917119ce..7c92113e20c3 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -144,7 +144,7 @@ the appropriate macros. */
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
-#define MSG_MASK_TBL_CNT 25
+#define MSG_MASK_TBL_CNT 26
#define APPS_EVENT_LAST_ID 0x0B3F
#define MSG_SSID_0 0
@@ -195,8 +195,10 @@ the appropriate macros. */
#define MSG_SSID_22_LAST 10377
#define MSG_SSID_23 10400
#define MSG_SSID_23_LAST 10416
-#define MSG_SSID_24 0xC000
-#define MSG_SSID_24_LAST 0xC063
+#define MSG_SSID_24 10500
+#define MSG_SSID_24_LAST 10505
+#define MSG_SSID_25 0xC000
+#define MSG_SSID_25_LAST 0xC063
static const uint32_t msg_bld_masks_0[] = {
MSG_LVL_LOW,
@@ -857,6 +859,19 @@ static const uint32_t msg_bld_masks_23[] = {
MSG_LVL_LOW
};
+static const uint32_t msg_bld_masks_24[] = {
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_25[] = {
+ MSG_LVL_LOW
+};
+
/* LOG CODES */
static const uint32_t log_code_last_tbl[] = {
0x0, /* EQUIP ID 0 */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 28c8f7038ae0..4b27be2038e3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -689,6 +689,11 @@ struct inode {
void *i_private; /* fs or device private pointer */
};
+static inline unsigned int i_blocksize(const struct inode *node)
+{
+ return (1 << node->i_blkbits);
+}
+
static inline int inode_unhashed(struct inode *inode)
{
return hlist_unhashed(&inode->i_hash);
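
The i_blocksize() helper anchors the long mechanical conversion in this patch: every open-coded 1 << inode->i_blkbits picked its own integer type, while the helper fixes the result to unsigned int. A two-line userspace illustration of the equivalence:

#include <stdio.h>

/* Userspace model: the block size is two to the power of i_blkbits. */
static unsigned int i_blocksize_demo(unsigned char blkbits)
{
	return 1U << blkbits;
}

int main(void)
{
	/* ext4 with 4 KiB blocks stores i_blkbits == 12. */
	printf("%u\n", i_blocksize_demo(12));	/* 4096 */
	return 0;
}
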
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 67ce5bd3b56a..19db03dbbd00 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -616,15 +616,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
netdev_features_t features)
{
- if (skb_vlan_tagged_multi(skb))
- features = netdev_intersect_features(features,
- NETIF_F_SG |
- NETIF_F_HIGHDMA |
- NETIF_F_FRAGLIST |
- NETIF_F_GEN_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
-
+ if (skb_vlan_tagged_multi(skb)) {
+ /* In the case of multi-tagged packets, use a direct mask
+ * instead of using netdev_intersect_features(), to make
+ * sure that only devices supporting NETIF_F_HW_CSUM will
+ * have checksum offloading support.
+ */
+ features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+ NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ }
return features;
}
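The practical effect of the stricter mask: a NIC that advertises only NETIF_F_IP_CSUM (but not NETIF_F_HW_CSUM) no longer claims checksum offload for multi-tagged packets, so the stack checksums them in software. An illustrative fragment:

netdev_features_t dev_features = NETIF_F_SG | NETIF_F_IP_CSUM;
netdev_features_t out = dev_features &
		(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
		 NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
		 NETIF_F_HW_VLAN_STAG_TX);
/* out == NETIF_F_SG: NETIF_F_IP_CSUM is filtered out here, unlike with
 * the old NETIF_F_GEN_CSUM-based intersection. */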
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index ad16809c8596..b3b1af8a8f8c 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -423,6 +423,12 @@ enum
};
#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirqs whose handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \
+ (1 << NET_RX_SOFTIRQ) | \
+ (1 << BLOCK_SOFTIRQ) | \
+ (1 << BLOCK_IOPOLL_SOFTIRQ) | \
+ (1 << TASKLET_SOFTIRQ))
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
@@ -458,6 +464,7 @@ extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
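Presumably the per-CPU active_softirqs bitmap is meant to be consulted by scheduler or load-tracking code that wants to steer work away from a CPU busy in a long-running softirq. A hedged sketch of such a check (the helper is ours, not part of the patch):

static inline bool demo_cpu_in_long_softirq(int cpu)
{
	/* Each bit is set while the corresponding handler runs. */
	__u32 active = per_cpu(active_softirqs, cpu);

	return !!(active & LONG_SOFTIRQ_MASK);
}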
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index e691b6a23f72..4289343ba1f9 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -75,6 +75,8 @@ struct kthread_work {
struct list_head node;
kthread_work_func_t func;
struct kthread_worker *worker;
+ /* Number of canceling calls that are running at the moment. */
+ int canceling;
};
#define KTHREAD_WORKER_INIT(worker) { \
@@ -129,4 +131,6 @@ bool queue_kthread_work(struct kthread_worker *worker,
void flush_kthread_work(struct kthread_work *work);
void flush_kthread_worker(struct kthread_worker *worker);
+bool kthread_cancel_work_sync(struct kthread_work *work);
+
#endif /* _LINUX_KTHREAD_H */
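Usage keeps the pre-4.9 naming used throughout this tree (queue_kthread_work() rather than kthread_queue_work()). A minimal start/stop sketch around the new cancel helper, with every `demo` identifier invented for illustration:

static struct kthread_worker demo_worker;
static struct kthread_work demo_work;
static struct task_struct *demo_task;

static void demo_fn(struct kthread_work *work)
{
	/* deferred processing; may re-queue itself */
}

static int demo_start(void)
{
	init_kthread_worker(&demo_worker);
	init_kthread_work(&demo_work, demo_fn);
	demo_task = kthread_run(kthread_worker_fn, &demo_worker, "demo");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);
	queue_kthread_work(&demo_worker, &demo_work);
	return 0;
}

static void demo_stop(void)
{
	/* Safe even if demo_fn() re-queues itself. */
	kthread_cancel_work_sync(&demo_work);
	kthread_stop(demo_task);
}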
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ab93174fa639..d4b56351027b 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -424,12 +424,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
}
#endif
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+ phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
return 0;
}
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+ phys_addr_t end_addr)
+{
+ return 0;
+}
+
#endif /* CONFIG_HAVE_MEMBLOCK */
#endif /* __KERNEL__ */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ad7f915ddf76..9d1161a8d6b7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -712,6 +712,7 @@ typedef struct pglist_data {
* is the first PFN that needs to be initialised.
*/
unsigned long first_deferred_pfn;
+ unsigned long static_init_size;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
} pg_data_t;
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index e13bfdf7f314..81fdf4b8aba4 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -50,7 +50,8 @@ extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
- struct task_struct *new_parent);
+ struct task_struct *new_parent,
+ const struct cred *ptracer_cred);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
@@ -202,7 +203,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
if (unlikely(ptrace) && current->ptrace) {
child->ptrace = current->ptrace;
- __ptrace_link(child, current->parent);
+ __ptrace_link(child, current->parent, current->ptracer_cred);
if (child->ptrace & PT_SEIZED)
task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
@@ -211,6 +212,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
set_tsk_thread_flag(child, TIF_SIGPENDING);
}
+ else
+ child->ptracer_cred = NULL;
}
/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 138fcf72508a..57042d91ae9c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1072,7 +1072,8 @@ extern void wake_up_q(struct wake_q_head *head);
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
-#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */
+#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */
+#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */
#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
@@ -1133,6 +1134,37 @@ unsigned long capacity_curr_of(int cpu);
struct sched_group;
+struct eas_stats {
+ /* select_idle_sibling() stats */
+ u64 sis_attempts;
+ u64 sis_idle;
+ u64 sis_cache_affine;
+ u64 sis_suff_cap;
+ u64 sis_idle_cpu;
+ u64 sis_count;
+
+ /* select_energy_cpu_brute() stats */
+ u64 secb_attempts;
+ u64 secb_sync;
+ u64 secb_idle_bt;
+ u64 secb_insuff_cap;
+ u64 secb_no_nrg_sav;
+ u64 secb_nrg_sav;
+ u64 secb_count;
+
+ /* find_best_target() stats */
+ u64 fbt_attempts;
+ u64 fbt_no_cpu;
+ u64 fbt_no_sd;
+ u64 fbt_pref_idle;
+ u64 fbt_count;
+
+ /* cas */
+ /* select_task_rq_fair() stats */
+ u64 cas_attempts;
+ u64 cas_count;
+};
+
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
@@ -1193,6 +1225,8 @@ struct sched_domain {
unsigned int ttwu_wake_remote;
unsigned int ttwu_move_affine;
unsigned int ttwu_move_balance;
+
+ struct eas_stats eas_stats;
#endif
#ifdef CONFIG_SCHED_DEBUG
char *name;
@@ -1351,6 +1385,35 @@ struct sched_statistics {
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
+
+ /* select_idle_sibling() */
+ u64 nr_wakeups_sis_attempts;
+ u64 nr_wakeups_sis_idle;
+ u64 nr_wakeups_sis_cache_affine;
+ u64 nr_wakeups_sis_suff_cap;
+ u64 nr_wakeups_sis_idle_cpu;
+ u64 nr_wakeups_sis_count;
+
+ /* energy_aware_wake_cpu() */
+ u64 nr_wakeups_secb_attempts;
+ u64 nr_wakeups_secb_sync;
+ u64 nr_wakeups_secb_idle_bt;
+ u64 nr_wakeups_secb_insuff_cap;
+ u64 nr_wakeups_secb_no_nrg_sav;
+ u64 nr_wakeups_secb_nrg_sav;
+ u64 nr_wakeups_secb_count;
+
+ /* find_best_target() */
+ u64 nr_wakeups_fbt_attempts;
+ u64 nr_wakeups_fbt_no_cpu;
+ u64 nr_wakeups_fbt_no_sd;
+ u64 nr_wakeups_fbt_pref_idle;
+ u64 nr_wakeups_fbt_count;
+
+ /* cas */
+ /* select_task_rq_fair() */
+ u64 nr_wakeups_cas_attempts;
+ u64 nr_wakeups_cas_count;
};
#endif
@@ -3512,4 +3575,19 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
+#define SCHED_CPUFREQ_RT (1U << 0)
+#define SCHED_CPUFREQ_DL (1U << 1)
+#define SCHED_CPUFREQ_IOWAIT (1U << 2)
+
+#ifdef CONFIG_CPU_FREQ
+struct update_util_data {
+ void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
+};
+
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+ void (*func)(struct update_util_data *data, u64 time,
+ unsigned int flags));
+void cpufreq_remove_update_util_hook(int cpu);
+#endif /* CONFIG_CPU_FREQ */
+
#endif
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index ef8a092251aa..128c4a8c9979 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -39,7 +39,6 @@ extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
-extern unsigned int sysctl_sched_is_big_little;
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_initial_task_util;
extern unsigned int sysctl_sched_cstate_aware;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d443d9ab0236..3f61c647fc5c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1084,9 +1084,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
static inline void skb_sender_cpu_clear(struct sk_buff *skb)
{
-#ifdef CONFIG_XPS
- skb->sender_cpu = 0;
-#endif
}
#ifdef NET_SKBUFF_DATA_USES_OFFSET
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a55f127d6836..e8b2ed4ad851 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -330,6 +330,8 @@ struct usb_host_bos {
struct usb_ss_cap_descriptor *ss_cap;
struct usb_ssp_cap_descriptor *ssp_cap;
struct usb_ss_container_id_descriptor *ss_id;
+ struct usb_config_summary_descriptor *config_summary;
+ unsigned int num_config_summary_desc;
};
int __usb_get_extra_descriptor(char *buffer, unsigned size,
diff --git a/include/net/dst.h b/include/net/dst.h
index c7329dcd90cc..e4f450617919 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -110,10 +110,16 @@ struct dst_entry {
};
};
+struct dst_metrics {
+ u32 metrics[RTAX_MAX];
+ atomic_t refcnt;
+};
+extern const struct dst_metrics dst_default_metrics;
+
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[];
#define DST_METRICS_READ_ONLY 0x1UL
+#define DST_METRICS_REFCOUNTED 0x2UL
#define DST_METRICS_FLAGS 0x3UL
#define __DST_METRICS_PTR(Y) \
((u32 *)((Y) & ~DST_METRICS_FLAGS))
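With the metrics now wrapped in a refcounted container, several dst_entry instances can share one block owned by a fib_info, and DST_METRICS_REFCOUNTED tells the dst code that a reference must be dropped on release. A hedged sketch of the put side (the helper is ours; field names follow the struct above):

static void demo_metrics_put(struct dst_metrics *m)
{
	/* The static default block is shared and must never be freed. */
	if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
		kfree(m);
}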
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 3f98233388fb..bda1721e9622 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -112,11 +112,11 @@ struct fib_info {
unsigned char fib_type;
__be32 fib_prefsrc;
u32 fib_priority;
- u32 *fib_metrics;
-#define fib_mtu fib_metrics[RTAX_MTU-1]
-#define fib_window fib_metrics[RTAX_WINDOW-1]
-#define fib_rtt fib_metrics[RTAX_RTT-1]
-#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
+ struct dst_metrics *fib_metrics;
+#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
+#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
+#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
+#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
int fib_nhs;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_weight;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 9a5c9f013784..ad1d6039185d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -958,6 +958,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
*/
extern const struct proto_ops inet6_stream_ops;
extern const struct proto_ops inet6_dgram_ops;
+extern const struct proto_ops inet6_sockraw_ops;
struct group_source_req;
struct group_filter;
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9a1ff42a377e..84f7f2139a91 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1458,14 +1458,21 @@ TRACE_EVENT(sched_contrib_scale_f,
#ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int sysctl_sched_use_walt_task_util;
+extern unsigned int walt_ravg_window;
+extern unsigned int walt_disabled;
+#endif
+
/*
* Tracepoint for accounting sched averages for tasks.
*/
TRACE_EVENT(sched_load_avg_task,
- TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+ TP_PROTO(struct task_struct *tsk, struct sched_avg *avg, void *_ravg),
- TP_ARGS(tsk, avg),
+ TP_ARGS(tsk, avg, _ravg),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -1473,6 +1480,8 @@ TRACE_EVENT(sched_load_avg_task,
__field( int, cpu )
__field( unsigned long, load_avg )
__field( unsigned long, util_avg )
+ __field( unsigned long, util_avg_pelt )
+ __field( unsigned long, util_avg_walt )
__field( u64, load_sum )
__field( u32, util_sum )
__field( u32, period_contrib )
@@ -1487,15 +1496,25 @@ TRACE_EVENT(sched_load_avg_task,
__entry->load_sum = avg->load_sum;
__entry->util_sum = avg->util_sum;
__entry->period_contrib = avg->period_contrib;
+ __entry->util_avg_pelt = avg->util_avg;
+ __entry->util_avg_walt = 0;
+#ifdef CONFIG_SCHED_WALT
+ __entry->util_avg_walt = (((unsigned long)((struct ravg*)_ravg)->demand) << SCHED_LOAD_SHIFT);
+ do_div(__entry->util_avg_walt, walt_ravg_window);
+ if (!walt_disabled && sysctl_sched_use_walt_task_util)
+ __entry->util_avg = __entry->util_avg_walt;
+#endif
),
-
- TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu load_sum=%llu"
+ TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu "
+ "util_avg_pelt=%lu util_avg_walt=%lu load_sum=%llu"
" util_sum=%u period_contrib=%u",
__entry->comm,
__entry->pid,
__entry->cpu,
__entry->load_avg,
__entry->util_avg,
+ __entry->util_avg_pelt,
+ __entry->util_avg_walt,
(u64)__entry->load_sum,
(u32)__entry->util_sum,
(u32)__entry->period_contrib)
@@ -1514,16 +1533,29 @@ TRACE_EVENT(sched_load_avg_cpu,
__field( int, cpu )
__field( unsigned long, load_avg )
__field( unsigned long, util_avg )
+ __field( unsigned long, util_avg_pelt )
+ __field( unsigned long, util_avg_walt )
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->load_avg = cfs_rq->avg.load_avg;
__entry->util_avg = cfs_rq->avg.util_avg;
+ __entry->util_avg_pelt = cfs_rq->avg.util_avg;
+ __entry->util_avg_walt = 0;
+#ifdef CONFIG_SCHED_WALT
+ __entry->util_avg_walt =
+ cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
+ do_div(__entry->util_avg_walt, walt_ravg_window);
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ __entry->util_avg = __entry->util_avg_walt;
+#endif
),
- TP_printk("cpu=%d load_avg=%lu util_avg=%lu",
- __entry->cpu, __entry->load_avg, __entry->util_avg)
+ TP_printk("cpu=%d load_avg=%lu util_avg=%lu "
+ "util_avg_pelt=%lu util_avg_walt=%lu",
+ __entry->cpu, __entry->load_avg, __entry->util_avg,
+ __entry->util_avg_pelt, __entry->util_avg_walt)
);
/*
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 51569b7e7aa0..cbd6731aff43 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -68,7 +68,9 @@
#define IPA_IOCTL_ADD_RT_RULE_AFTER 43
#define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
#define IPA_IOCTL_GET_HW_VERSION 45
-#define IPA_IOCTL_MAX 46
+#define IPA_IOCTL_ADD_RT_RULE_EXT 46
+#define IPA_IOCTL_NAT_MODIFY_PDN 47
+#define IPA_IOCTL_MAX 48
/**
* max size of the header to be inserted
@@ -127,6 +129,11 @@
#define IPA_FLT_MAC_ETHER_TYPE (1ul << 21)
/**
+ * maximum number of NAT PDNs in the PDN config table
+ */
+#define IPA_MAX_PDN_NUM 5
+
+/**
* enum ipa_client_type - names for the various IPA "clients"
* these are from the perspective of the clients, for e.g.
* HSIC1_PROD means HSIC client is the producer and IPA is the
@@ -454,6 +461,7 @@ enum ipa_rm_resource_name {
* @IPA_HW_v3_1: IPA hardware version 3.1
* @IPA_HW_v3_5: IPA hardware version 3.5
* @IPA_HW_v3_5_1: IPA hardware version 3.5.1
+ * @IPA_HW_v4_0: IPA hardware version 4.0
*/
enum ipa_hw_type {
IPA_HW_None = 0,
@@ -468,8 +476,11 @@ enum ipa_hw_type {
IPA_HW_v3_1 = 11,
IPA_HW_v3_5 = 12,
IPA_HW_v3_5_1 = 13,
- IPA_HW_MAX
+ IPA_HW_v4_0 = 14,
};
+#define IPA_HW_MAX (IPA_HW_v4_0 + 1)
+
+#define IPA_HW_v4_0 IPA_HW_v4_0
/**
* struct ipa_rule_attrib - attributes of a routing/filtering
@@ -670,6 +681,11 @@ struct ipa_ipfltri_rule_eq {
* consecutive packets
* @rule_id: rule_id to be assigned to the filter rule. In case client specifies
* rule_id as 0 the driver will assign a new rule_id
+ * @set_metadata: bool switch controlling whether metadata replacement
+ * takes place at the NAT block
+ * @pdn_idx: if the action is "pass to source/destination NAT", the PDN
+ * index in the matching PDN entry is compared as an additional
+ * condition for a NAT hit.
*/
struct ipa_flt_rule {
uint8_t retain_hdr;
@@ -683,6 +699,8 @@ struct ipa_flt_rule {
uint8_t max_prio;
uint8_t hashable;
uint16_t rule_id;
+ uint8_t set_metadata;
+ uint8_t pdn_idx;
};
/**
@@ -1359,6 +1377,20 @@ struct ipa_ioc_nat_dma_cmd {
};
/**
+ * struct ipa_ioc_nat_pdn_entry - PDN entry modification data
+ * @pdn_index: index of the entry in the PDN config table to be changed
+ * @public_ip: the PDN's public IP
+ * @src_metadata: the PDN's source NAT metadata for metadata replacement
+ * @dst_metadata: the PDN's destination NAT metadata for metadata replacement
+ */
+struct ipa_ioc_nat_pdn_entry {
+ uint8_t pdn_index;
+ uint32_t public_ip;
+ uint32_t src_metadata;
+ uint32_t dst_metadata;
+};
+
+/**
* struct ipa_msg_meta - Format of the message meta-data.
* @msg_type: the type of the message
* @rsvd: reserved bits for future use.
@@ -1580,6 +1612,9 @@ enum ipacm_client_enum {
#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_GET_NAT_OFFSET, \
uint32_t *)
+#define IPA_IOC_NAT_MODIFY_PDN _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NAT_MODIFY_PDN, \
+ struct ipa_ioc_nat_pdn_entry *)
#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
IPA_IOCTL_SET_FLT, \
uint32_t)
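From userspace, the new command is issued like the other IPA NAT ioctls. A hedged example; the /dev/ipa node name and the sample values are assumptions, not taken from this patch:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_ipa.h>

int demo_modify_pdn(void)
{
	struct ipa_ioc_nat_pdn_entry e = {
		.pdn_index = 1,			/* must be < IPA_MAX_PDN_NUM */
		.public_ip = 0xc0a80001,	/* 192.168.0.1 */
		.src_metadata = 0,
		.dst_metadata = 0,
	};
	int ret, fd = open("/dev/ipa", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return -1;
	ret = ioctl(fd, IPA_IOC_NAT_MODIFY_PDN, &e);
	close(fd);
	return ret;
}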
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 779a62aafafe..d3ab3f57aa9f 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -894,6 +894,29 @@ struct usb_ssp_cap_descriptor {
#define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */
} __attribute__((packed));
+/*
+ * Configuration Summary descriptor: defines the list of functions in a
+ * configuration. Host software may use this descriptor to decide which
+ * configuration to use to obtain the desired functionality.
+ */
+#define USB_CAP_TYPE_CONFIG_SUMMARY 0x10
+
+struct function_class_info {
+ __u8 bClass;
+ __u8 bSubClass;
+ __u8 bProtocol;
+};
+
+struct usb_config_summary_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType;
+ __u16 bcdVersion;
+ __u8 bConfigurationValue;
+ __u8 bMaxPower;
+ __u8 bNumFunctions;
+ struct function_class_info cs_info[];
+} __attribute__((packed));
/*-------------------------------------------------------------------------*/
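Host software walking this BOS capability would bound the flexible cs_info[] array with bNumFunctions. A sketch of such a consumer (function name ours):

static void demo_dump_config_summary(const struct usb_config_summary_descriptor *d)
{
	int i;

	for (i = 0; i < d->bNumFunctions; i++)
		pr_info("config %u: function class %02x/%02x/%02x\n",
			d->bConfigurationValue,
			d->cs_info[i].bClass,
			d->cs_info[i].bSubClass,
			d->cs_info[i].bProtocol);
}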
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 2e4c02f24a47..0d87fa1e253c 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1218,6 +1218,13 @@ enum v4l2_mpeg_vidc_video_venc_iframesize_type {
#define V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP \
(V4L2_CID_MPEG_MSM_VIDC_BASE + 101)
+#define V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 102)
+enum v4l2_mpeg_vidc_video_au_delimiter {
+ V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_DISABLED = 0,
+ V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_ENABLED = 1
+};
+
/* Camera class control IDs */
diff --git a/include/uapi/media/ais/msm_ais.h b/include/uapi/media/ais/msm_ais.h
index e3592b672035..9156ca0c9083 100644
--- a/include/uapi/media/ais/msm_ais.h
+++ b/include/uapi/media/ais/msm_ais.h
@@ -220,7 +220,7 @@ struct msm_camera_private_ioctl_arg {
__u32 size;
__u32 result;
__u32 reserved;
- __user __u64 ioctl_ptr;
+ __u64 ioctl_ptr;
};
#define VIDIOC_MSM_CAMERA_PRIVATE_IOCTL_CMD \
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 29c7240172d3..03dbc231a4a0 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -174,9 +174,9 @@ typedef enum {
} cpuset_flagbits_t;
/* convenient tests for these bits */
-static inline bool is_cpuset_online(const struct cpuset *cs)
+static inline bool is_cpuset_online(struct cpuset *cs)
{
- return test_bit(CS_ONLINE, &cs->flags);
+ return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}
static inline int is_cpu_exclusive(const struct cpuset *cs)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 95c447e658f7..6cd152e99891 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6595,6 +6595,21 @@ static void perf_log_itrace_start(struct perf_event *event)
perf_output_end(&handle);
}
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+ /*
+ * Due to interrupt latency (AKA "skid"), we may enter the
+ * kernel before taking an overflow, even if the PMU is only
+ * counting user events.
+ * To avoid leaking information to userspace, we must always
+ * reject kernel samples when exclude_kernel is set.
+ */
+ if (event->attr.exclude_kernel && !user_mode(regs))
+ return false;
+
+ return true;
+}
+
/*
* Generic event overflow handling, sampling.
*/
@@ -6642,6 +6657,12 @@ static int __perf_event_overflow(struct perf_event *event,
}
/*
+ * For security, drop the skid kernel samples if necessary.
+ */
+ if (!sample_is_allowed(event, regs))
+ return ret;
+
+ /*
* XXX event_limit might not quite work as expected on inherited
* events
*/
diff --git a/kernel/fork.c b/kernel/fork.c
index 2845c5bdc8e3..246b8a57a32d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -370,7 +370,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
set_task_stack_end_magic(tsk);
#ifdef CONFIG_CC_STACKPROTECTOR
- tsk->stack_canary = get_random_int();
+ tsk->stack_canary = get_random_long();
#endif
/*
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 850b255649a2..698b8dec3074 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -604,6 +604,19 @@ repeat:
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
+/*
+ * Returns true when the work cannot be queued at the moment:
+ * either it is already pending in a worker list or it is
+ * currently being canceled.
+ */
+static inline bool queuing_blocked(struct kthread_worker *worker,
+ struct kthread_work *work)
+{
+ lockdep_assert_held(&worker->lock);
+
+ return !list_empty(&work->node) || work->canceling;
+}
+
/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
struct kthread_work *work,
@@ -633,7 +646,7 @@ bool queue_kthread_work(struct kthread_worker *worker,
unsigned long flags;
spin_lock_irqsave(&worker->lock, flags);
- if (list_empty(&work->node)) {
+ if (!queuing_blocked(worker, work)) {
insert_kthread_work(worker, work, &worker->work_list);
ret = true;
}
@@ -694,6 +707,87 @@ retry:
}
EXPORT_SYMBOL_GPL(flush_kthread_work);
+/*
+ * This function removes the work from the worker queue. It also makes sure
+ * that the work won't be queued again later via a delayed work's timer.
+ *
+ * The work might still be in use when this function finishes. See the
+ * current_work processed by the worker.
+ *
+ * Return: %true if @work was pending and successfully canceled,
+ * %false if @work was not pending
+ */
+static bool __kthread_cancel_work(struct kthread_work *work,
+ unsigned long *flags)
+{
+ /*
+ * Try to remove the work from a worker list. It might either
+ * be from worker->work_list or from worker->delayed_work_list.
+ */
+ if (!list_empty(&work->node)) {
+ list_del_init(&work->node);
+ return true;
+ }
+
+ return false;
+}
+
+static bool __kthread_cancel_work_sync(struct kthread_work *work)
+{
+ struct kthread_worker *worker = work->worker;
+ unsigned long flags;
+ int ret = false;
+
+ if (!worker)
+ goto out;
+
+ spin_lock_irqsave(&worker->lock, flags);
+ /* Work must not be used with >1 worker, see kthread_queue_work(). */
+ WARN_ON_ONCE(work->worker != worker);
+
+ ret = __kthread_cancel_work(work, &flags);
+
+ if (worker->current_work != work)
+ goto out_fast;
+
+ /*
+ * The work is in progress and we need to wait with the lock released.
+ * In the meantime, block any queuing by setting the canceling counter.
+ */
+ work->canceling++;
+ spin_unlock_irqrestore(&worker->lock, flags);
+ flush_kthread_work(work);
+ spin_lock_irqsave(&worker->lock, flags);
+ work->canceling--;
+
+out_fast:
+ spin_unlock_irqrestore(&worker->lock, flags);
+out:
+ return ret;
+}
+
+/**
+ * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
+ * @work: the kthread work to cancel
+ *
+ * Cancel @work and wait for its execution to finish. This function
+ * can be used even if the work re-queues itself. On return from this
+ * function, @work is guaranteed to be not pending or executing on any CPU.
+ *
+ * kthread_cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed works. Use kthread_cancel_delayed_work_sync() instead.
+ *
+ * The caller must ensure that the worker on which @work was last
+ * queued can't be destroyed before this function returns.
+ *
+ * Return: %true if @work was pending, %false otherwise.
+ */
+bool kthread_cancel_work_sync(struct kthread_work *work)
+{
+ return __kthread_cancel_work_sync(work);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
+
/**
* flush_kthread_worker - flush all current works on a kthread_worker
* @worker: worker to flush
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index c7e8ed99c953..5e2cd1030702 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -28,19 +28,25 @@
#include <linux/compat.h>
+void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
+ const struct cred *ptracer_cred)
+{
+ BUG_ON(!list_empty(&child->ptrace_entry));
+ list_add(&child->ptrace_entry, &new_parent->ptraced);
+ child->parent = new_parent;
+ child->ptracer_cred = get_cred(ptracer_cred);
+}
+
/*
* ptrace a task: make the debugger its new parent and
* move it to the ptrace list.
*
* Must be called with the tasklist lock write-held.
*/
-void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
+static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
- BUG_ON(!list_empty(&child->ptrace_entry));
- list_add(&child->ptrace_entry, &new_parent->ptraced);
- child->parent = new_parent;
rcu_read_lock();
- child->ptracer_cred = get_cred(__task_cred(new_parent));
+ __ptrace_link(child, new_parent, __task_cred(new_parent));
rcu_read_unlock();
}
@@ -353,7 +359,7 @@ static int ptrace_attach(struct task_struct *task, long request,
flags |= PT_SEIZED;
task->ptrace = flags;
- __ptrace_link(task, current);
+ ptrace_link(task, current);
/* SEIZE doesn't trap tracee on attach */
if (!seize)
@@ -420,7 +426,7 @@ static int ptrace_traceme(void)
*/
if (!ret && !(current->real_parent->flags & PF_EXITING)) {
current->ptrace = PT_PTRACED;
- __ptrace_link(current, current->real_parent);
+ ptrace_link(current, current->real_parent);
}
}
write_unlock_irq(&tasklist_lock);
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 308f80ce2e43..a353df46c8e4 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -22,4 +22,6 @@ obj-$(CONFIG_SCHED_DEBUG) += debug.o
obj-$(CONFIG_SCHED_TUNE) += tune.o
obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_FREQ_GOV_SCHED) += cpufreq_sched.o
+obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2d2b8834dd3b..0071785e698b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2623,9 +2623,9 @@ void wake_up_new_task(struct task_struct *p)
*/
set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif
-
rq = __task_rq_lock(p);
mark_task_starting(p);
+ post_init_entity_util_avg(&p->se);
activate_task(rq, p, ENQUEUE_WAKEUP_NEW);
p->on_rq = TASK_ON_RQ_QUEUED;
trace_sched_wakeup_new(p);
@@ -3154,9 +3154,10 @@ unsigned long sum_capacity_reqs(unsigned long cfs_cap,
return total += scr->dl;
}
+unsigned long boosted_cpu_util(int cpu);
static void sched_freq_tick_pelt(int cpu)
{
- unsigned long cpu_utilization = capacity_max;
+ unsigned long cpu_utilization = boosted_cpu_util(cpu);
unsigned long capacity_curr = capacity_curr_of(cpu);
struct sched_capacity_reqs *scr;
@@ -6460,9 +6461,6 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
if (!(sd->flags & SD_LOAD_BALANCE)) {
printk("does not load-balance\n");
- if (sd->parent)
- printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
- " has parent");
return -1;
}
@@ -6555,8 +6553,12 @@ static inline bool sched_debug(void)
static int sd_degenerate(struct sched_domain *sd)
{
- if (cpumask_weight(sched_domain_span(sd)) == 1)
- return 1;
+ if (cpumask_weight(sched_domain_span(sd)) == 1) {
+ if (sd->groups->sge)
+ sd->flags &= ~SD_LOAD_BALANCE;
+ else
+ return 1;
+ }
/* Following flags need at least 2 groups */
if (sd->flags & (SD_LOAD_BALANCE |
@@ -6564,6 +6566,7 @@ static int sd_degenerate(struct sched_domain *sd)
SD_BALANCE_FORK |
SD_BALANCE_EXEC |
SD_SHARE_CPUCAPACITY |
+ SD_ASYM_CPUCAPACITY |
SD_SHARE_PKG_RESOURCES |
SD_SHARE_POWERDOMAIN |
SD_SHARE_CAP_STATES)) {
@@ -6595,11 +6598,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
SD_BALANCE_NEWIDLE |
SD_BALANCE_FORK |
SD_BALANCE_EXEC |
+ SD_ASYM_CPUCAPACITY |
SD_SHARE_CPUCAPACITY |
SD_SHARE_PKG_RESOURCES |
SD_PREFER_SIBLING |
SD_SHARE_POWERDOMAIN |
SD_SHARE_CAP_STATES);
+ if (parent->groups->sge) {
+ parent->flags &= ~SD_LOAD_BALANCE;
+ return 0;
+ }
if (nr_node_ids == 1)
pflags &= ~SD_SERIALIZE;
}
@@ -6680,6 +6688,9 @@ static int init_rootdomain(struct root_domain *rd)
goto free_rto_mask;
init_max_cpu_capacity(&rd->max_cpu_capacity);
+
+ rd->max_cap_orig_cpu = rd->min_cap_orig_cpu = -1;
+
return 0;
free_rto_mask:
@@ -6996,6 +7007,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
*/
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
+ sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
/*
* Make sure the first group of this domain contains the
@@ -7291,11 +7303,19 @@ static int sched_domains_curr_level;
/*
* SD_flags allowed in topology descriptions.
*
- * SD_SHARE_CPUCAPACITY - describes SMT topologies
- * SD_SHARE_PKG_RESOURCES - describes shared caches
- * SD_NUMA - describes NUMA topologies
- * SD_SHARE_POWERDOMAIN - describes shared power domain
- * SD_SHARE_CAP_STATES - describes shared capacity states
+ * These flags are purely descriptive of the topology and do not prescribe
+ * behaviour. Behaviour is artificial and mapped in the below sd_init()
+ * function:
+ *
+ * SD_SHARE_CPUCAPACITY - describes SMT topologies
+ * SD_SHARE_PKG_RESOURCES - describes shared caches
+ * SD_NUMA - describes NUMA topologies
+ * SD_SHARE_POWERDOMAIN - describes shared power domain
+ * SD_ASYM_CPUCAPACITY - describes mixed capacity topologies
+ * SD_SHARE_CAP_STATES - describes shared capacity states
+ *
+ * Odd one out, which besides describing the topology also prescribes the
+ * desired behaviour that goes along with it:
*
* Odd one out:
* SD_ASYM_PACKING - describes SMT quirks
@@ -7305,11 +7325,13 @@ static int sched_domains_curr_level;
SD_SHARE_PKG_RESOURCES | \
SD_NUMA | \
SD_ASYM_PACKING | \
+ SD_ASYM_CPUCAPACITY | \
SD_SHARE_POWERDOMAIN | \
SD_SHARE_CAP_STATES)
static struct sched_domain *
-sd_init(struct sched_domain_topology_level *tl, int cpu)
+sd_init(struct sched_domain_topology_level *tl,
+ struct sched_domain *child, int cpu)
{
struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
int sd_weight, sd_flags = 0;
@@ -7361,6 +7383,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
.smt_gain = 0,
.max_newidle_lb_cost = 0,
.next_decay_max_lb_cost = jiffies,
+ .child = child,
#ifdef CONFIG_SCHED_DEBUG
.name = tl->name,
#endif
@@ -7370,6 +7393,13 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
* Convert topological properties into behaviour.
*/
+ if (sd->flags & SD_ASYM_CPUCAPACITY) {
+ struct sched_domain *t = sd;
+
+ for_each_lower_domain(t)
+ t->flags |= SD_BALANCE_WAKE;
+ }
+
if (sd->flags & SD_SHARE_CPUCAPACITY) {
sd->flags |= SD_PREFER_SIBLING;
sd->imbalance_pct = 110;
@@ -7816,16 +7846,13 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map, struct sched_domain_attr *attr,
struct sched_domain *child, int cpu)
{
- struct sched_domain *sd = sd_init(tl, cpu);
- if (!sd)
- return child;
+ struct sched_domain *sd = sd_init(tl, child, cpu);
cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
if (child) {
sd->level = child->level + 1;
sched_domain_level_max = max(sched_domain_level_max, sd->level);
child->parent = sd;
- sd->child = child;
if (!cpumask_subset(sched_domain_span(child),
sched_domain_span(sd))) {
@@ -7859,7 +7886,6 @@ static int build_sched_domains(const struct cpumask *cpu_map,
enum s_alloc alloc_state;
struct sched_domain *sd;
struct s_data d;
- struct rq *rq = NULL;
int i, ret = -ENOMEM;
alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
@@ -7877,8 +7903,6 @@ static int build_sched_domains(const struct cpumask *cpu_map,
*per_cpu_ptr(d.sd, i) = sd;
if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
sd->flags |= SD_OVERLAP;
- if (cpumask_equal(cpu_map, sched_domain_span(sd)))
- break;
}
}
@@ -7914,8 +7938,19 @@ static int build_sched_domains(const struct cpumask *cpu_map,
/* Attach the domains */
rcu_read_lock();
for_each_cpu(i, cpu_map) {
- rq = cpu_rq(i);
+ int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu);
+ int min_cpu = READ_ONCE(d.rd->min_cap_orig_cpu);
+
+ if ((max_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig >
+ cpu_rq(max_cpu)->cpu_capacity_orig))
+ WRITE_ONCE(d.rd->max_cap_orig_cpu, i);
+
+ if ((min_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig <
+ cpu_rq(min_cpu)->cpu_capacity_orig))
+ WRITE_ONCE(d.rd->min_cap_orig_cpu, i);
+
sd = *per_cpu_ptr(d.sd, i);
+
cpu_attach_domain(sd, d.rd, i);
}
rcu_read_unlock();
@@ -8339,6 +8374,7 @@ void __init sched_init(void)
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
+ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
/*
* How much cpu bandwidth does root_task_group get?
*
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
new file mode 100644
index 000000000000..dbc51442ecbc
--- /dev/null
+++ b/kernel/sched/cpufreq.c
@@ -0,0 +1,63 @@
+/*
+ * Scheduler code and data structures related to cpufreq.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "sched.h"
+
+DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+
+/**
+ * cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
+ * @cpu: The CPU to set the pointer for.
+ * @data: New pointer value.
+ * @func: Callback function to set for the CPU.
+ *
+ * Set and publish the update_util_data pointer for the given CPU.
+ *
+ * The update_util_data pointer of @cpu is set to @data and the callback
+ * function pointer in the target struct update_util_data is set to @func.
+ * That function will be called by cpufreq_update_util() from RCU-sched
+ * read-side critical sections, so it must not sleep. @data will always be
+ * passed to it as the first argument which allows the function to get to the
+ * target update_util_data structure and its container.
+ *
+ * The update_util_data pointer of @cpu must be NULL when this function is
+ * called or it will WARN() and return with no effect.
+ */
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+ void (*func)(struct update_util_data *data, u64 time,
+ unsigned int flags))
+{
+ if (WARN_ON(!data || !func))
+ return;
+
+ if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
+ return;
+
+ data->func = func;
+ rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
+}
+EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook);
+
+/**
+ * cpufreq_remove_update_util_hook - Clear the CPU's update_util_data pointer.
+ * @cpu: The CPU to clear the pointer for.
+ *
+ * Clear the update_util_data pointer for the given CPU.
+ *
+ * Callers must use RCU-sched callbacks to free any memory that might be
+ * accessed via the old update_util_data pointer or invoke synchronize_sched()
+ * right after this function to avoid use-after-free.
+ */
+void cpufreq_remove_update_util_hook(int cpu)
+{
+ rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
+}
+EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
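A consumer embeds struct update_util_data in its own per-CPU state and recovers the container in the callback, which is exactly the pattern cpufreq_schedutil.c follows below. A minimal sketch with invented `demo` names:

struct demo_cpu {
	struct update_util_data update_util;
	/* ... other per-CPU governor state ... */
};

static DEFINE_PER_CPU(struct demo_cpu, demo_cpus);

static void demo_update(struct update_util_data *data, u64 time,
			unsigned int flags)
{
	struct demo_cpu *dc = container_of(data, struct demo_cpu, update_util);

	/* Runs in RCU-sched read-side context: must not sleep. */
	(void)dc;
}

static void demo_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(demo_cpus, cpu).update_util,
				     demo_update);
}

static void demo_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
	synchronize_sched();	/* before freeing anything the hook touches */
}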
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
index d751bc2d0d6e..f10d9f7d6d07 100644
--- a/kernel/sched/cpufreq_sched.c
+++ b/kernel/sched/cpufreq_sched.c
@@ -32,6 +32,12 @@ static struct cpufreq_governor cpufreq_gov_sched;
static DEFINE_PER_CPU(unsigned long, enabled);
DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
+struct gov_tunables {
+ struct gov_attr_set attr_set;
+ unsigned int up_throttle_nsec;
+ unsigned int down_throttle_nsec;
+};
+
/**
* gov_data - per-policy data internal to the governor
* @up_throttle: next throttling period expiry if increasing OPP
@@ -53,8 +59,8 @@ DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
struct gov_data {
ktime_t up_throttle;
ktime_t down_throttle;
- unsigned int up_throttle_nsec;
- unsigned int down_throttle_nsec;
+ struct gov_tunables *tunables;
+ struct list_head tunables_hook;
struct task_struct *task;
struct irq_work irq_work;
unsigned int requested_freq;
@@ -71,8 +77,10 @@ static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy,
__cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
- gd->up_throttle = ktime_add_ns(ktime_get(), gd->up_throttle_nsec);
- gd->down_throttle = ktime_add_ns(ktime_get(), gd->down_throttle_nsec);
+ gd->up_throttle = ktime_add_ns(ktime_get(),
+ gd->tunables->up_throttle_nsec);
+ gd->down_throttle = ktime_add_ns(ktime_get(),
+ gd->tunables->down_throttle_nsec);
up_write(&policy->rwsem);
}
@@ -262,12 +270,70 @@ static inline void clear_sched_freq(void)
static_key_slow_dec(&__sched_freq);
}
-static struct attribute_group sched_attr_group_gov_pol;
-static struct attribute_group *get_sysfs_attr(void)
+/* Tunables */
+static struct gov_tunables *global_tunables;
+
+static inline struct gov_tunables *to_tunables(struct gov_attr_set *attr_set)
{
- return &sched_attr_group_gov_pol;
+ return container_of(attr_set, struct gov_tunables, attr_set);
}
+static ssize_t up_throttle_nsec_show(struct gov_attr_set *attr_set, char *buf)
+{
+ struct gov_tunables *tunables = to_tunables(attr_set);
+
+ return sprintf(buf, "%u\n", tunables->up_throttle_nsec);
+}
+
+static ssize_t up_throttle_nsec_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct gov_tunables *tunables = to_tunables(attr_set);
+ int ret;
+ long unsigned int val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ tunables->up_throttle_nsec = val;
+ return count;
+}
+
+static ssize_t down_throttle_nsec_show(struct gov_attr_set *attr_set, char *buf)
+{
+ struct gov_tunables *tunables = to_tunables(attr_set);
+
+ return sprintf(buf, "%u\n", tunables->down_throttle_nsec);
+}
+
+static ssize_t down_throttle_nsec_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct gov_tunables *tunables = to_tunables(attr_set);
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ tunables->down_throttle_nsec = val;
+ return count;
+}
+
+static struct governor_attr up_throttle_nsec = __ATTR_RW(up_throttle_nsec);
+static struct governor_attr down_throttle_nsec = __ATTR_RW(down_throttle_nsec);
+
+static struct attribute *schedfreq_attributes[] = {
+ &up_throttle_nsec.attr,
+ &down_throttle_nsec.attr,
+ NULL
+};
+
+static struct kobj_type tunables_ktype = {
+ .default_attrs = schedfreq_attributes,
+ .sysfs_ops = &governor_sysfs_ops,
+};
+
static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
{
struct gov_data *gd;
@@ -282,17 +348,39 @@ static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
if (!gd)
return -ENOMEM;
- gd->up_throttle_nsec = policy->cpuinfo.transition_latency ?
- policy->cpuinfo.transition_latency :
- THROTTLE_UP_NSEC;
- gd->down_throttle_nsec = THROTTLE_DOWN_NSEC;
- pr_debug("%s: throttle threshold = %u [ns]\n",
- __func__, gd->up_throttle_nsec);
-
- rc = sysfs_create_group(&policy->kobj, get_sysfs_attr());
- if (rc) {
- pr_err("%s: couldn't create sysfs attributes: %d\n", __func__, rc);
- goto err;
+ policy->governor_data = gd;
+
+ if (!global_tunables) {
+ gd->tunables = kzalloc(sizeof(*gd->tunables), GFP_KERNEL);
+ if (!gd->tunables)
+ goto free_gd;
+
+ gd->tunables->up_throttle_nsec =
+ policy->cpuinfo.transition_latency ?
+ policy->cpuinfo.transition_latency :
+ THROTTLE_UP_NSEC;
+ gd->tunables->down_throttle_nsec =
+ THROTTLE_DOWN_NSEC;
+
+ rc = kobject_init_and_add(&gd->tunables->attr_set.kobj,
+ &tunables_ktype,
+ get_governor_parent_kobj(policy),
+ "%s", cpufreq_gov_sched.name);
+ if (rc)
+ goto free_tunables;
+
+ gov_attr_set_init(&gd->tunables->attr_set,
+ &gd->tunables_hook);
+
+ pr_debug("%s: throttle_threshold = %u [ns]\n",
+ __func__, gd->tunables->up_throttle_nsec);
+
+ if (!have_governor_per_policy())
+ global_tunables = gd->tunables;
+ } else {
+ gd->tunables = global_tunables;
+ gov_attr_set_get(&global_tunables->attr_set,
+ &gd->tunables_hook);
}
policy->governor_data = gd;
@@ -304,7 +392,7 @@ static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
if (IS_ERR_OR_NULL(gd->task)) {
pr_err("%s: failed to create kschedfreq thread\n",
__func__);
- goto err;
+ goto free_tunables;
}
get_task_struct(gd->task);
kthread_bind_mask(gd->task, policy->related_cpus);
@@ -316,7 +404,9 @@ static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
return 0;
-err:
+free_tunables:
+ kfree(gd->tunables);
+free_gd:
policy->governor_data = NULL;
kfree(gd);
return -ENOMEM;
@@ -324,6 +414,7 @@ err:
static int cpufreq_sched_policy_exit(struct cpufreq_policy *policy)
{
+ unsigned int count;
struct gov_data *gd = policy->governor_data;
clear_sched_freq();
@@ -332,7 +423,12 @@ static int cpufreq_sched_policy_exit(struct cpufreq_policy *policy)
put_task_struct(gd->task);
}
- sysfs_remove_group(&policy->kobj, get_sysfs_attr());
+ count = gov_attr_set_put(&gd->tunables->attr_set, &gd->tunables_hook);
+ if (!count) {
+ if (!have_governor_per_policy())
+ global_tunables = NULL;
+ kfree(gd->tunables);
+ }
policy->governor_data = NULL;
@@ -394,88 +490,6 @@ static int cpufreq_sched_setup(struct cpufreq_policy *policy,
return 0;
}
-/* Tunables */
-static ssize_t show_up_throttle_nsec(struct gov_data *gd, char *buf)
-{
- return sprintf(buf, "%u\n", gd->up_throttle_nsec);
-}
-
-static ssize_t store_up_throttle_nsec(struct gov_data *gd,
- const char *buf, size_t count)
-{
- int ret;
- long unsigned int val;
-
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- gd->up_throttle_nsec = val;
- return count;
-}
-
-static ssize_t show_down_throttle_nsec(struct gov_data *gd, char *buf)
-{
- return sprintf(buf, "%u\n", gd->down_throttle_nsec);
-}
-
-static ssize_t store_down_throttle_nsec(struct gov_data *gd,
- const char *buf, size_t count)
-{
- int ret;
- long unsigned int val;
-
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- gd->down_throttle_nsec = val;
- return count;
-}
-
-/*
- * Create show/store routines
- * - sys: One governor instance for complete SYSTEM
- * - pol: One governor instance per struct cpufreq_policy
- */
-#define show_gov_pol_sys(file_name) \
-static ssize_t show_##file_name##_gov_pol \
-(struct cpufreq_policy *policy, char *buf) \
-{ \
- return show_##file_name(policy->governor_data, buf); \
-}
-
-#define store_gov_pol_sys(file_name) \
-static ssize_t store_##file_name##_gov_pol \
-(struct cpufreq_policy *policy, const char *buf, size_t count) \
-{ \
- return store_##file_name(policy->governor_data, buf, count); \
-}
-
-#define gov_pol_attr_rw(_name) \
- static struct freq_attr _name##_gov_pol = \
- __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
-
-#define show_store_gov_pol_sys(file_name) \
- show_gov_pol_sys(file_name); \
- store_gov_pol_sys(file_name)
-#define tunable_handlers(file_name) \
- show_gov_pol_sys(file_name); \
- store_gov_pol_sys(file_name); \
- gov_pol_attr_rw(file_name)
-
-tunable_handlers(down_throttle_nsec);
-tunable_handlers(up_throttle_nsec);
-
-/* Per policy governor instance */
-static struct attribute *sched_attributes_gov_pol[] = {
- &up_throttle_nsec_gov_pol.attr,
- &down_throttle_nsec_gov_pol.attr,
- NULL,
-};
-
-static struct attribute_group sched_attr_group_gov_pol = {
- .attrs = sched_attributes_gov_pol,
- .name = "sched",
-};
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
static
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
new file mode 100644
index 000000000000..75bfbb336722
--- /dev/null
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -0,0 +1,770 @@
+/*
+ * CPUFreq governor based on scheduler-provided CPU utilization data.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <trace/events/power.h>
+
+#include "sched.h"
+#include "tune.h"
+
+unsigned long boosted_cpu_util(int cpu);
+
+/*
+ * Stub out fast switch routines present on mainline to reduce the backport
+ * overhead.
+ */
+#define cpufreq_driver_fast_switch(x, y) 0
+#define cpufreq_enable_fast_switch(x)
+#define cpufreq_disable_fast_switch(x)
+#define LATENCY_MULTIPLIER (1000)
+#define SUGOV_KTHREAD_PRIORITY 50
+
+struct sugov_tunables {
+ struct gov_attr_set attr_set;
+ unsigned int up_rate_limit_us;
+ unsigned int down_rate_limit_us;
+};
+
+struct sugov_policy {
+ struct cpufreq_policy *policy;
+
+ struct sugov_tunables *tunables;
+ struct list_head tunables_hook;
+
+ raw_spinlock_t update_lock; /* For shared policies */
+ u64 last_freq_update_time;
+ s64 min_rate_limit_ns;
+ s64 up_rate_delay_ns;
+ s64 down_rate_delay_ns;
+ unsigned int next_freq;
+
+ /* The next fields are only needed if fast switch cannot be used. */
+ struct irq_work irq_work;
+ struct kthread_work work;
+ struct mutex work_lock;
+ struct kthread_worker worker;
+ struct task_struct *thread;
+ bool work_in_progress;
+
+ bool need_freq_update;
+};
+
+struct sugov_cpu {
+ struct update_util_data update_util;
+ struct sugov_policy *sg_policy;
+
+ unsigned int cached_raw_freq;
+ unsigned long iowait_boost;
+ unsigned long iowait_boost_max;
+ u64 last_update;
+
+ /* The fields below are only needed when sharing a policy. */
+ unsigned long util;
+ unsigned long max;
+ unsigned int flags;
+};
+
+static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
+
+/************************ Governor internals ***********************/
+
+static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+{
+ s64 delta_ns;
+
+ if (sg_policy->work_in_progress)
+ return false;
+
+ if (unlikely(sg_policy->need_freq_update)) {
+ sg_policy->need_freq_update = false;
+ /*
+ * This happens when limits change, so forget the previous
+ * next_freq value and force an update.
+ */
+ sg_policy->next_freq = UINT_MAX;
+ return true;
+ }
+
+ delta_ns = time - sg_policy->last_freq_update_time;
+
+ /* No need to recalculate the next frequency until at least min_rate_limit_ns has elapsed. */
+ return delta_ns >= sg_policy->min_rate_limit_ns;
+}
+
+static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
+ unsigned int next_freq)
+{
+ s64 delta_ns;
+
+ delta_ns = time - sg_policy->last_freq_update_time;
+
+ if (next_freq > sg_policy->next_freq &&
+ delta_ns < sg_policy->up_rate_delay_ns)
+ return true;
+
+ if (next_freq < sg_policy->next_freq &&
+ delta_ns < sg_policy->down_rate_delay_ns)
+ return true;
+
+ return false;
+}
+
+static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
+ unsigned int next_freq)
+{
+ struct cpufreq_policy *policy = sg_policy->policy;
+
+ if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
+ return;
+
+ if (policy->fast_switch_enabled) {
+ if (sg_policy->next_freq == next_freq) {
+ trace_cpu_frequency(policy->cur, smp_processor_id());
+ return;
+ }
+ sg_policy->next_freq = next_freq;
+ sg_policy->last_freq_update_time = time;
+ next_freq = cpufreq_driver_fast_switch(policy, next_freq);
+ if (next_freq == CPUFREQ_ENTRY_INVALID)
+ return;
+
+ policy->cur = next_freq;
+ trace_cpu_frequency(next_freq, smp_processor_id());
+ } else if (sg_policy->next_freq != next_freq) {
+ sg_policy->next_freq = next_freq;
+ sg_policy->last_freq_update_time = time;
+ sg_policy->work_in_progress = true;
+ irq_work_queue(&sg_policy->irq_work);
+ }
+}
+
+/**
+ * get_next_freq - Compute a new frequency for a given cpufreq policy.
+ * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @util: Current CPU utilization.
+ * @max: CPU capacity.
+ *
+ * If the utilization is frequency-invariant, choose the new frequency to be
+ * proportional to it, that is
+ *
+ * next_freq = C * max_freq * util / max
+ *
+ * Otherwise, approximate the would-be frequency-invariant utilization by
+ * util_raw * (curr_freq / max_freq) which leads to
+ *
+ * next_freq = C * curr_freq * util_raw / max
+ *
+ * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
+ *
+ * The lowest driver-supported frequency which is equal or greater than the raw
+ * next_freq (as calculated above) is returned, subject to policy min/max and
+ * cpufreq driver limitations.
+ */
+static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
+ unsigned long max)
+{
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ struct cpufreq_policy *policy = sg_policy->policy;
+ unsigned int freq = arch_scale_freq_invariant() ?
+ policy->cpuinfo.max_freq : policy->cur;
+
+ freq = (freq + (freq >> 2)) * util / max;
+
+ if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+ return sg_policy->next_freq;
+ sg_cpu->cached_raw_freq = freq;
+ return cpufreq_driver_resolve_freq(policy, freq);
+}
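Worked example of the formula above: with a frequency-invariant util signal, util = 614, max = 1024 and cpuinfo.max_freq = 2000000 kHz give freq = (2000000 + 500000) * 614 / 1024 ≈ 1499023 kHz, i.e. roughly 60% utilization maps to 75% of the maximum frequency; cpufreq_driver_resolve_freq() then rounds this to a supported OPP within the policy limits.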
+
+static inline bool use_pelt(void)
+{
+#ifdef CONFIG_SCHED_WALT
+ return (!sysctl_sched_use_walt_cpu_util || walt_disabled);
+#else
+ return true;
+#endif
+}
+
+static void sugov_get_util(unsigned long *util, unsigned long *max, u64 time)
+{
+ int cpu = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long max_cap, rt;
+ s64 delta;
+
+ max_cap = arch_scale_cpu_capacity(NULL, cpu);
+
+ sched_avg_update(rq);
+ delta = time - rq->age_stamp;
+ if (unlikely(delta < 0))
+ delta = 0;
+ rt = div64_u64(rq->rt_avg, sched_avg_period() + delta);
+ rt = (rt * max_cap) >> SCHED_CAPACITY_SHIFT;
+
+ *util = boosted_cpu_util(cpu);
+ if (likely(use_pelt()))
+ *util = min((*util + rt), max_cap);
+
+ *max = max_cap;
+}
+
+static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
+ unsigned int flags)
+{
+ if (flags & SCHED_CPUFREQ_IOWAIT) {
+ sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ } else if (sg_cpu->iowait_boost) {
+ s64 delta_ns = time - sg_cpu->last_update;
+
+ /* Clear iowait_boost if the CPU appears to have been idle. */
+ if (delta_ns > TICK_NSEC)
+ sg_cpu->iowait_boost = 0;
+ }
+}
+
+static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
+ unsigned long *max)
+{
+ unsigned long boost_util = sg_cpu->iowait_boost;
+ unsigned long boost_max = sg_cpu->iowait_boost_max;
+
+ if (!boost_util)
+ return;
+
+ if (*util * boost_max < *max * boost_util) {
+ *util = boost_util;
+ *max = boost_max;
+ }
+ sg_cpu->iowait_boost >>= 1;
+}
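Example of the decay: after a SCHED_CPUFREQ_IOWAIT update sets the boost to iowait_boost_max (say 1024), each subsequent update that consumes the boost halves it to 512, 256, and so on until it reaches zero, unless sugov_set_iowait_boost() has already cleared it after a tick's worth of apparent idleness.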
+
+static void sugov_update_single(struct update_util_data *hook, u64 time,
+ unsigned int flags)
+{
+ struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ struct cpufreq_policy *policy = sg_policy->policy;
+ unsigned long util, max;
+ unsigned int next_f;
+
+ sugov_set_iowait_boost(sg_cpu, time, flags);
+ sg_cpu->last_update = time;
+
+ if (!sugov_should_update_freq(sg_policy, time))
+ return;
+
+ if (flags & SCHED_CPUFREQ_DL) {
+ next_f = policy->cpuinfo.max_freq;
+ } else {
+ sugov_get_util(&util, &max, time);
+ sugov_iowait_boost(sg_cpu, &util, &max);
+ next_f = get_next_freq(sg_cpu, util, max);
+ }
+ sugov_update_commit(sg_policy, time, next_f);
+}
+
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
+ unsigned long util, unsigned long max,
+ unsigned int flags)
+{
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ struct cpufreq_policy *policy = sg_policy->policy;
+ unsigned int max_f = policy->cpuinfo.max_freq;
+ u64 last_freq_update_time = sg_policy->last_freq_update_time;
+ unsigned int j;
+
+ if (flags & SCHED_CPUFREQ_DL)
+ return max_f;
+
+ sugov_iowait_boost(sg_cpu, &util, &max);
+
+ for_each_cpu(j, policy->cpus) {
+ struct sugov_cpu *j_sg_cpu;
+ unsigned long j_util, j_max;
+ s64 delta_ns;
+
+ if (j == smp_processor_id())
+ continue;
+
+ j_sg_cpu = &per_cpu(sugov_cpu, j);
+ /*
+ * If the CPU utilization was last updated before the previous
+ * frequency update and the time elapsed between the last update
+ * of the CPU utilization and the last frequency update is long
+ * enough, don't take the CPU into account as it probably is
+ * idle now (and clear iowait_boost for it).
+ */
+ delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+ if (delta_ns > TICK_NSEC) {
+ j_sg_cpu->iowait_boost = 0;
+ continue;
+ }
+ if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
+ return max_f;
+
+ j_util = j_sg_cpu->util;
+ j_max = j_sg_cpu->max;
+ if (j_util * max > j_max * util) {
+ util = j_util;
+ max = j_max;
+ }
+
+ sugov_iowait_boost(j_sg_cpu, &util, &max);
+ }
+
+ return get_next_freq(sg_cpu, util, max);
+}
+
+static void sugov_update_shared(struct update_util_data *hook, u64 time,
+ unsigned int flags)
+{
+ struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ unsigned long util, max;
+ unsigned int next_f;
+
+ sugov_get_util(&util, &max, time);
+
+ raw_spin_lock(&sg_policy->update_lock);
+
+ sg_cpu->util = util;
+ sg_cpu->max = max;
+ sg_cpu->flags = flags;
+
+ sugov_set_iowait_boost(sg_cpu, time, flags);
+ sg_cpu->last_update = time;
+
+ if (sugov_should_update_freq(sg_policy, time)) {
+ next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
+ sugov_update_commit(sg_policy, time, next_f);
+ }
+
+ raw_spin_unlock(&sg_policy->update_lock);
+}
+
+static void sugov_work(struct kthread_work *work)
+{
+ struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
+
+ mutex_lock(&sg_policy->work_lock);
+ __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
+ CPUFREQ_RELATION_L);
+ mutex_unlock(&sg_policy->work_lock);
+
+ sg_policy->work_in_progress = false;
+}
+
+static void sugov_irq_work(struct irq_work *irq_work)
+{
+ struct sugov_policy *sg_policy;
+
+ sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
+
+ /*
+ * For Real Time and Deadline tasks, the schedutil governor drives the
+ * frequency to maximum, so special care must be taken to ensure that
+ * this RT kthread does not itself trigger that behaviour.
+ *
+ * This is (mostly) guaranteed by the work_in_progress flag. The flag is
+ * updated only at the end of sugov_work(), and until then schedutil
+ * rejects all other frequency scaling requests.
+ *
+ * Though there is a very rare case where the RT thread yields right
+ * after the work_in_progress flag is cleared. The effects of that are
+ * neglected for now.
+ */
+ queue_kthread_work(&sg_policy->worker, &sg_policy->work);
+}
+
+/************************** sysfs interface ************************/
+
+static struct sugov_tunables *global_tunables;
+static DEFINE_MUTEX(global_tunables_lock);
+
+static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
+{
+ return container_of(attr_set, struct sugov_tunables, attr_set);
+}
+
+static DEFINE_MUTEX(min_rate_lock);
+
+static void update_min_rate_limit_us(struct sugov_policy *sg_policy)
+{
+ mutex_lock(&min_rate_lock);
+ sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
+ sg_policy->down_rate_delay_ns);
+ mutex_unlock(&min_rate_lock);
+}
+
+static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
+{
+ struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+ return sprintf(buf, "%u\n", tunables->up_rate_limit_us);
+}
+
+static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
+{
+ struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+ return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
+}
+
+static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+ struct sugov_policy *sg_policy;
+ unsigned int rate_limit_us;
+
+ if (kstrtouint(buf, 10, &rate_limit_us))
+ return -EINVAL;
+
+ tunables->up_rate_limit_us = rate_limit_us;
+
+ list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
+ sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
+ update_min_rate_limit_us(sg_policy);
+ }
+
+ return count;
+}
+
+static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+ struct sugov_policy *sg_policy;
+ unsigned int rate_limit_us;
+
+ if (kstrtouint(buf, 10, &rate_limit_us))
+ return -EINVAL;
+
+ tunables->down_rate_limit_us = rate_limit_us;
+
+ list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
+ sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
+ update_min_rate_limit_us(sg_policy);
+ }
+
+ return count;
+}
+
+static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
+static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
+
+static struct attribute *sugov_attributes[] = {
+ &up_rate_limit_us.attr,
+ &down_rate_limit_us.attr,
+ NULL
+};
+
+static struct kobj_type sugov_tunables_ktype = {
+ .default_attrs = sugov_attributes,
+ .sysfs_ops = &governor_sysfs_ops,
+};
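
The show/store handlers above take a struct gov_attr_set rather than a raw kobject; governor_sysfs_ops (provided by the cpufreq core in this series) does the translation. A rough sketch of that dispatch, for reference only:

    static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
                                 char *buf)
    {
            struct gov_attr_set *attr_set =
                    container_of(kobj, struct gov_attr_set, kobj);
            struct governor_attr *gattr =
                    container_of(attr, struct governor_attr, attr);

            return gattr->show(attr_set, buf);
    }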
+
+/********************** cpufreq governor interface *********************/
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+static
+#endif
+struct cpufreq_governor cpufreq_gov_schedutil;
+
+static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy;
+
+ sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
+ if (!sg_policy)
+ return NULL;
+
+ sg_policy->policy = policy;
+ init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+ mutex_init(&sg_policy->work_lock);
+ raw_spin_lock_init(&sg_policy->update_lock);
+ return sg_policy;
+}
+
+static void sugov_policy_free(struct sugov_policy *sg_policy)
+{
+ mutex_destroy(&sg_policy->work_lock);
+ kfree(sg_policy);
+}
+
+static int sugov_kthread_create(struct sugov_policy *sg_policy)
+{
+ struct task_struct *thread;
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
+ struct cpufreq_policy *policy = sg_policy->policy;
+ int ret;
+
+ /* kthread only required for slow path */
+ if (policy->fast_switch_enabled)
+ return 0;
+
+ init_kthread_work(&sg_policy->work, sugov_work);
+ init_kthread_worker(&sg_policy->worker);
+ thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
+ "sugov:%d",
+ cpumask_first(policy->related_cpus));
+ if (IS_ERR(thread)) {
+ pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
+ return PTR_ERR(thread);
+ }
+
+ ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
+ if (ret) {
+ kthread_stop(thread);
+ pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
+ return ret;
+ }
+
+ sg_policy->thread = thread;
+ kthread_bind_mask(thread, policy->related_cpus);
+ wake_up_process(thread);
+
+ return 0;
+}
+
+static void sugov_kthread_stop(struct sugov_policy *sg_policy)
+{
+ /* kthread only required for slow path */
+ if (sg_policy->policy->fast_switch_enabled)
+ return;
+
+ flush_kthread_worker(&sg_policy->worker);
+ kthread_stop(sg_policy->thread);
+}
+
+static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
+{
+ struct sugov_tunables *tunables;
+
+ tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+ if (tunables) {
+ gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
+ if (!have_governor_per_policy())
+ global_tunables = tunables;
+ }
+ return tunables;
+}
+
+static void sugov_tunables_free(struct sugov_tunables *tunables)
+{
+ if (!have_governor_per_policy())
+ global_tunables = NULL;
+
+ kfree(tunables);
+}
+
+static int sugov_init(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy;
+ struct sugov_tunables *tunables;
+ unsigned int lat;
+ int ret = 0;
+
+ /* State should be equivalent to EXIT */
+ if (policy->governor_data)
+ return -EBUSY;
+
+ sg_policy = sugov_policy_alloc(policy);
+ if (!sg_policy)
+ return -ENOMEM;
+
+ ret = sugov_kthread_create(sg_policy);
+ if (ret)
+ goto free_sg_policy;
+
+ mutex_lock(&global_tunables_lock);
+
+ if (global_tunables) {
+ if (WARN_ON(have_governor_per_policy())) {
+ ret = -EINVAL;
+ goto stop_kthread;
+ }
+ policy->governor_data = sg_policy;
+ sg_policy->tunables = global_tunables;
+
+ gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
+ goto out;
+ }
+
+ tunables = sugov_tunables_alloc(sg_policy);
+ if (!tunables) {
+ ret = -ENOMEM;
+ goto stop_kthread;
+ }
+
+ tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
+ tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
+ lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (lat) {
+ tunables->up_rate_limit_us *= lat;
+ tunables->down_rate_limit_us *= lat;
+ }
+
+ policy->governor_data = sg_policy;
+ sg_policy->tunables = tunables;
+
+ ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
+ get_governor_parent_kobj(policy), "%s",
+ cpufreq_gov_schedutil.name);
+ if (ret)
+ goto fail;
+
+ out:
+ mutex_unlock(&global_tunables_lock);
+
+ cpufreq_enable_fast_switch(policy);
+ return 0;
+
+ fail:
+ policy->governor_data = NULL;
+ sugov_tunables_free(tunables);
+
+stop_kthread:
+ sugov_kthread_stop(sg_policy);
+
+free_sg_policy:
+ mutex_unlock(&global_tunables_lock);
+
+ sugov_policy_free(sg_policy);
+ pr_err("initialization failed (error %d)\n", ret);
+ return ret;
+}
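
For a concrete sense of the defaults computed above: LATENCY_MULTIPLIER is 1000 (include/linux/cpufreq.h), so with an assumed driver transition latency the initial rate limits work out as follows:

    /* Worked example (driver values assumed for illustration):
     *   transition_latency = 500000 ns  ->  lat = 500 us
     *   up_rate_limit_us   = 1000 * 500 = 500000 us
     *   down_rate_limit_us = 1000 * 500 = 500000 us
     * A zero latency leaves both limits at the bare LATENCY_MULTIPLIER.
     */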
+
+static int sugov_exit(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy = policy->governor_data;
+ struct sugov_tunables *tunables = sg_policy->tunables;
+ unsigned int count;
+
+ cpufreq_disable_fast_switch(policy);
+
+ mutex_lock(&global_tunables_lock);
+
+ count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
+ policy->governor_data = NULL;
+ if (!count)
+ sugov_tunables_free(tunables);
+
+ mutex_unlock(&global_tunables_lock);
+
+ sugov_kthread_stop(sg_policy);
+ sugov_policy_free(sg_policy);
+
+ return 0;
+}
+
+static int sugov_start(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy = policy->governor_data;
+ unsigned int cpu;
+
+ sg_policy->up_rate_delay_ns =
+ sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
+ sg_policy->down_rate_delay_ns =
+ sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
+ update_min_rate_limit_us(sg_policy);
+ sg_policy->last_freq_update_time = 0;
+ sg_policy->next_freq = UINT_MAX;
+ sg_policy->work_in_progress = false;
+ sg_policy->need_freq_update = false;
+
+ for_each_cpu(cpu, policy->cpus) {
+ struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+
+ sg_cpu->sg_policy = sg_policy;
+ if (policy_is_shared(policy)) {
+ sg_cpu->util = 0;
+ sg_cpu->max = 0;
+ sg_cpu->flags = SCHED_CPUFREQ_DL;
+ sg_cpu->last_update = 0;
+ sg_cpu->cached_raw_freq = 0;
+ sg_cpu->iowait_boost = 0;
+ sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+ sugov_update_shared);
+ } else {
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+ sugov_update_single);
+ }
+ }
+ return 0;
+}
+
+static int sugov_stop(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy = policy->governor_data;
+ unsigned int cpu;
+
+ for_each_cpu(cpu, policy->cpus)
+ cpufreq_remove_update_util_hook(cpu);
+
+ synchronize_sched();
+
+ irq_work_sync(&sg_policy->irq_work);
+ kthread_cancel_work_sync(&sg_policy->work);
+
+ return 0;
+}
+
+static int sugov_limits(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy = policy->governor_data;
+
+ if (!policy->fast_switch_enabled) {
+ mutex_lock(&sg_policy->work_lock);
+ cpufreq_policy_apply_limits(policy);
+ mutex_unlock(&sg_policy->work_lock);
+ }
+
+ sg_policy->need_freq_update = true;
+
+ return 0;
+}
+
+static int cpufreq_schedutil_cb(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ switch (event) {
+ case CPUFREQ_GOV_POLICY_INIT:
+ return sugov_init(policy);
+ case CPUFREQ_GOV_POLICY_EXIT:
+ return sugov_exit(policy);
+ case CPUFREQ_GOV_START:
+ return sugov_start(policy);
+ case CPUFREQ_GOV_STOP:
+ return sugov_stop(policy);
+ case CPUFREQ_GOV_LIMITS:
+ return sugov_limits(policy);
+ default:
+ BUG();
+ }
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+static
+#endif
+struct cpufreq_governor cpufreq_gov_schedutil = {
+ .name = "schedutil",
+ .governor = cpufreq_schedutil_cb,
+ .owner = THIS_MODULE,
+};
+
+static int __init sugov_register(void)
+{
+ return cpufreq_register_governor(&cpufreq_gov_schedutil);
+}
+fs_initcall(sugov_register);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 981fcd7dc394..1d00cf8c00fa 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -27,6 +27,8 @@
* of the License.
*/
+#include "sched.h"
+
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
@@ -51,6 +53,27 @@ static int convert_prio(int prio)
}
/**
+ * drop_nopreempt_cpus - remove a CPU from the mask if it is likely non-preemptible
+ * @lowest_mask: mask with selected CPUs (non-NULL)
+ */
+static void
+drop_nopreempt_cpus(struct cpumask *lowest_mask)
+{
+ unsigned int cpu = cpumask_first(lowest_mask);
+
+ while (cpu < nr_cpu_ids) {
+ /* unlocked access */
+ struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
+
+ if (task_may_not_preempt(task, cpu))
+ cpumask_clear_cpu(cpu, lowest_mask);
+
+ cpu = cpumask_next(cpu, lowest_mask);
+ }
+}
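
task_may_not_preempt() comes from elsewhere in this series; the idea is to treat a CPU as effectively non-preemptible while its current task is ksoftirqd or is deep in softirq processing. A sketch of that predicate, with the per-cpu softirq bookkeeping name assumed rather than taken from this hunk:

    /* Sketch only: flag a task unlikely to be preempted promptly
     * because its CPU is busy servicing softirqs.
     */
    bool task_may_not_preempt(struct task_struct *task, int cpu)
    {
            /* Hypothetical per-cpu record of softirqs being run. */
            __u32 softirqs = per_cpu(active_softirqs, cpu);

            return softirqs &&
                   (task == per_cpu(ksoftirqd, cpu) ||
                    task_thread_info(task)->preempt_count & SOFTIRQ_MASK);
    }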
+
+/**
* cpupri_find - find the best (lowest-pri) CPU in the system
* @cp: The cpupri context
* @p: The task
@@ -70,9 +93,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
{
int idx = 0;
int task_pri = convert_prio(p->prio);
+ bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
+retry:
for (idx = 0; idx < task_pri; idx++) {
struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
int skip = 0;
@@ -108,7 +133,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
if (lowest_mask) {
cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
-
+ if (drop_nopreempts)
+ drop_nopreempt_cpus(lowest_mask);
/*
* We have to ensure that we have at least one bit
* still set in the array, since the map could have
@@ -123,7 +149,14 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
return 1;
}
-
+ /*
+ * If dropping the non-preemptible CPUs left us with no candidates,
+ * retry without dropping them so we can still find the lowest
+ * priority target and avoid priority inversion.
+ */
+ if (drop_nopreempts) {
+ drop_nopreempts = false;
+ goto retry;
+ }
return 0;
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 3d55ec89c400..a105e97ab6bf 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -755,6 +755,9 @@ static void update_curr_dl(struct rq *rq)
if (unlikely((s64)delta_exec <= 0))
return;
+ /* kick cpufreq (see the comment in kernel/sched/sched.h). */
+ cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);
+
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index c8c4272c61d8..ed8e6bb4531b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -636,6 +636,32 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
P(se.statistics.nr_wakeups_affine_attempts);
P(se.statistics.nr_wakeups_passive);
P(se.statistics.nr_wakeups_idle);
+ /* eas */
+ /* select_idle_sibling() */
+ P(se.statistics.nr_wakeups_sis_attempts);
+ P(se.statistics.nr_wakeups_sis_idle);
+ P(se.statistics.nr_wakeups_sis_cache_affine);
+ P(se.statistics.nr_wakeups_sis_suff_cap);
+ P(se.statistics.nr_wakeups_sis_idle_cpu);
+ P(se.statistics.nr_wakeups_sis_count);
+ /* select_energy_cpu_brute() */
+ P(se.statistics.nr_wakeups_secb_attempts);
+ P(se.statistics.nr_wakeups_secb_sync);
+ P(se.statistics.nr_wakeups_secb_idle_bt);
+ P(se.statistics.nr_wakeups_secb_insuff_cap);
+ P(se.statistics.nr_wakeups_secb_no_nrg_sav);
+ P(se.statistics.nr_wakeups_secb_nrg_sav);
+ P(se.statistics.nr_wakeups_secb_count);
+ /* find_best_target() */
+ P(se.statistics.nr_wakeups_fbt_attempts);
+ P(se.statistics.nr_wakeups_fbt_no_cpu);
+ P(se.statistics.nr_wakeups_fbt_no_sd);
+ P(se.statistics.nr_wakeups_fbt_pref_idle);
+ P(se.statistics.nr_wakeups_fbt_count);
+ /* cas */
+ /* select_task_rq_fair() */
+ P(se.statistics.nr_wakeups_cas_attempts);
+ P(se.statistics.nr_wakeups_cas_count);
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
__P(load_avg);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 099a1b93bebf..422438d43d90 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -50,7 +50,6 @@
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
-unsigned int sysctl_sched_is_big_little = 0;
unsigned int sysctl_sched_sync_hint_enable = 1;
unsigned int sysctl_sched_initial_task_util = 0;
unsigned int sysctl_sched_cstate_aware = 1;
@@ -119,6 +118,12 @@ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
+/*
+ * The margin used when comparing utilization with CPU capacity:
+ * util * margin < capacity * 1024
+ */
+unsigned int capacity_margin = 1280; /* ~20% */
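
Put differently, a CPU "fits" a utilization only while roughly 20% headroom remains:

    /* Example: capacity = 1024 (a big CPU at full capacity).
     * util fits iff util * 1280 < 1024 * 1024, i.e. util < ~819,
     * which is 80% of capacity (1024 * 1024 / 1280 = 819.2).
     */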
+
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
@@ -294,19 +299,59 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
if (!cfs_rq->on_list) {
+ struct rq *rq = rq_of(cfs_rq);
+ int cpu = cpu_of(rq);
/*
* Ensure we either appear before our parent (if already
* enqueued) or force our parent to appear after us when it is
- * enqueued. The fact that we always enqueue bottom-up
- * reduces this to two cases.
+ * enqueued. The fact that we always enqueue bottom-up
+ * reduces this to two cases and a special case for the root
+ * cfs_rq. Furthermore, it also means that we will always reset
+ * tmp_alone_branch either when the branch is connected
+ * to a tree or when we reach the beg of the tree
*/
if (cfs_rq->tg->parent &&
- cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
- list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
- &rq_of(cfs_rq)->leaf_cfs_rq_list);
- } else {
+ cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
+ /*
+ * If parent is already on the list, we add the child
+ * just before. Thanks to circular linked property of
+ * the list, this means to put the child at the tail
+ * of the list that starts by parent.
+ */
+ list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+ &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
+ /*
+ * The branch is now connected to its tree so we can
+ * reset tmp_alone_branch to the beginning of the
+ * list.
+ */
+ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+ } else if (!cfs_rq->tg->parent) {
+ /*
+ * cfs rq without parent should be put
+ * at the tail of the list.
+ */
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
- &rq_of(cfs_rq)->leaf_cfs_rq_list);
+ &rq->leaf_cfs_rq_list);
+ /*
+ * We have reached the beginning of a tree so we can reset
+ * tmp_alone_branch to the beginning of the list.
+ */
+ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+ } else {
+ /*
+ * The parent has not already been added so we want to
+ * make sure that it will be put after us.
+ * tmp_alone_branch points to the beginning of the branch
+ * where we will add parent.
+ */
+ list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
+ rq->tmp_alone_branch);
+ /*
+ * Update tmp_alone_branch to point to the new beginning
+ * of the branch.
+ */
+ rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
}
cfs_rq->on_list = 1;
@@ -664,7 +709,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
#ifdef CONFIG_SMP
-static int select_idle_sibling(struct task_struct *p, int cpu);
+static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
/*
@@ -688,20 +733,115 @@ void init_entity_runnable_average(struct sched_entity *se)
* will definitely be updated (after enqueue).
*/
sa->period_contrib = 1023;
- sa->load_avg = scale_load_down(se->load.weight);
+ /*
+ * Tasks are initialized with full load to be seen as heavy tasks until
+ * they get a chance to stabilize to their real load level.
+ * Group entities are initialized with zero load to reflect the fact that
+ * nothing has been attached to the task group yet.
+ */
+ if (entity_is_task(se))
+ sa->load_avg = scale_load_down(se->load.weight);
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
- sa->util_avg = sched_freq() ?
- sysctl_sched_initial_task_util :
- scale_load_down(SCHED_LOAD_SCALE);
- sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+ /*
+ * In previous Android versions, we used to have:
+ * sa->util_avg = sched_freq() ?
+ * sysctl_sched_initial_task_util :
+ * scale_load_down(SCHED_LOAD_SCALE);
+ * sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+ * However, that functionality has been moved to enqueue.
+ * It is unclear if we should restore this in enqueue.
+ */
+ /*
+ * At this point, util_avg won't be used in select_task_rq_fair() anyway.
+ */
+ sa->util_avg = 0;
+ sa->util_sum = 0;
/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}
+static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
+static void attach_entity_cfs_rq(struct sched_entity *se);
+
+/*
+ * With new tasks being created, their initial util_avgs are extrapolated
+ * based on the cfs_rq's current util_avg:
+ *
+ * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
+ *
+ * However, in many cases, the above util_avg does not give a desired
+ * value. Moreover, the sum of the util_avgs may be divergent, such
+ * as when the series is a harmonic series.
+ *
+ * To solve this problem, we also cap the util_avg of successive tasks to
+ * only 1/2 of the left utilization budget:
+ *
+ * util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
+ *
+ * where n denotes the nth task.
+ *
+ * For example, the simplest series from the beginning would look like:
+ *
+ * task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
+ * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
+ *
+ * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
+ * if util_avg > util_avg_cap.
+ */
+void post_init_entity_util_avg(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ struct sched_avg *sa = &se->avg;
+ long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+
+ if (cap > 0) {
+ if (cfs_rq->avg.util_avg != 0) {
+ sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
+ sa->util_avg /= (cfs_rq->avg.load_avg + 1);
+
+ if (sa->util_avg > cap)
+ sa->util_avg = cap;
+ } else {
+ sa->util_avg = cap;
+ }
+ /*
+ * If we wish to restore tuning via setting initial util,
+ * this is where we should do it.
+ */
+ sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+ }
+
+ if (entity_is_task(se)) {
+ struct task_struct *p = task_of(se);
+ if (p->sched_class != &fair_sched_class) {
+ /*
+ * For !fair tasks do:
+ *
+ update_cfs_rq_load_avg(now, cfs_rq, false);
+ attach_entity_load_avg(cfs_rq, se);
+ switched_from_fair(rq, p);
+ *
+ * such that the next switched_to_fair() has the
+ * expected state.
+ */
+ se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
+ return;
+ }
+ }
+
+ attach_entity_cfs_rq(se);
+}
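
A worked instance of the clamping above, with runqueue state assumed for illustration:

    /* Assume: cfs_rq->avg.util_avg = 512, cfs_rq->avg.load_avg = 3,
     * se->load.weight = 1024 (nice-0, no extra load resolution).
     *
     *   cap      = (1024 - 512) / 2     = 256
     *   util_avg = 512 * 1024 / (3 + 1) = 131072 -> clamped to 256
     *   util_sum = 256 * LOAD_AVG_MAX
     */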
+
#else
void init_entity_runnable_average(struct sched_entity *se)
{
}
-#endif
+void post_init_entity_util_avg(struct sched_entity *se)
+{
+}
+static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+{
+}
+#endif /* CONFIG_SMP */
/*
* Update the current task's runtime statistics.
@@ -1425,7 +1565,8 @@ balance:
* Call select_idle_sibling to maybe find a better one.
*/
if (!cur)
- env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
+ env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
+ env->dst_cpu);
assign:
assigned = true;
@@ -2410,28 +2551,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
- long tg_weight;
+ long tg_weight, load, shares;
/*
- * Use this CPU's real-time load instead of the last load contribution
- * as the updating of the contribution is delayed, and we will use the
- * the real-time load to calc the share. See update_tg_load_avg().
+ * This really should be: cfs_rq->avg.load_avg, but instead we use
+ * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+ * the shares for small weight interactive tasks.
*/
- tg_weight = atomic_long_read(&tg->load_avg);
- tg_weight -= cfs_rq->tg_load_avg_contrib;
- tg_weight += cfs_rq->load.weight;
+ load = scale_load_down(cfs_rq->load.weight);
- return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
- long tg_weight, load, shares;
+ tg_weight = atomic_long_read(&tg->load_avg);
- tg_weight = calc_tg_weight(tg, cfs_rq);
- load = cfs_rq->load.weight;
+ /* Ensure tg_weight >= load */
+ tg_weight -= cfs_rq->tg_load_avg_contrib;
+ tg_weight += load;
shares = (tg->shares * load);
if (tg_weight)
@@ -2450,6 +2585,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
return tg->shares;
}
# endif /* CONFIG_SMP */
+
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
{
@@ -2468,16 +2604,20 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
-static void update_cfs_shares(struct cfs_rq *cfs_rq)
+static void update_cfs_shares(struct sched_entity *se)
{
+ struct cfs_rq *cfs_rq = group_cfs_rq(se);
struct task_group *tg;
- struct sched_entity *se;
long shares;
- tg = cfs_rq->tg;
- se = tg->se[cpu_of(rq_of(cfs_rq))];
- if (!se || throttled_hierarchy(cfs_rq))
+ if (!cfs_rq)
+ return;
+
+ if (throttled_hierarchy(cfs_rq))
return;
+
+ tg = cfs_rq->tg;
+
#ifndef CONFIG_SMP
if (likely(se->load.weight == tg->shares))
return;
@@ -2486,8 +2626,9 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq)
reweight_entity(cfs_rq_of(se), se, shares);
}
+
#else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
+static inline void update_cfs_shares(struct sched_entity *se)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -3790,25 +3931,262 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
return decayed;
}
-#ifdef CONFIG_FAIR_GROUP_SCHED
/*
- * Updating tg's load_avg is necessary before update_cfs_share (which is done)
- * and effective_load (which is not done because it is too costly).
+ * Signed add and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define add_positive(_ptr, _val) do { \
+ typeof(_ptr) ptr = (_ptr); \
+ typeof(_val) val = (_val); \
+ typeof(*ptr) res, var = READ_ONCE(*ptr); \
+ \
+ res = var + val; \
+ \
+ if (val < 0 && res > var) \
+ res = 0; \
+ \
+ WRITE_ONCE(*ptr, res); \
+} while (0)
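
The clamp relies on the target fields being unsigned: an underflow wraps around and shows up as res > var, mirroring the existing sub_positive() helper. For example:

    unsigned long avg = 10;

    add_positive(&avg, -25L);  /* 10 - 25 wraps, res > var, so avg = 0 */
    add_positive(&avg, 40L);   /* plain addition, avg = 40 */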
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/**
+ * update_tg_load_avg - update the tg's load avg
+ * @cfs_rq: the cfs_rq whose avg changed
+ * @force: update regardless of how small the difference
+ *
+ * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
+ * However, because tg->load_avg is a global value there are performance
+ * considerations.
+ *
+ * In order to avoid having to look at the other cfs_rq's, we use a
+ * differential update where we store the last value we propagated. This in
+ * turn allows skipping updates if the differential is 'small'.
+ *
+ * Updating tg's load_avg is necessary before update_cfs_share() (which is
+ * done) and effective_load() (which is not done because it is too costly).
*/
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
{
long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
+ /*
+ * No need to update load_avg for root_task_group as it is not used.
+ */
+ if (cfs_rq->tg == &root_task_group)
+ return;
+
if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
atomic_long_add(delta, &cfs_rq->tg->load_avg);
cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
}
}
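
The 1/64 threshold means the global atomic is touched only once the local average drifts by more than about 1.5% of the last published contribution:

    /* Example: tg_load_avg_contrib = 6400, dead band = 6400 / 64 = 100.
     *   avg.load_avg = 6450 -> |delta| =  50 -> skipped
     *   avg.load_avg = 6550 -> |delta| = 150 -> atomic_long_add(150, ...)
     */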
+/*
+ * Called within set_task_rq() right before setting a task's cpu. The
+ * caller only guarantees p->pi_lock is held; no other assumptions,
+ * including the state of rq->lock, should be made.
+ */
+void set_task_rq_fair(struct sched_entity *se,
+ struct cfs_rq *prev, struct cfs_rq *next)
+{
+ if (!sched_feat(ATTACH_AGE_LOAD))
+ return;
+
+ /*
+ * We are supposed to update the task to "current" time, so that it is
+ * up to date and ready to go to a new CPU/cfs_rq. But we have
+ * difficulty getting what the current time is, so simply throw away
+ * the out-of-date time. This results in the wakee task being less
+ * decayed, but giving the wakee more load does not sound bad.
+ */
+ if (se->avg.last_update_time && prev) {
+ u64 p_last_update_time;
+ u64 n_last_update_time;
+
+#ifndef CONFIG_64BIT
+ u64 p_last_update_time_copy;
+ u64 n_last_update_time_copy;
+
+ do {
+ p_last_update_time_copy = prev->load_last_update_time_copy;
+ n_last_update_time_copy = next->load_last_update_time_copy;
+
+ smp_rmb();
+
+ p_last_update_time = prev->avg.last_update_time;
+ n_last_update_time = next->avg.last_update_time;
+
+ } while (p_last_update_time != p_last_update_time_copy ||
+ n_last_update_time != n_last_update_time_copy);
+#else
+ p_last_update_time = prev->avg.last_update_time;
+ n_last_update_time = next->avg.last_update_time;
+#endif
+ __update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
+ &se->avg, 0, 0, NULL);
+ se->avg.last_update_time = n_last_update_time;
+ }
+}
+
+/* Take into account change of utilization of a child task group */
+static inline void
+update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+ long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
+
+ /* Nothing to update */
+ if (!delta)
+ return;
+
+ /* Set new sched_entity's utilization */
+ se->avg.util_avg = gcfs_rq->avg.util_avg;
+ se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
+
+ /* Update parent cfs_rq utilization */
+ add_positive(&cfs_rq->avg.util_avg, delta);
+ cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
+}
+
+/* Take into account change of load of a child task group */
+static inline void
+update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+ long delta, load = gcfs_rq->avg.load_avg;
+
+ /*
+ * If the load of the group cfs_rq is zero, the load of the
+ * sched_entity will also be zero, so we can skip the formula.
+ */
+ if (load) {
+ long tg_load;
+
+ /* Get tg's load and ensure tg_load > 0 */
+ tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1;
+
+ /* Ensure tg_load >= load and is updated with the current load */
+ tg_load -= gcfs_rq->tg_load_avg_contrib;
+ tg_load += load;
+
+ /*
+ * We need to compute a correction term in the case that the
+ * task group is consuming more CPU than a task of equal
+ * weight. A task with a weight equal to tg->shares will have
+ * a load less than or equal to scale_load_down(tg->shares).
+ * Similarly, the sched_entities that represent the task group
+ * at the parent level can't have a load higher than
+ * scale_load_down(tg->shares). And the Sum of sched_entities'
+ * load must be <= scale_load_down(tg->shares).
+ */
+ if (tg_load > scale_load_down(gcfs_rq->tg->shares)) {
+ /* scale gcfs_rq's load into tg's shares */
+ load *= scale_load_down(gcfs_rq->tg->shares);
+ load /= tg_load;
+ }
+ }
+
+ delta = load - se->avg.load_avg;
+
+ /* Nothing to update */
+ if (!delta)
+ return;
+
+ /* Set new sched_entity's load */
+ se->avg.load_avg = load;
+ se->avg.load_sum = se->avg.load_avg * LOAD_AVG_MAX;
+
+ /* Update parent cfs_rq load */
+ add_positive(&cfs_rq->avg.load_avg, delta);
+ cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * LOAD_AVG_MAX;
+
+ /*
+ * If the sched_entity is already enqueued, we also have to update the
+ * runnable load avg.
+ */
+ if (se->on_rq) {
+ /* Update parent cfs_rq runnable_load_avg */
+ add_positive(&cfs_rq->runnable_load_avg, delta);
+ cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX;
+ }
+}
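
A worked instance of the correction term, with values assumed for illustration:

    /* Assume: scale_load_down(tg->shares) = 1024, group load = 2048,
     * tg_load (after the fixups above) = 4096.
     *
     *   4096 > 1024, so scale: load = 2048 * 1024 / 4096 = 512
     * The parent thus sees at most the group's configured share, never
     * the raw (over-committed) group runqueue load.
     */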
+
+static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq)
+{
+ cfs_rq->propagate_avg = 1;
+}
+
+static inline int test_and_clear_tg_cfs_propagate(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = group_cfs_rq(se);
+
+ if (!cfs_rq->propagate_avg)
+ return 0;
+
+ cfs_rq->propagate_avg = 0;
+ return 1;
+}
+
+/* Update task and its cfs_rq load average */
+static inline int propagate_entity_load_avg(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq;
+
+ if (entity_is_task(se))
+ return 0;
+
+ if (!test_and_clear_tg_cfs_propagate(se))
+ return 0;
+
+ cfs_rq = cfs_rq_of(se);
+
+ set_tg_cfs_propagate(cfs_rq);
+
+ update_tg_cfs_util(cfs_rq, se);
+ update_tg_cfs_load(cfs_rq, se);
+
+ return 1;
+}
+
#else /* CONFIG_FAIR_GROUP_SCHED */
+
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
+
+static inline int propagate_entity_load_avg(struct sched_entity *se)
+{
+ return 0;
+}
+
+static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
+
#endif /* CONFIG_FAIR_GROUP_SCHED */
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+{
+ if (&this_rq()->cfs == cfs_rq) {
+ /*
+ * There are a few boundary cases this might miss but it should
+ * get called often enough that this should (hopefully) not be
+ * a real problem. On top of that, it is only called for the
+ * local CPU, so if we enqueue remotely we'll miss an update, but
+ * the next tick/schedule should fix that up.
+ *
+ * It will not get called when we go idle, because the idle
+ * thread is a different class (!fair), nor will the utilization
+ * number include things like RT tasks.
+ *
+ * As is, the util number is not freq-invariant (we'd have to
+ * implement arch_scale_freq_capacity() for that).
+ *
+ * See cpu_util().
+ */
+ cpufreq_update_util(rq_of(cfs_rq), 0);
+ }
+}
+
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
/*
@@ -3828,23 +4206,43 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
WRITE_ONCE(*ptr, res); \
} while (0)
-/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
-static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+/**
+ * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
+ * @now: current time, as per cfs_rq_clock_task()
+ * @cfs_rq: cfs_rq to update
+ * @update_freq: should we call cfs_rq_util_change() or will the call do so
+ *
+ * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
+ * avg. The immediate corollary is that all (fair) tasks must be attached, see
+ * post_init_entity_util_avg().
+ *
+ * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
+ *
+ * Returns true if the load decayed or we removed load.
+ *
+ * Since both these conditions indicate a changed cfs_rq->avg.load we should
+ * call update_tg_load_avg() when this function returns true.
+ */
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
{
struct sched_avg *sa = &cfs_rq->avg;
- int decayed, removed = 0;
+ int decayed, removed = 0, removed_util = 0;
if (atomic_long_read(&cfs_rq->removed_load_avg)) {
s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
sub_positive(&sa->load_avg, r);
sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
removed = 1;
+ set_tg_cfs_propagate(cfs_rq);
}
if (atomic_long_read(&cfs_rq->removed_util_avg)) {
long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
sub_positive(&sa->util_avg, r);
sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+ removed_util = 1;
+ set_tg_cfs_propagate(cfs_rq);
}
decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
@@ -3859,68 +4257,89 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
if (cfs_rq == &rq_of(cfs_rq)->cfs)
trace_sched_load_avg_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
+ if (update_freq && (decayed || removed_util))
+ cfs_rq_util_change(cfs_rq);
+
return decayed || removed;
}
+/*
+ * Optional action to be done while updating the load average
+ */
+#define UPDATE_TG 0x1
+#define SKIP_AGE_LOAD 0x2
+
/* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int update_tg)
+static inline void update_load_avg(struct sched_entity *se, int flags)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 now = cfs_rq_clock_task(cfs_rq);
int cpu = cpu_of(rq_of(cfs_rq));
+ int decayed;
+ void *ptr = NULL;
/*
* Track task load average for carrying it to new CPU after migrated, and
* track group sched_entity load average for task_h_load calc in migration
*/
- __update_load_avg(now, cpu, &se->avg,
+ if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
+ __update_load_avg(now, cpu, &se->avg,
se->on_rq * scale_load_down(se->load.weight),
cfs_rq->curr == se, NULL);
+ }
- if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
+ decayed = update_cfs_rq_load_avg(now, cfs_rq, true);
+ decayed |= propagate_entity_load_avg(se);
+
+ if (decayed && (flags & UPDATE_TG))
update_tg_load_avg(cfs_rq, 0);
- if (entity_is_task(se))
- trace_sched_load_avg_task(task_of(se), &se->avg);
+ if (entity_is_task(se)) {
+#ifdef CONFIG_SCHED_WALT
+ ptr = (void *)&(task_of(se)->ravg);
+#endif
+ trace_sched_load_avg_task(task_of(se), &se->avg, ptr);
+ }
}
+/**
+ * attach_entity_load_avg - attach this entity to its cfs_rq load avg
+ * @cfs_rq: cfs_rq to attach to
+ * @se: sched_entity to attach
+ *
+ * Must call update_cfs_rq_load_avg() before this, since we rely on
+ * cfs_rq->avg.last_update_time being current.
+ */
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- if (!sched_feat(ATTACH_AGE_LOAD))
- goto skip_aging;
-
- /*
- * If we got migrated (either between CPUs or between cgroups) we'll
- * have aged the average right before clearing @last_update_time.
- */
- if (se->avg.last_update_time) {
- __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
- &se->avg, 0, 0, NULL);
-
- /*
- * XXX: we could have just aged the entire load away if we've been
- * absent from the fair class for too long.
- */
- }
-
-skip_aging:
se->avg.last_update_time = cfs_rq->avg.last_update_time;
cfs_rq->avg.load_avg += se->avg.load_avg;
cfs_rq->avg.load_sum += se->avg.load_sum;
cfs_rq->avg.util_avg += se->avg.util_avg;
cfs_rq->avg.util_sum += se->avg.util_sum;
+ set_tg_cfs_propagate(cfs_rq);
+
+ cfs_rq_util_change(cfs_rq);
}
+/**
+ * detach_entity_load_avg - detach this entity from its cfs_rq load avg
+ * @cfs_rq: cfs_rq to detach from
+ * @se: sched_entity to detach
+ *
+ * Must call update_cfs_rq_load_avg() before this, since we rely on
+ * cfs_rq->avg.last_update_time being current.
+ */
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
- &se->avg, se->on_rq * scale_load_down(se->load.weight),
- cfs_rq->curr == se, NULL);
sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+ set_tg_cfs_propagate(cfs_rq);
+
+ cfs_rq_util_change(cfs_rq);
}
/* Add the load generated by se into cfs_rq's load average */
@@ -3928,34 +4347,20 @@ static inline void
enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct sched_avg *sa = &se->avg;
- u64 now = cfs_rq_clock_task(cfs_rq);
- int migrated, decayed;
-
- migrated = !sa->last_update_time;
- if (!migrated) {
- __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
- se->on_rq * scale_load_down(se->load.weight),
- cfs_rq->curr == se, NULL);
- }
-
- decayed = update_cfs_rq_load_avg(now, cfs_rq);
cfs_rq->runnable_load_avg += sa->load_avg;
cfs_rq->runnable_load_sum += sa->load_sum;
- if (migrated)
+ if (!sa->last_update_time) {
attach_entity_load_avg(cfs_rq, se);
-
- if (decayed || migrated)
update_tg_load_avg(cfs_rq, 0);
+ }
}
/* Remove the runnable load generated by se from cfs_rq's runnable load average */
static inline void
dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- update_load_avg(se, 1);
-
cfs_rq->runnable_load_avg =
max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
cfs_rq->runnable_load_sum =
@@ -3984,13 +4389,25 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
#endif
/*
+ * Synchronize entity load avg of dequeued entity without locking
+ * the previous rq.
+ */
+void sync_entity_load_avg(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ u64 last_update_time;
+
+ last_update_time = cfs_rq_last_update_time(cfs_rq);
+ __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+}
+
+/*
* Task first catches up with cfs_rq, and then subtract
* itself from the cfs_rq (task must be off the queue now).
*/
void remove_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
- u64 last_update_time;
/*
* Newly created task or never used group entity should not be removed
@@ -3999,9 +4416,7 @@ void remove_entity_load_avg(struct sched_entity *se)
if (se->avg.last_update_time == 0)
return;
- last_update_time = cfs_rq_last_update_time(cfs_rq);
-
- __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+ sync_entity_load_avg(se);
atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
}
@@ -4038,7 +4453,16 @@ static int idle_balance(struct rq *this_rq);
#else /* CONFIG_SMP */
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+{
+ return 0;
+}
+
+#define UPDATE_TG 0x0
+#define SKIP_AGE_LOAD 0x0
+
+static inline void update_load_avg(struct sched_entity *se, int not_used1) {}
static inline void
enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline void
@@ -4187,9 +4611,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
+ update_load_avg(se, UPDATE_TG);
enqueue_entity_load_avg(cfs_rq, se);
+ update_cfs_shares(se);
account_entity_enqueue(cfs_rq, se);
- update_cfs_shares(cfs_rq);
if (flags & ENQUEUE_WAKEUP) {
place_entity(cfs_rq, se, 0);
@@ -4262,6 +4687,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
+
+ /*
+ * When dequeuing a sched_entity, we must:
+ * - Update loads to have both entity and cfs_rq synced with now.
+ * - Subtract its load from the cfs_rq->runnable_avg.
+ * - Subtract its previous weight from cfs_rq->load.weight.
+ * - For group entity, update its weight to reflect the new share
+ * of its group cfs_rq.
+ */
+ update_load_avg(se, UPDATE_TG);
dequeue_entity_load_avg(cfs_rq, se);
update_stats_dequeue(cfs_rq, se);
@@ -4297,7 +4732,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
return_cfs_rq_runtime(cfs_rq);
update_min_vruntime(cfs_rq);
- update_cfs_shares(cfs_rq);
+ update_cfs_shares(se);
}
/*
@@ -4352,7 +4787,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
- update_load_avg(se, 1);
+ update_load_avg(se, UPDATE_TG);
}
update_stats_curr_start(cfs_rq, se);
@@ -4468,8 +4903,8 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
/*
* Ensure that runnable average is periodically updated.
*/
- update_load_avg(curr, 1);
- update_cfs_shares(cfs_rq);
+ update_load_avg(curr, UPDATE_TG);
+ update_cfs_shares(curr);
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -5373,7 +5808,7 @@ static inline void hrtick_update(struct rq *rq)
#ifdef CONFIG_SMP
static bool cpu_overutilized(int cpu);
-static inline unsigned long boosted_cpu_util(int cpu);
+unsigned long boosted_cpu_util(int cpu);
#else
#define boosted_cpu_util(cpu) cpu_util(cpu)
#endif
@@ -5410,6 +5845,14 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
int task_wakeup = flags & ENQUEUE_WAKEUP;
#endif
+ /*
+ * If in_iowait is set, the code below may not trigger any cpufreq
+ * utilization updates, so do it here explicitly with the IOWAIT flag
+ * passed.
+ */
+ if (p->in_iowait)
+ cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT);
+
for_each_sched_entity(se) {
if (se->on_rq)
break;
@@ -5438,8 +5881,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, 1);
- update_cfs_shares(cfs_rq);
+ update_load_avg(se, UPDATE_TG);
+ update_cfs_shares(se);
}
if (!se) {
@@ -5544,8 +5987,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, 1);
- update_cfs_shares(cfs_rq);
+ update_load_avg(se, UPDATE_TG);
+ update_cfs_shares(se);
}
if (!se) {
@@ -6151,15 +6594,7 @@ static int sched_group_energy(struct energy_env *eenv)
*/
sd = rcu_dereference(per_cpu(sd_scs, cpu));
- if (!sd)
- /*
- * We most probably raced with hotplug; returning a
- * wrong energy estimation is better than entering an
- * infinite loop.
- */
- return -EINVAL;
-
- if (sd->parent)
+ if (sd && sd->parent)
sg_shared_cap = sd->parent->groups;
for_each_domain(cpu, sd) {
@@ -6214,6 +6649,14 @@ static int sched_group_energy(struct energy_env *eenv)
} while (sg = sg->next, sg != sd->groups);
}
+
+ /*
+ * If we raced with hotplug and got an sd NULL pointer,
+ * returning a wrong energy estimation is better than
+ * entering an infinite loop.
+ */
+ if (cpumask_test_cpu(cpu, &visit_cpus))
+ return -EINVAL;
next_cpu:
cpumask_clear_cpu(cpu, &visit_cpus);
continue;
@@ -6240,6 +6683,7 @@ static inline int __energy_diff(struct energy_env *eenv)
struct sched_domain *sd;
struct sched_group *sg;
int sd_cpu = -1, energy_before = 0, energy_after = 0;
+ int diff, margin;
struct energy_env eenv_before = {
.util_delta = 0,
@@ -6282,12 +6726,22 @@ static inline int __energy_diff(struct energy_env *eenv)
eenv->nrg.after = energy_after;
eenv->nrg.diff = eenv->nrg.after - eenv->nrg.before;
eenv->payoff = 0;
-
+#ifndef CONFIG_SCHED_TUNE
trace_sched_energy_diff(eenv->task,
eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
eenv->cap.before, eenv->cap.after, eenv->cap.delta,
eenv->nrg.delta, eenv->payoff);
+#endif
+ /*
+ * Dead-zone margin preventing too many migrations.
+ */
+
+ margin = eenv->nrg.before >> 6; /* ~1.56% */
+
+ diff = eenv->nrg.after - eenv->nrg.before;
+
+ eenv->nrg.diff = (abs(diff) < margin) ? 0 : eenv->nrg.diff;
return eenv->nrg.diff;
}
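
The shift by 6 makes the dead zone about 1.56% of the pre-migration energy; any difference inside it is treated as noise:

    /* Example: nrg.before = 6400 -> margin = 6400 >> 6 = 100.
     *   nrg.after = 6350 -> |diff| =  50 -> inside dead zone, diff = 0
     *   nrg.after = 6250 -> |diff| = 150 -> kept, migration considered
     */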
@@ -6295,30 +6749,37 @@ static inline int __energy_diff(struct energy_env *eenv)
#ifdef CONFIG_SCHED_TUNE
struct target_nrg schedtune_target_nrg;
-
+extern bool schedtune_initialized;
/*
* System energy normalization
- * Returns the normalized value, in the range [0..SCHED_LOAD_SCALE],
+ * Returns the normalized value, in the range [0..SCHED_CAPACITY_SCALE],
* corresponding to the specified energy variation.
*/
static inline int
normalize_energy(int energy_diff)
{
u32 normalized_nrg;
+
+ /* during early setup, we don't know the extents */
+ if (unlikely(!schedtune_initialized))
+ return energy_diff < 0 ? -1 : 1;
+
#ifdef CONFIG_SCHED_DEBUG
+ {
int max_delta;
/* Check for boundaries */
max_delta = schedtune_target_nrg.max_power;
max_delta -= schedtune_target_nrg.min_power;
WARN_ON(abs(energy_diff) >= max_delta);
+ }
#endif
/* Do scaling using positive numbers to increase the range */
normalized_nrg = (energy_diff < 0) ? -energy_diff : energy_diff;
/* Scale by energy magnitude */
- normalized_nrg <<= SCHED_LOAD_SHIFT;
+ normalized_nrg <<= SCHED_CAPACITY_SHIFT;
/* Normalize on max energy for target platform */
normalized_nrg = reciprocal_divide(
@@ -6349,6 +6810,12 @@ energy_diff(struct energy_env *eenv)
eenv->cap.delta,
eenv->task);
+ trace_sched_energy_diff(eenv->task,
+ eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
+ eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
+ eenv->cap.before, eenv->cap.after, eenv->cap.delta,
+ eenv->nrg.delta, eenv->payoff);
+
/*
* When SchedTune is enabled, the energy_diff() function will return
* the computed energy payoff value. Since the energy_diff() return
@@ -6388,18 +6855,18 @@ static int wake_wide(struct task_struct *p)
return 1;
}
-static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
+static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+ int prev_cpu, int sync)
{
s64 this_load, load;
s64 this_eff_load, prev_eff_load;
- int idx, this_cpu, prev_cpu;
+ int idx, this_cpu;
struct task_group *tg;
unsigned long weight;
int balanced;
idx = sd->wake_idx;
this_cpu = smp_processor_id();
- prev_cpu = task_cpu(p);
load = source_load(prev_cpu, idx);
this_load = target_load(this_cpu, idx);
@@ -6459,8 +6926,6 @@ static inline unsigned long task_util(struct task_struct *p)
return p->se.avg.util_avg;
}
-unsigned int capacity_margin = 1280; /* ~20% margin */
-
static inline unsigned long boosted_task_util(struct task_struct *task);
static inline bool __task_fits(struct task_struct *p, int cpu, int util)
@@ -6486,11 +6951,6 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
return __task_fits(p, cpu, 0);
}
-static inline bool task_fits_spare(struct task_struct *p, int cpu)
-{
- return __task_fits(p, cpu, cpu_util(cpu));
-}
-
static bool cpu_overutilized(int cpu)
{
return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
@@ -6498,6 +6958,8 @@ static bool cpu_overutilized(int cpu)
#ifdef CONFIG_SCHED_TUNE
+struct reciprocal_value schedtune_spc_rdiv;
+
static long
schedtune_margin(unsigned long signal, long boost)
{
@@ -6508,29 +6970,16 @@ schedtune_margin(unsigned long signal, long boost)
*
* The Boost (B) value is used to compute a Margin (M) which is
* proportional to the complement of the original Signal (S):
- * M = B * (SCHED_LOAD_SCALE - S), if B is positive
- * M = B * S, if B is negative
+ * M = B * (SCHED_CAPACITY_SCALE - S)
* The obtained M could be used by the caller to "boost" S.
*/
if (boost >= 0) {
- margin = SCHED_LOAD_SCALE - signal;
+ margin = SCHED_CAPACITY_SCALE - signal;
margin *= boost;
} else
margin = -signal * boost;
- /*
- * Fast integer division by constant:
- * Constant : (C) = 100
- * Precision : 0.1% (P) = 0.1
- * Reference : C * 100 / P (R) = 100000
- *
- * Thus:
- * Shift bits : ceil(log(R,2)) (S) = 17
- * Mult const : round(2^S/C) (M) = 1311
- *
- *
- */
- margin *= 1311;
- margin >>= 17;
+
+ margin = reciprocal_divide(margin, schedtune_spc_rdiv);
if (boost < 0)
margin *= -1;
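
reciprocal_divide() replaces the open-coded multiply/shift pair that the deleted comment documented. The precomputed constant presumably comes from a one-time reciprocal_value() call in the SchedTune init path; a sketch:

    #include <linux/reciprocal_div.h>

    /* One-time setup (location assumed): precompute the constants for
     * dividing by 100, turning each division into a multiply + shift.
     */
    schedtune_spc_rdiv = reciprocal_value(100);

    /* Then, in schedtune_margin(): margin / 100 without a divide. */
    margin = reciprocal_divide(margin, schedtune_spc_rdiv);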
@@ -6580,7 +7029,7 @@ schedtune_task_margin(struct task_struct *task)
#endif /* CONFIG_SCHED_TUNE */
-static inline unsigned long
+unsigned long
boosted_cpu_util(int cpu)
{
unsigned long util = cpu_util(cpu);
@@ -6602,6 +7051,13 @@ boosted_task_util(struct task_struct *task)
return util + margin;
}
+static int cpu_util_wake(int cpu, struct task_struct *p);
+
+static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
+{
+ return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
+}
+
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
@@ -6611,10 +7067,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int sd_flag)
{
struct sched_group *idlest = NULL, *group = sd->groups;
- struct sched_group *fit_group = NULL, *spare_group = NULL;
+ struct sched_group *most_spare_sg = NULL;
unsigned long min_load = ULONG_MAX, this_load = 0;
- unsigned long fit_capacity = ULONG_MAX;
- unsigned long max_spare_capacity = capacity_margin - SCHED_LOAD_SCALE;
+ unsigned long most_spare = 0, this_spare = 0;
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
@@ -6622,7 +7077,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
load_idx = sd->wake_idx;
do {
- unsigned long load, avg_load, spare_capacity;
+ unsigned long load, avg_load, spare_cap, max_spare_cap;
int local_group;
int i;
@@ -6634,8 +7089,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
local_group = cpumask_test_cpu(this_cpu,
sched_group_cpus(group));
- /* Tally up the load of all CPUs in the group */
+ /*
+ * Tally up the load of all CPUs in the group and find
+ * the group containing the CPU with most spare capacity.
+ */
avg_load = 0;
+ max_spare_cap = 0;
for_each_cpu(i, sched_group_cpus(group)) {
/* Bias balancing toward cpus of our domain */
@@ -6646,24 +7105,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
avg_load += load;
- /*
- * Look for most energy-efficient group that can fit
- * that can fit the task.
- */
- if (capacity_of(i) < fit_capacity && task_fits_spare(p, i)) {
- fit_capacity = capacity_of(i);
- fit_group = group;
- }
+ spare_cap = capacity_spare_wake(i, p);
- /*
- * Look for group which has most spare capacity on a
- * single cpu.
- */
- spare_capacity = capacity_of(i) - cpu_util(i);
- if (spare_capacity > max_spare_capacity) {
- max_spare_capacity = spare_capacity;
- spare_group = group;
- }
+ if (spare_cap > max_spare_cap)
+ max_spare_cap = spare_cap;
}
/* Adjust by relative CPU capacity of the group */
@@ -6671,17 +7116,32 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
if (local_group) {
this_load = avg_load;
- } else if (avg_load < min_load) {
- min_load = avg_load;
- idlest = group;
+ this_spare = max_spare_cap;
+ } else {
+ if (avg_load < min_load) {
+ min_load = avg_load;
+ idlest = group;
+ }
+
+ if (most_spare < max_spare_cap) {
+ most_spare = max_spare_cap;
+ most_spare_sg = group;
+ }
}
} while (group = group->next, group != sd->groups);
- if (fit_group)
- return fit_group;
-
- if (spare_group)
- return spare_group;
+ /*
+ * The cross-over point between using spare capacity or least load
+ * is too conservative for high utilization tasks on partially
+ * utilized systems if we require spare_capacity > task_util(p),
+ * so we allow for some task stuffing by using
+ * spare_capacity > task_util(p)/2.
+ */
+ if (this_spare > task_util(p) / 2 &&
+ imbalance*this_spare > 100*most_spare)
+ return NULL;
+ else if (most_spare > task_util(p) / 2)
+ return most_spare_sg;
if (!idlest || 100*this_load < imbalance*min_load)
return NULL;
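
A worked pass through the new cross-over logic, with numbers assumed for illustration:

    /* Assume: task_util(p) = 400, sd->imbalance_pct = 125 so
     * imbalance = 112, this_spare = 300, most_spare = 250.
     *
     *   this_spare > task_util(p) / 2 -> 300 > 200       (true)
     *   imbalance * this_spare        -> 112 * 300 = 33600
     *   100 * most_spare              -> 100 * 250 = 25000
     * 33600 > 25000, so return NULL: the local group's headroom is
     * adequate and the search goes no further.
     */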
@@ -6701,9 +7161,13 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
int shallowest_idle_cpu = -1;
int i;
+ /* Check if we have any choice: */
+ if (group->group_weight == 1)
+ return cpumask_first(sched_group_cpus(group));
+
/* Traverse only the allowed CPUs */
for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
- if (task_fits_spare(p, i)) {
+ if (idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
if (idle && idle->exit_latency < min_exit_latency) {
@@ -6715,8 +7179,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
min_exit_latency = idle->exit_latency;
latest_idle_timestamp = rq->idle_stamp;
shallowest_idle_cpu = i;
- } else if (idle_cpu(i) &&
- (!idle || idle->exit_latency == min_exit_latency) &&
+ } else if ((!idle || idle->exit_latency == min_exit_latency) &&
rq->idle_stamp > latest_idle_timestamp) {
/*
* If equal or no active idle state, then
@@ -6725,13 +7188,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
*/
latest_idle_timestamp = rq->idle_stamp;
shallowest_idle_cpu = i;
- } else if (shallowest_idle_cpu == -1) {
- /*
- * If we haven't found an idle CPU yet
- * pick a non-idle one that can fit the task as
- * fallback.
- */
- shallowest_idle_cpu = i;
}
} else if (shallowest_idle_cpu == -1) {
load = weighted_cpuload(i);
@@ -6748,24 +7204,32 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
/*
* Try and locate an idle CPU in the sched_domain.
*/
-static int select_idle_sibling(struct task_struct *p, int target)
+static int select_idle_sibling(struct task_struct *p, int prev, int target)
{
struct sched_domain *sd;
struct sched_group *sg;
- int i = task_cpu(p);
- int best_idle = -1;
- int best_idle_cstate = -1;
- int best_idle_capacity = INT_MAX;
+ int best_idle_cpu = -1;
+ int best_idle_cstate = INT_MAX;
+ unsigned long best_idle_capacity = ULONG_MAX;
+
+ schedstat_inc(p, se.statistics.nr_wakeups_sis_attempts);
+ schedstat_inc(this_rq(), eas_stats.sis_attempts);
if (!sysctl_sched_cstate_aware) {
- if (idle_cpu(target))
+ if (idle_cpu(target)) {
+ schedstat_inc(p, se.statistics.nr_wakeups_sis_idle);
+ schedstat_inc(this_rq(), eas_stats.sis_idle);
return target;
+ }
/*
* If the previous cpu is cache affine and idle, don't be stupid.
*/
- if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
- return i;
+ if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev)) {
+ schedstat_inc(p, se.statistics.nr_wakeups_sis_cache_affine);
+ schedstat_inc(this_rq(), eas_stats.sis_cache_affine);
+ return prev;
+ }
}
if (!(current->flags & PF_WAKE_UP_IDLE) &&
@@ -6779,24 +7243,30 @@ static int select_idle_sibling(struct task_struct *p, int target)
for_each_lower_domain(sd) {
sg = sd->groups;
do {
+ int i;
if (!cpumask_intersects(sched_group_cpus(sg),
tsk_cpus_allowed(p)))
goto next;
if (sysctl_sched_cstate_aware) {
for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
- struct rq *rq = cpu_rq(i);
- int idle_idx = idle_get_state_idx(rq);
+ int idle_idx = idle_get_state_idx(cpu_rq(i));
unsigned long new_usage = boosted_task_util(p);
unsigned long capacity_orig = capacity_orig_of(i);
+
if (new_usage > capacity_orig || !idle_cpu(i))
goto next;
- if (i == target && new_usage <= capacity_curr_of(target))
+ if (i == target && new_usage <= capacity_curr_of(target)) {
+ schedstat_inc(p, se.statistics.nr_wakeups_sis_suff_cap);
+ schedstat_inc(this_rq(), eas_stats.sis_suff_cap);
+ schedstat_inc(sd, eas_stats.sis_suff_cap);
return target;
+ }
- if (best_idle < 0 || (idle_idx < best_idle_cstate && capacity_orig <= best_idle_capacity)) {
- best_idle = i;
+ if (idle_idx < best_idle_cstate &&
+ capacity_orig <= best_idle_capacity) {
+ best_idle_cpu = i;
best_idle_cstate = idle_idx;
best_idle_capacity = capacity_orig;
}
@@ -6809,231 +7279,283 @@ static int select_idle_sibling(struct task_struct *p, int target)
target = cpumask_first_and(sched_group_cpus(sg),
tsk_cpus_allowed(p));
+ schedstat_inc(p, se.statistics.nr_wakeups_sis_idle_cpu);
+ schedstat_inc(this_rq(), eas_stats.sis_idle_cpu);
+ schedstat_inc(sd, eas_stats.sis_idle_cpu);
goto done;
}
next:
sg = sg->next;
} while (sg != sd->groups);
}
- if (best_idle > 0)
- target = best_idle;
+
+ if (best_idle_cpu >= 0)
+ target = best_idle_cpu;
done:
+ schedstat_inc(p, se.statistics.nr_wakeups_sis_count);
+ schedstat_inc(this_rq(), eas_stats.sis_count);
+
return target;
}
-static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
+/*
+ * cpu_util_wake: Compute cpu utilization with any contributions from
+ * the waking task p removed.
+ */
+static int cpu_util_wake(int cpu, struct task_struct *p)
{
- int iter_cpu;
- int target_cpu = -1;
- int target_util = 0;
- int backup_capacity = 0;
- int best_idle_cpu = -1;
- int best_idle_cstate = INT_MAX;
- int backup_cpu = -1;
- unsigned long task_util_boosted, new_util;
-
- task_util_boosted = boosted_task_util(p);
- for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
- int cur_capacity;
- struct rq *rq;
- int idle_idx;
-
- /*
- * Iterate from higher cpus for boosted tasks.
- */
- int i = boosted ? NR_CPUS-iter_cpu-1 : iter_cpu;
-
- if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
- continue;
+ unsigned long util, capacity;
- /*
- * p's blocked utilization is still accounted for on prev_cpu
- * so prev_cpu will receive a negative bias due to the double
- * accounting. However, the blocked utilization may be zero.
- */
- new_util = cpu_util(i) + task_util_boosted;
-
- /*
- * Ensure minimum capacity to grant the required boost.
- * The target CPU can be already at a capacity level higher
- * than the one required to boost the task.
- */
- if (new_util > capacity_orig_of(i))
- continue;
+#ifdef CONFIG_SCHED_WALT
+ /*
+ * WALT does not decay idle tasks in the same manner
+ * as PELT, so it makes little sense to subtract task
+ * utilization from cpu utilization. Instead just use
+ * cpu_util for this case.
+ */
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ return cpu_util(cpu);
+#endif
+ /* Task has no contribution or is new */
+ if (cpu != task_cpu(p) || !p->se.avg.last_update_time)
+ return cpu_util(cpu);
- /*
- * Unconditionally favoring tasks that prefer idle cpus to
- * improve latency.
- */
- if (idle_cpu(i) && prefer_idle) {
- if (best_idle_cpu < 0)
- best_idle_cpu = i;
- continue;
- }
+ capacity = capacity_orig_of(cpu);
+ util = max_t(long, cpu_util(cpu) - task_util(p), 0);
- cur_capacity = capacity_curr_of(i);
- rq = cpu_rq(i);
- idle_idx = idle_get_state_idx(rq);
+ return (util >= capacity) ? capacity : util;
+}
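
A quick instance of the subtraction, assuming PELT numbers:

    /* Assume: cpu_util(cpu) = 700, task_util(p) = 300, and p last ran
     * on this cpu with a valid last_update_time.
     *
     *   util = max(700 - 300, 0) = 400
     * The wakeup path thus sees the CPU as it would look without p's
     * blocked contribution, capped at capacity_orig_of(cpu).
     */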
- if (new_util < cur_capacity) {
- if (cpu_rq(i)->nr_running) {
- if (prefer_idle) {
- /* Find a target cpu with highest
- * utilization.
- */
- if (target_util == 0 ||
- target_util < new_util) {
- target_cpu = i;
- target_util = new_util;
- }
- } else {
- /* Find a target cpu with lowest
- * utilization.
- */
- if (target_util == 0 ||
- target_util > new_util) {
- target_cpu = i;
- target_util = new_util;
- }
- }
- } else if (!prefer_idle) {
- if (best_idle_cpu < 0 ||
- (sysctl_sched_cstate_aware &&
- best_idle_cstate > idle_idx)) {
- best_idle_cstate = idle_idx;
- best_idle_cpu = i;
- }
- }
- } else if (backup_capacity == 0 ||
- backup_capacity > cur_capacity) {
- // Find a backup cpu with least capacity.
- backup_capacity = cur_capacity;
- backup_cpu = i;
- }
- }
+static int start_cpu(bool boosted)
+{
+ struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
- if (prefer_idle && best_idle_cpu >= 0)
- target_cpu = best_idle_cpu;
- else if (target_cpu < 0)
- target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
+ RCU_LOCKDEP_WARN(rcu_read_lock_sched_held(),
+ "sched RCU must be held");
- return target_cpu;
+ return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
}
-static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
+static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
{
+ int target_cpu = -1;
+ unsigned long target_util = prefer_idle ? ULONG_MAX : 0;
+ unsigned long backup_capacity = ULONG_MAX;
+ int best_idle_cpu = -1;
+ int best_idle_cstate = INT_MAX;
+ int backup_cpu = -1;
+ unsigned long min_util = boosted_task_util(p);
struct sched_domain *sd;
- struct sched_group *sg, *sg_target;
- int target_max_cap = INT_MAX;
- int target_cpu = task_cpu(p);
- unsigned long task_util_boosted, new_util;
- int i;
+ struct sched_group *sg;
+ int cpu = start_cpu(boosted);
- if (sysctl_sched_sync_hint_enable && sync) {
- int cpu = smp_processor_id();
- cpumask_t search_cpus;
- cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
- if (cpumask_test_cpu(cpu, &search_cpus))
- return cpu;
+ schedstat_inc(p, se.statistics.nr_wakeups_fbt_attempts);
+ schedstat_inc(this_rq(), eas_stats.fbt_attempts);
+
+ if (cpu < 0) {
+ schedstat_inc(p, se.statistics.nr_wakeups_fbt_no_cpu);
+ schedstat_inc(this_rq(), eas_stats.fbt_no_cpu);
+ return target_cpu;
}
- sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));
+ sd = rcu_dereference(per_cpu(sd_ea, cpu));
- if (!sd)
- return target;
+ if (!sd) {
+ schedstat_inc(p, se.statistics.nr_wakeups_fbt_no_sd);
+ schedstat_inc(this_rq(), eas_stats.fbt_no_sd);
+ return target_cpu;
+ }
sg = sd->groups;
- sg_target = sg;
- if (sysctl_sched_is_big_little) {
+ do {
+ int i;
- /*
- * Find group with sufficient capacity. We only get here if no cpu is
- * overutilized. We may end up overutilizing a cpu by adding the task,
- * but that should not be any worse than select_idle_sibling().
- * load_balance() should sort it out later as we get above the tipping
- * point.
- */
- do {
- /* Assuming all cpus are the same in group */
- int max_cap_cpu = group_first_cpu(sg);
+ for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
+ unsigned long cur_capacity, new_util, wake_util;
+ unsigned long min_wake_util = ULONG_MAX;
- /*
- * Assume smaller max capacity means more energy-efficient.
- * Ideally we should query the energy model for the right
- * answer but it easily ends up in an exhaustive search.
- */
- if (capacity_of(max_cap_cpu) < target_max_cap &&
- task_fits_max(p, max_cap_cpu)) {
- sg_target = sg;
- target_max_cap = capacity_of(max_cap_cpu);
- }
- } while (sg = sg->next, sg != sd->groups);
+ if (!cpu_online(i))
+ continue;
- task_util_boosted = boosted_task_util(p);
- /* Find cpu with sufficient capacity */
- for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg_target)) {
/*
* p's blocked utilization is still accounted for on prev_cpu
* so prev_cpu will receive a negative bias due to the double
* accounting. However, the blocked utilization may be zero.
*/
- new_util = cpu_util(i) + task_util_boosted;
+ wake_util = cpu_util_wake(i, p);
+ new_util = wake_util + task_util(p);
/*
* Ensure minimum capacity to grant the required boost.
* The target CPU can be already at a capacity level higher
* than the one required to boost the task.
*/
+ new_util = max(min_util, new_util);
+
if (new_util > capacity_orig_of(i))
continue;
- if (new_util < capacity_curr_of(i)) {
- target_cpu = i;
- if (cpu_rq(i)->nr_running)
- break;
+ /*
+ * Unconditionally favor idle CPUs for tasks that prefer
+ * idle, to improve latency.
+ */
+ if (idle_cpu(i) && prefer_idle) {
+ schedstat_inc(p, se.statistics.nr_wakeups_fbt_pref_idle);
+ schedstat_inc(this_rq(), eas_stats.fbt_pref_idle);
+ return i;
+ }
+
+ cur_capacity = capacity_curr_of(i);
+
+ if (new_util < cur_capacity) {
+ if (cpu_rq(i)->nr_running) {
+ /*
+ * Find the target cpu with the lowest utilization
+ * when prefer_idle, or the highest utilization
+ * otherwise.
+ */
+ if (prefer_idle) {
+ /* Favor the CPU that last ran the task */
+ if (new_util > target_util ||
+ wake_util > min_wake_util)
+ continue;
+ min_wake_util = wake_util;
+ target_util = new_util;
+ target_cpu = i;
+ } else if (target_util < new_util) {
+ target_util = new_util;
+ target_cpu = i;
+ }
+ } else if (!prefer_idle) {
+ int idle_idx = idle_get_state_idx(cpu_rq(i));
+
+ if (best_idle_cpu < 0 ||
+ (sysctl_sched_cstate_aware &&
+ best_idle_cstate > idle_idx)) {
+ best_idle_cstate = idle_idx;
+ best_idle_cpu = i;
+ }
+ }
+ } else if (backup_capacity > cur_capacity) {
+ /* Find a backup cpu with least capacity. */
+ backup_capacity = cur_capacity;
+ backup_cpu = i;
}
+ }
+ } while (sg = sg->next, sg != sd->groups);
+
+ if (target_cpu < 0)
+ target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
+
+ if (target_cpu >= 0) {
+ schedstat_inc(p, se.statistics.nr_wakeups_fbt_count);
+ schedstat_inc(this_rq(), eas_stats.fbt_count);
+ }
+
+ return target_cpu;
+}
+
+/*
+ * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
+ * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
+ *
+ * In that case WAKE_AFFINE doesn't make sense and we'll let
+ * BALANCE_WAKE sort things out.
+ */
+static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
+{
+ long min_cap, max_cap;
- /* cpu has capacity at higher OPP, keep it as fallback */
- if (target_cpu == task_cpu(p))
- target_cpu = i;
+ min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
+ max_cap = cpu_rq(cpu)->rd->max_cpu_capacity.val;
+
+ /* Minimum capacity is close to max, no need to abort wake_affine */
+ if (max_cap - min_cap < max_cap >> 3)
+ return 0;
+
+ /* Bring task utilization in sync with prev_cpu */
+ sync_entity_load_avg(&p->se);
+
+ return min_cap * 1024 < task_util(p) * capacity_margin;
+}
+
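The return test in wake_cap() above is the fixed-point comparison
task_util(p) * capacity_margin > min_cap * 1024, i.e. the task no longer
fits on the smaller of the two CPUs once the margin is applied. A worked
example, assuming the conventional capacity_margin of 1280 (~20%
headroom) and hypothetical capacities:

	/*
	 * Hypothetical asymmetric system: little capacity_orig = 430,
	 * big = 1024, capacity_margin assumed to be 1280.
	 *
	 * max_cap - min_cap = 594 >= (1024 >> 3) = 128, so the early
	 * "capacities are close enough" exit is not taken.
	 *
	 * task_util(p) = 400:
	 *   min_cap * 1024 = 440320 < 400 * 1280 = 512000
	 *   -> wake_cap() returns 1, WAKE_AFFINE is skipped.
	 *
	 * task_util(p) = 200:
	 *   min_cap * 1024 = 440320 >= 200 * 1280 = 256000
	 *   -> the task fits on the little cpu, wake_affine is allowed.
	 */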
+static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
+{
+ struct sched_domain *sd;
+ int target_cpu = prev_cpu, tmp_target;
+ bool boosted, prefer_idle;
+
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_attempts);
+ schedstat_inc(this_rq(), eas_stats.secb_attempts);
+
+ if (sysctl_sched_sync_hint_enable && sync) {
+ int cpu = smp_processor_id();
+
+ if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_sync);
+ schedstat_inc(this_rq(), eas_stats.secb_sync);
+ return cpu;
}
- } else {
- /*
- * Find a cpu with sufficient capacity
- */
+ }
+
+ rcu_read_lock();
#ifdef CONFIG_CGROUP_SCHEDTUNE
- bool boosted = schedtune_task_boost(p) > 0;
- bool prefer_idle = schedtune_prefer_idle(p) > 0;
+ boosted = schedtune_task_boost(p) > 0;
+ prefer_idle = schedtune_prefer_idle(p) > 0;
#else
- bool boosted = 0;
- bool prefer_idle = 0;
+ boosted = get_sysctl_sched_cfs_boost() > 0;
+ prefer_idle = 0;
#endif
- int tmp_target = find_best_target(p, boosted, prefer_idle);
- if (tmp_target >= 0) {
- target_cpu = tmp_target;
- if ((boosted || prefer_idle) && idle_cpu(target_cpu))
- return target_cpu;
+
+ sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
+ /* Find a cpu with sufficient capacity */
+ tmp_target = find_best_target(p, boosted, prefer_idle);
+
+ if (!sd)
+ goto unlock;
+ if (tmp_target >= 0) {
+ target_cpu = tmp_target;
+ if ((boosted || prefer_idle) && idle_cpu(target_cpu)) {
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_idle_bt);
+ schedstat_inc(this_rq(), eas_stats.secb_idle_bt);
+ goto unlock;
}
}
- if (target_cpu != task_cpu(p)) {
+ if (target_cpu != prev_cpu) {
struct energy_env eenv = {
- .util_delta = task_util(p),
- .src_cpu = task_cpu(p),
- .dst_cpu = target_cpu,
- .task = p,
+ .util_delta = task_util(p),
+ .src_cpu = prev_cpu,
+ .dst_cpu = target_cpu,
+ .task = p,
};
/* Not enough spare capacity on previous cpu */
- if (cpu_overutilized(task_cpu(p)))
- return target_cpu;
+ if (cpu_overutilized(prev_cpu)) {
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
+ schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
+ goto unlock;
+ }
+
+ if (energy_diff(&eenv) >= 0) {
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
+ schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
+ target_cpu = prev_cpu;
+ goto unlock;
+ }
- if (energy_diff(&eenv) >= 0)
- return task_cpu(p);
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_nrg_sav);
+ schedstat_inc(this_rq(), eas_stats.secb_nrg_sav);
+ goto unlock;
}
+ schedstat_inc(p, se.statistics.nr_wakeups_secb_count);
+ schedstat_inc(this_rq(), eas_stats.secb_count);
+
+unlock:
+ rcu_read_unlock();
+
return target_cpu;
}
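A compact summary of the wakeup decision ladder implemented above may
help when reading the schedstat counters it increments (a paraphrase of
the code, not new logic):

	/*
	 * Decision order in select_energy_cpu_brute():
	 *  1. sync wakeup hint: stay on the waking cpu if allowed;
	 *  2. find_best_target(): boosted/prefer_idle tasks that land
	 *     on an idle cpu are accepted immediately;
	 *  3. otherwise a move away from prev_cpu is only kept if
	 *     prev_cpu is overutilized (no spare capacity there), or if
	 *     energy_diff(&eenv) < 0, i.e. the energy model estimates a
	 *     saving; energy_diff() >= 0 falls back to prev_cpu.
	 */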
@@ -7062,10 +7584,19 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
return select_best_cpu(p, prev_cpu, 0, sync);
#endif
- if (sd_flag & SD_BALANCE_WAKE)
- want_affine = (!wake_wide(p) && task_fits_max(p, cpu) &&
- cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) ||
- energy_aware();
+ if (sd_flag & SD_BALANCE_WAKE) {
+ /*
+ * Do wake_cap() unconditionally, as it causes task and cpu
+ * utilization to be synced, and we need that for energy-aware
+ * wakeups.
+ */
+ int _wake_cap = wake_cap(p, cpu, prev_cpu);
+ want_affine = !wake_wide(p) && !_wake_cap
+ && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
+ }
+
+ if (energy_aware() && !(cpu_rq(prev_cpu)->rd->overutilized))
+ return select_energy_cpu_brute(p, prev_cpu, sync);
rcu_read_lock();
for_each_domain(cpu, tmp) {
@@ -7090,49 +7621,65 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (affine_sd) {
sd = NULL; /* Prefer wake_affine over balance flags */
- if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
+ if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
new_cpu = cpu;
}
if (!sd) {
- if (energy_aware() && !cpu_rq(cpu)->rd->overutilized)
- new_cpu = energy_aware_wake_cpu(p, prev_cpu, sync);
- else if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
- new_cpu = select_idle_sibling(p, new_cpu);
+ if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
+ new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
- } else while (sd) {
- struct sched_group *group;
- int weight;
+ } else {
+ int wu = sd_flag & SD_BALANCE_WAKE;
+ int cas_cpu = -1;
- if (!(sd->flags & sd_flag)) {
- sd = sd->child;
- continue;
+ if (wu) {
+ schedstat_inc(p, se.statistics.nr_wakeups_cas_attempts);
+ schedstat_inc(this_rq(), eas_stats.cas_attempts);
}
- group = find_idlest_group(sd, p, cpu, sd_flag);
- if (!group) {
- sd = sd->child;
- continue;
- }
+ while (sd) {
+ struct sched_group *group;
+ int weight;
- new_cpu = find_idlest_cpu(group, p, cpu);
- if (new_cpu == -1 || new_cpu == cpu) {
- /* Now try balancing at a lower domain level of cpu */
- sd = sd->child;
- continue;
+ if (wu)
+ schedstat_inc(sd, eas_stats.cas_attempts);
+
+ if (!(sd->flags & sd_flag)) {
+ sd = sd->child;
+ continue;
+ }
+
+ group = find_idlest_group(sd, p, cpu, sd_flag);
+ if (!group) {
+ sd = sd->child;
+ continue;
+ }
+
+ new_cpu = find_idlest_cpu(group, p, cpu);
+ if (new_cpu == -1 || new_cpu == cpu) {
+ /* Now try balancing at a lower domain level of cpu */
+ sd = sd->child;
+ continue;
+ }
+
+ /* Now try balancing at a lower domain level of new_cpu */
+ cpu = cas_cpu = new_cpu;
+ weight = sd->span_weight;
+ sd = NULL;
+ for_each_domain(cpu, tmp) {
+ if (weight <= tmp->span_weight)
+ break;
+ if (tmp->flags & sd_flag)
+ sd = tmp;
+ }
+ /* while loop will break here if sd == NULL */
}
- /* Now try balancing at a lower domain level of new_cpu */
- cpu = new_cpu;
- weight = sd->span_weight;
- sd = NULL;
- for_each_domain(cpu, tmp) {
- if (weight <= tmp->span_weight)
- break;
- if (tmp->flags & sd_flag)
- sd = tmp;
+ if (wu && (cas_cpu >= 0)) {
+ schedstat_inc(p, se.statistics.nr_wakeups_cas_count);
+ schedstat_inc(this_rq(), eas_stats.cas_count);
}
- /* while loop will break here if sd == NULL */
}
rcu_read_unlock();
@@ -8135,8 +8682,13 @@ static void update_blocked_averages(int cpu)
if (throttled_hierarchy(cfs_rq))
continue;
- if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+ if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq,
+ true))
update_tg_load_avg(cfs_rq, 0);
+
+ /* Propagate pending load changes to the parent */
+ if (cfs_rq->tg->se[cpu])
+ update_load_avg(cfs_rq->tg->se[cpu], 0);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -8196,7 +8748,7 @@ static inline void update_blocked_averages(int cpu)
raw_spin_lock_irqsave(&rq->lock, flags);
update_rq_clock(rq);
- update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+ update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -8434,13 +8986,14 @@ skip_unlock: __attribute__ ((unused));
cpu_rq(cpu)->cpu_capacity = capacity;
sdg->sgc->capacity = capacity;
sdg->sgc->max_capacity = capacity;
+ sdg->sgc->min_capacity = capacity;
}
void update_group_capacity(struct sched_domain *sd, int cpu)
{
struct sched_domain *child = sd->child;
struct sched_group *group, *sdg = sd->groups;
- unsigned long capacity, max_capacity;
+ unsigned long capacity, max_capacity, min_capacity;
unsigned long interval;
interval = msecs_to_jiffies(sd->balance_interval);
@@ -8454,6 +9007,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
capacity = 0;
max_capacity = 0;
+ min_capacity = ULONG_MAX;
if (child->flags & SD_OVERLAP) {
/*
@@ -8486,6 +9040,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
}
max_capacity = max(capacity, max_capacity);
+ min_capacity = min(capacity, min_capacity);
}
} else {
/*
@@ -8503,6 +9058,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
if (!cpu_isolated(cpumask_first(cpus))) {
capacity += sgc->capacity;
max_capacity = max(sgc->max_capacity, max_capacity);
+ min_capacity = min(sgc->min_capacity, min_capacity);
}
group = group->next;
} while (group != child->groups);
@@ -8510,6 +9066,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
sdg->sgc->capacity = capacity;
sdg->sgc->max_capacity = max_capacity;
+ sdg->sgc->min_capacity = min_capacity;
}
/*
@@ -8791,15 +9348,21 @@ static bool update_sd_pick_busiest(struct lb_env *env,
if (sgs->avg_load <= busiest->avg_load)
return false;
+ if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
+ goto asym_packing;
+
/*
- * Candiate sg has no more than one task per cpu and has higher
- * per-cpu capacity. No reason to pull tasks to less capable cpus.
+ * Candidate sg has no more than one task per CPU and
+ * has higher per-CPU capacity. Migrating tasks to less
+ * capable CPUs may harm throughput. Maximize throughput,
+ * power/energy consequences are not considered.
*/
if (sgs->sum_nr_running <= sgs->group_weight &&
group_smaller_cpu_capacity(sds->local, sg))
return false;
}
+asym_packing:
/* This is the busiest node in its class. */
if (!(env->sd->flags & SD_ASYM_PACKING))
return true;
@@ -8850,6 +9413,9 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
}
#endif /* CONFIG_NUMA_BALANCING */
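+/*
+ * A parent domain that contains a single group cannot move load
+ * anywhere; treat it as having no parent. The group list is circular,
+ * so groups != groups->next is the test for "more than one group".
+ */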
+#define lb_sd_parent(sd) \
+ (sd->parent && sd->parent->groups != sd->parent->groups->next)
+
/**
* update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @env: The load balancing environment.
@@ -8935,7 +9501,7 @@ next_group:
env->src_grp_nr_running = sds->busiest_stat.sum_nr_running;
- if (!env->sd->parent) {
+ if (!lb_sd_parent(env->sd)) {
/* update overload indicator if we are at root domain */
if (env->dst_rq->rd->overload != overload)
env->dst_rq->rd->overload = overload;
@@ -9524,7 +10090,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
int *continue_balancing)
{
int ld_moved = 0, cur_ld_moved, active_balance = 0;
- struct sched_domain *sd_parent = sd->parent;
+ struct sched_domain *sd_parent = lb_sd_parent(sd) ? sd->parent : NULL;
struct sched_group *group = NULL;
struct rq *busiest = NULL;
unsigned long flags;
@@ -10808,6 +11374,61 @@ static inline bool vruntime_normalized(struct task_struct *p)
return false;
}
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Propagate the changes of the sched_entity across the tg tree to make it
+ * visible to the root
+ */
+static void propagate_entity_cfs_rq(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq;
+
+ /* Start to propagate at parent */
+ se = se->parent;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+
+ update_load_avg(se, UPDATE_TG);
+ }
+}
+#else
+static void propagate_entity_cfs_rq(struct sched_entity *se) { }
+#endif
+
+static void detach_entity_cfs_rq(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ /* Catch up with the cfs_rq and remove our load when we leave */
+ update_load_avg(se, 0);
+ detach_entity_load_avg(cfs_rq, se);
+ update_tg_load_avg(cfs_rq, false);
+ propagate_entity_cfs_rq(se);
+}
+
+static void attach_entity_cfs_rq(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ /*
+ * Since the real depth could have been changed (only the FAIR
+ * class maintains a depth value), reset the depth properly.
+ */
+ se->depth = se->parent ? se->parent->depth + 1 : 0;
+#endif
+
+ /* Synchronize entity with its cfs_rq */
+ update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
+ attach_entity_load_avg(cfs_rq, se);
+ update_tg_load_avg(cfs_rq, false);
+ propagate_entity_cfs_rq(se);
+}
+
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
@@ -10822,8 +11443,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
se->vruntime -= cfs_rq->min_vruntime;
}
- /* Catch up with the cfs_rq and remove our load when we leave */
- detach_entity_load_avg(cfs_rq, se);
+ detach_entity_cfs_rq(se);
}
static void attach_task_cfs_rq(struct task_struct *p)
@@ -10831,16 +11451,7 @@ static void attach_task_cfs_rq(struct task_struct *p)
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
-#ifdef CONFIG_FAIR_GROUP_SCHED
- /*
- * Since the real-depth could have been changed (only FAIR
- * class maintain depth value), reset depth properly.
- */
- se->depth = se->parent ? se->parent->depth + 1 : 0;
-#endif
-
- /* Synchronize task with its cfs_rq */
- attach_entity_load_avg(cfs_rq, se);
+ attach_entity_cfs_rq(se);
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
@@ -10894,6 +11505,9 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
#ifdef CONFIG_SMP
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ cfs_rq->propagate_avg = 0;
+#endif
atomic_long_set(&cfs_rq->removed_load_avg, 0);
atomic_long_set(&cfs_rq->removed_util_avg, 0);
#endif
@@ -10931,8 +11545,9 @@ void free_fair_sched_group(struct task_group *tg)
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
- struct cfs_rq *cfs_rq;
struct sched_entity *se;
+ struct cfs_rq *cfs_rq;
+ struct rq *rq;
int i;
tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -10947,6 +11562,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
init_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)
@@ -10960,6 +11577,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
init_cfs_rq(cfs_rq);
init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
init_entity_runnable_average(se);
+
+ raw_spin_lock_irq(&rq->lock);
+ post_init_entity_util_avg(se);
+ raw_spin_unlock_irq(&rq->lock);
}
return 1;
@@ -11056,8 +11677,10 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
/* Possible calls to update_curr() need rq clock */
update_rq_clock(rq);
- for_each_sched_entity(se)
- update_cfs_shares(group_cfs_rq(se));
+ for_each_sched_entity(se) {
+ update_load_avg(se, UPDATE_TG);
+ update_cfs_shares(se);
+ }
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 29345ed74069..c03d51a017bf 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -5,6 +5,7 @@
#include "sched.h"
+#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <trace/events/sched.h>
@@ -1005,6 +1006,9 @@ static void update_curr_rt(struct rq *rq)
if (unlikely((s64)delta_exec <= 0))
return;
+ /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+ cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
+
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
@@ -1456,11 +1460,30 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
}
#endif
+/*
+ * Return whether the task on the given cpu is currently non-preemptible
+ * while handling a potentially long softirq, or if the task is likely
+ * to block preemption soon because it is a ksoftirqd thread that is
+ * handling slow softirqs.
+ */
+bool
+task_may_not_preempt(struct task_struct *task, int cpu)
+{
+ __u32 softirqs = per_cpu(active_softirqs, cpu) |
+ __IRQ_STAT(cpu, __softirq_pending);
+ struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
+
+ return ((softirqs & LONG_SOFTIRQ_MASK) &&
+ (task == cpu_ksoftirqd ||
+ task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
+}
+
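task_may_not_preempt() relies on LONG_SOFTIRQ_MASK and active_softirqs
from the companion interrupt.h/softirq.c changes in this series (the
softirq.c half appears further below). A minimal sketch of the assumed
mask, covering the softirqs that commonly run long; the exact set is an
assumption, not confirmed by this diff:

	/*
	 * Hypothetical reconstruction of the companion interrupt.h
	 * change: softirqs whose handlers are known to run long enough
	 * to starve a waiting RT task.
	 */
	#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \
				   (1 << NET_RX_SOFTIRQ) | \
				   (1 << BLOCK_SOFTIRQ)  | \
				   (1 << TASKLET_SOFTIRQ))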
static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
struct task_struct *curr;
struct rq *rq;
+ bool may_not_preempt;
#ifdef CONFIG_SCHED_HMP
return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
@@ -1476,7 +1499,17 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
curr = READ_ONCE(rq->curr); /* unlocked access */
/*
- * If the current task on @p's runqueue is an RT task, then
+ * If the current task on @p's runqueue is a softirq task,
+ * it may run without preemption for a time that is
+ * ill-suited for a waiting RT task. Therefore, try to
+ * wake this RT task on another runqueue.
+ *
+ * Also, if the current task on @p's runqueue is an RT task, then
* try to see if we can wake this RT task up on another
* runqueue. Otherwise simply start this RT task
* on its current runqueue.
@@ -1497,17 +1530,22 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
* This test is optimistic, if we get it wrong the load-balancer
* will have to sort it out.
*/
- if (curr && unlikely(rt_task(curr)) &&
+ may_not_preempt = task_may_not_preempt(curr, cpu);
+ if (may_not_preempt ||
+ (unlikely(rt_task(curr)) &&
(curr->nr_cpus_allowed < 2 ||
- curr->prio <= p->prio)) {
+ curr->prio <= p->prio))) {
int target = find_lowest_rq(p);
/*
- * Don't bother moving it if the destination CPU is
- * not running a lower priority task.
+ * If this cpu is non-preemptible, prefer the remote
+ * cpu even if it is running a higher-prio task.
+ * Otherwise, don't bother moving the task if the
+ * destination CPU is not running a lower-priority task.
*/
if (target != -1 &&
- p->prio < cpu_rq(target)->rt.highest_prio.curr)
+ (may_not_preempt ||
+ p->prio < cpu_rq(target)->rt.highest_prio.curr))
cpu = target;
}
rcu_read_unlock();
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b88f647ea935..67b7da81f8a2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -465,6 +465,7 @@ struct cfs_rq {
unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
unsigned long tg_load_avg_contrib;
+ unsigned long propagate_avg;
#endif
atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
@@ -651,6 +652,9 @@ struct root_domain {
/* Maximum cpu capacity in the system. */
struct max_cpu_capacity max_cpu_capacity;
+
+ /* First cpu with maximum and minimum original capacity */
+ int max_cap_orig_cpu, min_cap_orig_cpu;
};
extern struct root_domain def_root_domain;
@@ -708,6 +712,7 @@ struct rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
+ struct list_head *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
@@ -827,6 +832,9 @@ struct rq {
/* try_to_wake_up() stats */
unsigned int ttwu_count;
unsigned int ttwu_local;
+#ifdef CONFIG_SMP
+ struct eas_stats eas_stats;
+#endif
#endif
#ifdef CONFIG_SMP
@@ -997,6 +1005,7 @@ struct sched_group_capacity {
*/
unsigned long capacity;
unsigned long max_capacity; /* Max per-cpu capacity in group */
+ unsigned long min_capacity; /* Min per-CPU capacity in group */
unsigned long next_update;
int imbalance; /* XXX unrelated to capacity but shared group state */
/*
@@ -2158,6 +2167,7 @@ extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
unsigned long to_ratio(u64 period, u64 runtime);
extern void init_entity_runnable_average(struct sched_entity *se);
+extern void post_init_entity_util_avg(struct sched_entity *se);
static inline void __add_nr_running(struct rq *rq, unsigned count)
{
@@ -2671,6 +2681,11 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__release(rq2->lock);
}
+/*
+ * task_may_not_preempt - check whether a task may not be preemptible soon
+ */
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
+
#else /* CONFIG_SMP */
/*
@@ -2792,3 +2807,55 @@ static inline u64 irq_time_read(int cpu)
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+#ifdef CONFIG_CPU_FREQ
+DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+
+/**
+ * cpufreq_update_util - Take a note about CPU utilization changes.
+ * @rq: Runqueue to carry out the update for.
+ * @flags: Update reason flags.
+ *
+ * This function is called by the scheduler on the CPU whose utilization is
+ * being updated.
+ *
+ * It can only be called from RCU-sched read-side critical sections.
+ *
+ * The way cpufreq is currently arranged requires it to evaluate the CPU
+ * performance state (frequency/voltage) on a regular basis to prevent it from
+ * being stuck in a completely inadequate performance level for too long.
+ * That is not guaranteed to happen if the updates are only triggered from CFS,
+ * though, because they may not be coming in if RT or deadline tasks are active
+ * all the time (or there are RT and DL tasks only).
+ *
+ * As a workaround for that issue, this function is called by the RT and DL
+ * sched classes to trigger extra cpufreq updates to prevent it from stalling,
+ * but that really is a band-aid. Going forward it should be replaced with
+ * solutions targeted more specifically at RT and DL tasks.
+ */
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
+{
+ struct update_util_data *data;
+
+ data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
+ if (data)
+ data->func(data, rq_clock(rq), flags);
+}
+
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
+{
+ if (cpu_of(rq) == smp_processor_id())
+ cpufreq_update_util(rq, flags);
+}
+#else
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
+#endif /* CONFIG_CPU_FREQ */
+
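cpufreq_update_util() dereferences a per-CPU hook that a governor must
install; this hunk only declares the per-CPU pointer, so the
registration side is assumed to be the cpufreq_add_update_util_hook()
helper from the same patch series. A hedged sketch of a hypothetical
governor-side consumer (my_gov_* names are invented for illustration):

	struct my_gov_cpu {
		struct update_util_data update_util;
		u64 last_update;
		bool want_max;
	};
	static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_cpu);

	static void my_gov_update(struct update_util_data *data, u64 time,
				  unsigned int flags)
	{
		struct my_gov_cpu *gc = container_of(data, struct my_gov_cpu,
						     update_util);

		/* Called from scheduler context with sched RCU held. */
		gc->last_update = time;
		if (flags & SCHED_CPUFREQ_RT)
			gc->want_max = true;	/* RT activity: request fmax */
	}

	static void my_gov_start(int cpu)
	{
		/* Assumes kernel/sched/cpufreq.c in this series provides
		 * cpufreq_add_update_util_hook(). */
		cpufreq_add_update_util_hook(cpu,
				&per_cpu(my_gov_cpu, cpu).update_util,
				my_gov_update);
	}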
+#ifdef arch_scale_freq_capacity
+#ifndef arch_scale_freq_invariant
+#define arch_scale_freq_invariant() (true)
+#endif
+#else /* arch_scale_freq_capacity */
+#define arch_scale_freq_invariant() (false)
+#endif
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 87e2c9f0c33e..6d74a7c77c8c 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -12,6 +12,28 @@
*/
#define SCHEDSTAT_VERSION 15
+#ifdef CONFIG_SMP
+static inline void show_easstat(struct seq_file *seq, struct eas_stats *stats)
+{
+ /* eas-specific runqueue stats */
+ seq_printf(seq, "eas %llu %llu %llu %llu %llu %llu ",
+ stats->sis_attempts, stats->sis_idle, stats->sis_cache_affine,
+ stats->sis_suff_cap, stats->sis_idle_cpu, stats->sis_count);
+
+ seq_printf(seq, "%llu %llu %llu %llu %llu %llu %llu ",
+ stats->secb_attempts, stats->secb_sync, stats->secb_idle_bt,
+ stats->secb_insuff_cap, stats->secb_no_nrg_sav,
+ stats->secb_nrg_sav, stats->secb_count);
+
+ seq_printf(seq, "%llu %llu %llu %llu %llu ",
+ stats->fbt_attempts, stats->fbt_no_cpu, stats->fbt_no_sd,
+ stats->fbt_pref_idle, stats->fbt_count);
+
+ seq_printf(seq, "%llu %llu\n",
+ stats->cas_attempts, stats->cas_count);
+}
+#endif
+
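show_easstat() appends one space-separated record per runqueue (and per
domain) to /proc/schedstat: 20 counters after the "eas" tag — six
select_idle_sibling (sis) stats, seven select_energy_cpu_brute (secb)
stats, five find_best_target (fbt) stats and two cas (slow-path) stats.
A sample line, with values invented purely for illustration:

	eas 10524 311 96 2210 884 10524 9931 207 118 46 702 8858 9931 8403 12 3 151 8403 9931 9931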
static int show_schedstat(struct seq_file *seq, void *v)
{
int cpu;
@@ -40,6 +62,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
#ifdef CONFIG_SMP
+ show_easstat(seq, &rq->eas_stats);
+
/* domain-specific stats */
rcu_read_lock();
for_each_domain(cpu, sd) {
@@ -66,6 +90,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
sd->ttwu_wake_remote, sd->ttwu_move_affine,
sd->ttwu_move_balance);
+
+ show_easstat(seq, &sd->eas_stats);
}
rcu_read_unlock();
#endif
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index b0c5fe6d1f3b..a71e94cecdb6 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -12,11 +12,12 @@
#include "tune.h"
#ifdef CONFIG_CGROUP_SCHEDTUNE
-static bool schedtune_initialized = false;
+bool schedtune_initialized = false;
#endif
unsigned int sysctl_sched_cfs_boost __read_mostly;
+extern struct reciprocal_value schedtune_spc_rdiv;
extern struct target_nrg schedtune_target_nrg;
/* Performance Boost region (B) threshold params */
@@ -675,6 +676,9 @@ int schedtune_task_boost(struct task_struct *p)
struct schedtune *st;
int task_boost;
+ if (unlikely(!schedtune_initialized))
+ return 0;
+
/* Get task boost value */
rcu_read_lock();
st = task_schedtune(p);
@@ -689,6 +693,9 @@ int schedtune_prefer_idle(struct task_struct *p)
struct schedtune *st;
int prefer_idle;
+ if (unlikely(!schedtune_initialized))
+ return 0;
+
/* Get prefer_idle value */
rcu_read_lock();
st = task_schedtune(p);
@@ -822,6 +829,7 @@ schedtune_boostgroup_init(struct schedtune *st)
bg = &per_cpu(cpu_boost_groups, cpu);
bg->group[st->idx].boost = 0;
bg->group[st->idx].tasks = 0;
+ raw_spin_lock_init(&bg->lock);
}
return 0;
@@ -1121,9 +1129,12 @@ schedtune_init(void)
pr_info("schedtune: configured to support global boosting only\n");
#endif
+ schedtune_spc_rdiv = reciprocal_value(100);
+
return 0;
nodata:
+ pr_warning("schedtune: disabled!\n");
rcu_read_unlock();
return -EINVAL;
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 479e4436f787..39ffd41594ce 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -57,6 +57,13 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * with the expectation that approximate answers are acceptable and
+ * therefore no synchronization is required.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
+
const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -253,6 +260,7 @@ asmlinkage __visible void __do_softirq(void)
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
+ __this_cpu_write(active_softirqs, pending);
local_irq_enable();
@@ -282,6 +290,7 @@ restart:
pending >>= softirq_bit;
}
+ __this_cpu_write(active_softirqs, 0);
rcu_bh_qs();
local_irq_disable();
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 816999804a16..f27d2ba78d14 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -522,13 +522,6 @@ static struct ctl_table kern_table[] = {
.extra2 = &max_sched_granularity_ns,
},
{
- .procname = "sched_is_big_little",
- .data = &sysctl_sched_is_big_little,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
.procname = "sched_sync_hint_enable",
.data = &sysctl_sched_sync_hint_enable,
.maxlen = sizeof(unsigned int),
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 0ecef3e4690e..5e6db6b1e3bd 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -58,7 +58,9 @@ static int __init test_user_copy_init(void)
usermem = (char __user *)user_addr;
bad_usermem = (char *)user_addr;
- /* Legitimate usage: none of these should fail. */
+ /*
+ * Legitimate usage: none of these copies should fail.
+ */
ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
"legitimate copy_from_user failed");
ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
@@ -68,19 +70,33 @@ static int __init test_user_copy_init(void)
ret |= test(put_user(value, (unsigned long __user *)usermem),
"legitimate put_user failed");
- /* Invalid usage: none of these should succeed. */
+ /*
+ * Invalid usage: none of these copies should succeed.
+ */
+
+ /* Reject kernel-to-kernel copies through copy_from_user(). */
ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
PAGE_SIZE),
"illegal all-kernel copy_from_user passed");
+
+#if 0
+ /*
+ * When running with SMAP/PAN/etc, this will Oops the kernel
+ * due to the zeroing of userspace memory on failure. This needs
+ * to be tested in LKDTM instead, since this test module does not
+ * expect to explode.
+ */
ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
PAGE_SIZE),
"illegal reversed copy_from_user passed");
+#endif
ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
PAGE_SIZE),
"illegal all-kernel copy_to_user passed");
ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
PAGE_SIZE),
"illegal reversed copy_to_user passed");
+
ret |= test(!get_user(value, (unsigned long __user *)kmem),
"illegal get_user passed");
ret |= test(!put_user(value, (unsigned long __user *)kmem),
diff --git a/mm/memblock.c b/mm/memblock.c
index 89e74fa82290..351a4840a407 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1688,6 +1688,30 @@ static void __init_memblock memblock_dump(struct memblock_type *type, char *name
}
}
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+ struct memblock_type *type = &memblock.reserved;
+ unsigned long size = 0;
+ int idx;
+
+ for (idx = 0; idx < type->cnt; idx++) {
+ struct memblock_region *rgn = &type->regions[idx];
+ phys_addr_t start, end;
+
+ if (rgn->base + rgn->size < start_addr)
+ continue;
+ if (rgn->base > end_addr)
+ continue;
+
+ start = rgn->base;
+ end = start + rgn->size;
+ size += end - start;
+ }
+
+ return size;
+}
+
void __init_memblock __memblock_dump_all(void)
{
pr_info("MEMBLOCK configuration:\n");
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b3becd9941e9..44443a5b5a59 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1619,12 +1619,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
if (ret) {
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
- /*
- * We know that soft_offline_huge_page() tries to migrate
- * only one hugepage pointed to by hpage, so we need not
- * run through the pagelist here.
- */
- putback_active_hugepage(hpage);
+ if (!list_empty(&pagelist))
+ putback_movable_pages(&pagelist);
if (ret > 0)
ret = -EIO;
} else {
diff --git a/mm/mlock.c b/mm/mlock.c
index d843bc9d32dd..206e86b98a03 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -277,7 +277,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
int i;
int nr = pagevec_count(pvec);
- int delta_munlocked;
+ int delta_munlocked = -nr;
struct pagevec pvec_putback;
int pgrescued = 0;
@@ -297,6 +297,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
continue;
else
__munlock_isolation_failed(page);
+ } else {
+ delta_munlocked++;
}
/*
@@ -308,7 +310,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
pagevec_add(&pvec_putback, pvec->pages[i]);
pvec->pages[i] = NULL;
}
- delta_munlocked = -nr + pagevec_count(&pvec_putback);
__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
spin_unlock_irq(&zone->lru_lock);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 170c1486e5c9..4fbb23c1cba7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -274,6 +274,26 @@ int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
+ unsigned long max_initialise;
+ unsigned long reserved_lowmem;
+
+ /*
+ * Initialise at least 2G of a node, but also take into account the
+ * two large system hashes that can take up 1GB for 0.25TB/node.
+ */
+ max_initialise = max(2UL << (30 - PAGE_SHIFT),
+ (pgdat->node_spanned_pages >> 8));
+
+ /*
+ * Compensate for all the memblock reservations (e.g. crash kernel)
+ * from the initial estimation to make sure we will initialize enough
+ * memory to boot.
+ */
+ reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+ pgdat->node_start_pfn + max_initialise);
+ max_initialise += reserved_lowmem;
+
+ pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
pgdat->first_deferred_pfn = ULONG_MAX;
}
@@ -307,10 +327,9 @@ static inline bool update_defer_init(pg_data_t *pgdat,
/* Always populate low zones for address-constrained allocations */
if (zone_end < pgdat_end_pfn(pgdat))
return true;
-
/* Initialise at least 2G of the highest zone */
(*nr_initialised)++;
- if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
+ if ((*nr_initialised > pgdat->static_init_size) &&
(pfn & (PAGES_PER_SECTION - 1)) == 0) {
pgdat->first_deferred_pfn = pfn;
return false;
@@ -5456,7 +5475,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
/* pg_data_t should be reset to zero when it's allocated */
WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
- reset_deferred_meminit(pgdat);
pgdat->node_id = nid;
pgdat->node_start_pfn = node_start_pfn;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -5475,6 +5493,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
(unsigned long)pgdat->node_mem_map);
#endif
+ reset_deferred_meminit(pgdat);
free_area_init_core(pgdat);
}
diff --git a/mm/slub.c b/mm/slub.c
index 64bc4e973789..a5f6c6d107e9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5357,6 +5357,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
char mbuf[64];
char *buf;
struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+ ssize_t len;
if (!attr || !attr->store || !attr->show)
continue;
@@ -5381,8 +5382,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
buf = buffer;
}
- attr->show(root_cache, buf);
- attr->store(s, buf, strlen(buf));
+ len = attr->show(root_cache, buf);
+ if (len > 0)
+ attr->store(s, buf, len);
}
if (buffer)
diff --git a/mm/truncate.c b/mm/truncate.c
index 30c2f059a488..8ca20a98c327 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -799,7 +799,7 @@ EXPORT_SYMBOL(truncate_setsize);
*/
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
- int bsize = 1 << inode->i_blkbits;
+ int bsize = i_blocksize(inode);
loff_t rounded_from;
struct page *page;
pgoff_t index;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 413d18e37083..ff8bb41d713f 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -768,6 +768,13 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
return -EPROTONOSUPPORT;
}
}
+
+ if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+ __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
+
+ if (defpvid >= VLAN_VID_MASK)
+ return -EINVAL;
+ }
#endif
return 0;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 8a7ada8bb947..bcb4559e735d 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -166,6 +166,8 @@ static void br_stp_start(struct net_bridge *br)
br_debug(br, "using kernel STP\n");
/* To start timers on any ports left in blocking */
+ if (br->dev->flags & IFF_UP)
+ mod_timer(&br->hello_timer, jiffies + br->hello_time);
br_port_state_selection(br);
}
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 5f0f5af0ec35..7dbe6a5c31eb 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg)
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
- if (br->stp_enabled != BR_USER_STP)
+ if (br->stp_enabled == BR_KERNEL_STP)
mod_timer(&br->hello_timer,
round_jiffies(jiffies + br->hello_time));
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 57922df9c250..50e77fe096f4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -184,7 +184,7 @@ EXPORT_SYMBOL(dev_base_lock);
/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);
-static unsigned int napi_gen_id;
+static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
@@ -3055,7 +3055,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
int queue_index = 0;
#ifdef CONFIG_XPS
- if (skb->sender_cpu == 0)
+ u32 sender_cpu = skb->sender_cpu - 1;
+
+ if (sender_cpu >= (u32)NR_CPUS)
skb->sender_cpu = raw_smp_processor_id() + 1;
#endif
@@ -4745,25 +4747,22 @@ EXPORT_SYMBOL_GPL(napi_by_id);
void napi_hash_add(struct napi_struct *napi)
{
- if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
+ if (test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
+ return;
- spin_lock(&napi_hash_lock);
+ spin_lock(&napi_hash_lock);
- /* 0 is not a valid id, we also skip an id that is taken
- * we expect both events to be extremely rare
- */
- napi->napi_id = 0;
- while (!napi->napi_id) {
- napi->napi_id = ++napi_gen_id;
- if (napi_by_id(napi->napi_id))
- napi->napi_id = 0;
- }
+ /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+ do {
+ if (unlikely(++napi_gen_id < NR_CPUS + 1))
+ napi_gen_id = NR_CPUS + 1;
+ } while (napi_by_id(napi_gen_id));
+ napi->napi_id = napi_gen_id;
- hlist_add_head_rcu(&napi->napi_hash_node,
- &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+ hlist_add_head_rcu(&napi->napi_hash_node,
+ &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
- spin_unlock(&napi_hash_lock);
- }
+ spin_unlock(&napi_hash_lock);
}
EXPORT_SYMBOL_GPL(napi_hash_add);
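The rewrite above works because skb->sender_cpu and napi_id share
storage in struct sk_buff: sender_cpu values are cpu + 1, so genuine
NAPI ids must start above NR_CPUS. A hypothetical helper making the
partition explicit (illustration only, not part of this patch):

	static inline bool napi_id_valid(unsigned int napi_id)
	{
		/* 0 means unset; 1..NR_CPUS encode sender_cpu = cpu + 1 */
		return napi_id > NR_CPUS;
	}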
diff --git a/net/core/dst.c b/net/core/dst.c
index a1656e3b8d72..d7ad628bf64e 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(dst_discard_out);
-const u32 dst_default_metrics[RTAX_MAX + 1] = {
+const struct dst_metrics dst_default_metrics = {
/* This initializer is needed to force linker to place this variable
* into const section. Otherwise it might end into bss section.
* We really want to avoid false sharing on this variable, and catch
* any writes on it.
*/
- [RTAX_MAX] = 0xdeadbeef,
+ .refcnt = ATOMIC_INIT(1),
};
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
if (dev)
dev_hold(dev);
dst->ops = ops;
- dst_init_metrics(dst, dst_default_metrics, true);
+ dst_init_metrics(dst, dst_default_metrics.metrics, true);
dst->expires = 0UL;
dst->path = dst;
dst->from = NULL;
@@ -315,25 +315,30 @@ EXPORT_SYMBOL(dst_release);
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
- u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+ struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
if (p) {
- u32 *old_p = __DST_METRICS_PTR(old);
+ struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
unsigned long prev, new;
- memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+ atomic_set(&p->refcnt, 1);
+ memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
new = (unsigned long) p;
prev = cmpxchg(&dst->_metrics, old, new);
if (prev != old) {
kfree(p);
- p = __DST_METRICS_PTR(prev);
+ p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
if (prev & DST_METRICS_READ_ONLY)
p = NULL;
+ } else if (prev & DST_METRICS_REFCOUNTED) {
+ if (atomic_dec_and_test(&old_p->refcnt))
+ kfree(old_p);
}
}
- return p;
+ BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
+ return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
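The BUILD_BUG_ON above pins down the layout this refcounting scheme
depends on: the metrics array must sit at offset 0 so that a struct
dst_metrics pointer and the legacy u32 metrics pointer stored in
dst->_metrics are interchangeable. The include/net/dst.h half of this
change is not shown here; assuming it matches the upstream fix, the
layout is:

	/* Assumed include/net/dst.h counterpart (not shown in this diff): */
	struct dst_metrics {
		u32		metrics[RTAX_MAX];	/* must stay first */
		atomic_t	refcnt;
	};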
@@ -342,7 +347,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
unsigned long prev, new;
- new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+ new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
prev = cmpxchg(&dst->_metrics, old, new);
if (prev == old)
kfree(__DST_METRICS_PTR(old));
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index fe38ef58997c..d43544ce7550 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1458,13 +1458,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
cb->nlh->nlmsg_seq, 0,
NLM_F_MULTI,
ext_filter_mask);
- /* If we ran out of room on the first message,
- * we're in trouble
- */
- WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
- if (err < 0)
- goto out;
+ if (err < 0) {
+ if (likely(skb->len))
+ goto out;
+
+ goto out_err;
+ }
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
@@ -1472,10 +1472,12 @@ cont:
}
}
out:
+ err = skb->len;
+out_err:
cb->args[1] = idx;
cb->args[0] = h;
- return skb->len;
+ return err;
}
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
@@ -3127,8 +3129,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
err = br_dev->netdev_ops->ndo_bridge_getlink(
skb, portid, seq, dev,
filter_mask, NLM_F_MULTI);
- if (err < 0 && err != -EOPNOTSUPP)
- break;
+ if (err < 0 && err != -EOPNOTSUPP) {
+ if (likely(skb->len))
+ break;
+
+ goto out_err;
+ }
}
idx++;
}
@@ -3139,16 +3145,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
seq, dev,
filter_mask,
NLM_F_MULTI);
- if (err < 0 && err != -EOPNOTSUPP)
- break;
+ if (err < 0 && err != -EOPNOTSUPP) {
+ if (likely(skb->len))
+ break;
+
+ goto out_err;
+ }
}
idx++;
}
}
+ err = skb->len;
+out_err:
rcu_read_unlock();
cb->args[0] = idx;
- return skb->len;
+ return err;
}
static inline size_t bridge_nlmsg_size(void)
diff --git a/net/core/sock.c b/net/core/sock.c
index 4efaa3b6633d..39e9ab7c598e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1702,17 +1702,17 @@ EXPORT_SYMBOL(skb_set_owner_w);
void skb_orphan_partial(struct sk_buff *skb)
{
- /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
- * so we do not completely orphan skb, but transfert all
- * accounted bytes but one, to avoid unexpected reorders.
- */
if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
|| skb->destructor == tcp_wfree
#endif
) {
- atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
- skb->truesize = 1;
+ struct sock *sk = skb->sk;
+
+ if (atomic_inc_not_zero(&sk->sk_refcnt)) {
+ atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+ skb->destructor = sock_efree;
+ }
} else {
skb_orphan(skb);
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 8113ad58fcb4..3470ad1843bb 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -422,6 +422,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
@@ -486,6 +489,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c9ef99fbd0dd..aad274c67b5e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1035,7 +1035,7 @@ static struct inet_protosw inetsw_array[] =
.type = SOCK_DGRAM,
.protocol = IPPROTO_ICMP,
.prot = &ping_prot,
- .ops = &inet_dgram_ops,
+ .ops = &inet_sockraw_ops,
.flags = INET_PROTOSW_REUSE,
},
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index a353d1d92f01..7c4c881a7187 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -758,7 +758,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int e = 0, s_e;
struct fib_table *tb;
struct hlist_head *head;
- int dumped = 0;
+ int dumped = 0, err;
if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
@@ -778,20 +778,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
if (dumped)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
- if (fib_table_dump(tb, skb, cb) < 0)
- goto out;
+ err = fib_table_dump(tb, skb, cb);
+ if (err < 0) {
+ if (likely(skb->len))
+ goto out;
+
+ goto out_err;
+ }
dumped = 1;
next:
e++;
}
}
out:
+ err = skb->len;
+out_err:
rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = h;
- return skb->len;
+ return err;
}
/* Prepare and feed intra-kernel routing request.
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 67d44aa9e09f..b2504712259f 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -204,6 +204,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
+ struct dst_metrics *m;
change_nexthops(fi) {
if (nexthop_nh->nh_dev)
@@ -214,8 +215,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
rt_fibinfo_free(&nexthop_nh->nh_rth_input);
} endfor_nexthops(fi);
- if (fi->fib_metrics != (u32 *) dst_default_metrics)
- kfree(fi->fib_metrics);
+ m = fi->fib_metrics;
+ if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
+ kfree(m);
kfree(fi);
}
@@ -982,11 +984,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
val = 255;
if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
return -EINVAL;
- fi->fib_metrics[type - 1] = val;
+ fi->fib_metrics->metrics[type - 1] = val;
}
if (ecn_ca)
- fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+ fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
return 0;
}
@@ -1044,11 +1046,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
goto failure;
fib_info_cnt++;
if (cfg->fc_mx) {
- fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+ fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
if (!fi->fib_metrics)
goto failure;
+ atomic_set(&fi->fib_metrics->refcnt, 1);
} else
- fi->fib_metrics = (u32 *) dst_default_metrics;
+ fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
fi->fib_net = net;
fi->fib_protocol = cfg->fc_protocol;
@@ -1251,7 +1254,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
if (fi->fib_priority &&
nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
goto nla_put_failure;
- if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+ if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
goto nla_put_failure;
if (fi->fib_prefsrc &&
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2059c2010f9c..fa59bc35dbb5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1906,6 +1906,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
/* rcu_read_lock is hold by caller */
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+ int err;
+
if (i < s_i) {
i++;
continue;
@@ -1916,17 +1918,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
continue;
}
- if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWROUTE,
- tb->tb_id,
- fa->fa_type,
- xkey,
- KEYLENGTH - fa->fa_slen,
- fa->fa_tos,
- fa->fa_info, NLM_F_MULTI) < 0) {
+ err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+ tb->tb_id, fa->fa_type,
+ xkey, KEYLENGTH - fa->fa_slen,
+ fa->fa_tos, fa->fa_info, NLM_F_MULTI);
+ if (err < 0) {
cb->args[4] = i;
- return -1;
+ return err;
}
i++;
}
@@ -1948,10 +1947,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
t_key key = cb->args[3];
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
- if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
+ int err;
+
+ err = fn_trie_dump_leaf(l, tb, skb, cb);
+ if (err < 0) {
cb->args[3] = key;
cb->args[2] = count;
- return -1;
+ return err;
}
++count;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 012aa120b090..81fcff83d309 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -676,6 +676,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
newsk->sk_write_space = sk_stream_write_space;
+ inet_sk(newsk)->mc_list = NULL;
+
newsk->sk_mark = inet_rsk(req)->ir_mark;
atomic64_set(&newsk->sk_cookie,
atomic64_read(&inet_rsk(req)->ir_cookie));
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7e31491e9396..fd15e55b28d1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1362,8 +1362,12 @@ static void rt_add_uncached_list(struct rtable *rt)
static void ipv4_dst_destroy(struct dst_entry *dst)
{
+ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
struct rtable *rt = (struct rtable *) dst;
+ if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
+ kfree(p);
+
if (!list_empty(&rt->rt_uncached)) {
struct uncached_list *ul = rt->rt_uncached_list;
@@ -1415,7 +1419,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
rt->rt_gateway = nh->nh_gw;
rt->rt_uses_gateway = 1;
}
- dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+ dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
+ if (fi->fib_metrics != &dst_default_metrics) {
+ rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+ atomic_inc(&fi->fib_metrics->refcnt);
+ }
#ifdef CONFIG_IP_ROUTE_CLASSID
rt->dst.tclassid = nh->nh_tclassid;
#endif
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 62815497f29c..57e5938fd669 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1077,9 +1077,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
int *copied, size_t size)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct sockaddr *uaddr = msg->msg_name;
int err, flags;
- if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+ if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+ (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+ uaddr->sa_family == AF_UNSPEC))
return -EOPNOTSUPP;
if (tp->fastopen_req)
return -EALREADY; /* Another Fast Open is in progress */
@@ -1092,7 +1095,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
tp->fastopen_req->size = size;
flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
- err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+ err = __inet_stream_connect(sk->sk_socket, uaddr,
msg->msg_namelen, flags);
*copied = tp->fastopen_req->copied;
tcp_free_fastopen_req(tp);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 882caa4e72bc..aafe68134763 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -183,6 +183,7 @@ void tcp_init_congestion_control(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ tcp_sk(sk)->prior_ssthresh = 0;
if (icsk->icsk_ca_ops->init)
icsk->icsk_ca_ops->init(sk);
if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3f87c731477f..710101376a76 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1135,13 +1135,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
*/
if (pkt_len > mss) {
unsigned int new_len = (pkt_len / mss) * mss;
- if (!in_sack && new_len < pkt_len) {
+ if (!in_sack && new_len < pkt_len)
new_len += mss;
- if (new_len >= skb->len)
- return 0;
- }
pkt_len = new_len;
}
+
+ if (pkt_len >= skb->len && !in_sack)
+ return 0;
+
err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
if (err < 0)
return err;
@@ -3220,7 +3221,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
int delta;
/* Non-retransmitted hole got filled? That's reordering */
- if (reord < prior_fackets)
+ if (reord < prior_fackets && reord <= tp->fackets_out)
tcp_update_reordering(sk, tp->fackets_out - reord, 0);
delta = tcp_is_fack(tp) ? pkts_acked :
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 3feeca6a713d..c612daad9e92 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -62,7 +62,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
const struct net_offload *ops;
int proto;
struct frag_hdr *fptr;
- unsigned int unfrag_ip6hlen;
u8 *prevhdr;
int offset = 0;
bool encap, udpfrag;
@@ -121,8 +120,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
skb->network_header = (u8 *)ipv6h - skb->head;
if (udpfrag) {
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
- fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
+ int err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0) {
+ kfree_skb_list(segs);
+ return ERR_PTR(err);
+ }
+ fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
fptr->frag_off = htons(offset);
if (skb->next)
fptr->frag_off |= htons(IP6_MF);
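
With this change ip6_find_1stfragopt() returns a negative errno instead of an unchecked offset, and the GSO caller folds that into an error pointer after freeing the segment list. A compact userspace sketch of the ERR_PTR/IS_ERR encoding this relies on (simplified versions, not the kernel headers):

#include <errno.h>
#include <stdio.h>

/* Simplified error-pointer helpers: small negative errno values are
 * folded into the top of the address space, which no valid pointer
 * occupies. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *segment(int fail)
{
	static int buf;

	if (fail)
		return ERR_PTR(-EINVAL);	/* like the hunk above */
	return &buf;
}

int main(void)
{
	void *p = segment(1);

	if (IS_ERR(p))
		printf("error: %ld\n", PTR_ERR(p));
	return 0;
}
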
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cf90a9bf26a3..01b79c2499c4 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -571,7 +571,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int ptr, offset = 0, err = 0;
u8 *prevhdr, nexthdr = 0;
- hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ goto fail;
+ hlen = err;
nexthdr = *prevhdr;
mtu = ip6_skb_dst_mtu(skb);
@@ -1428,6 +1431,11 @@ alloc_new_skb:
*/
alloclen += sizeof(struct frag_hdr);
+ copy = datalen - transhdrlen - fraggap;
+ if (copy < 0) {
+ err = -EINVAL;
+ goto error;
+ }
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len,
@@ -1477,13 +1485,9 @@ alloc_new_skb:
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
- copy = datalen - transhdrlen - fraggap;
-
- if (copy < 0) {
- err = -EINVAL;
- kfree_skb(skb);
- goto error;
- } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+ if (copy > 0 &&
+ getfrag(from, data + transhdrlen, offset,
+ copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 1d184322a7b1..8b56c5240429 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
u16 offset = sizeof(struct ipv6hdr);
- struct ipv6_opt_hdr *exthdr =
- (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
unsigned int packet_len = skb_tail_pointer(skb) -
skb_network_header(skb);
int found_rhdr = 0;
*nexthdr = &ipv6_hdr(skb)->nexthdr;
- while (offset + 1 <= packet_len) {
+ while (offset <= packet_len) {
+ struct ipv6_opt_hdr *exthdr;
switch (**nexthdr) {
@@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
return offset;
}
- offset += ipv6_optlen(exthdr);
- *nexthdr = &exthdr->nexthdr;
+ if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+ return -EINVAL;
+
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
+ offset += ipv6_optlen(exthdr);
+ *nexthdr = &exthdr->nexthdr;
}
- return offset;
+ return -EINVAL;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);
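
The rewritten ip6_find_1stfragopt() now verifies that each extension header fits inside the packet before dereferencing it, and returns -EINVAL rather than walking past the tail. The same guard pattern on a plain byte buffer (a sketch; the TLV layout here is invented for illustration):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct opt_hdr {		/* invented 2-byte TLV header */
	uint8_t nexthdr;
	uint8_t len;		/* payload length in bytes */
};

/* Walk TLV options, refusing to read a header that would overrun buf. */
static int find_last_opt(const uint8_t *buf, size_t packet_len)
{
	size_t offset = 0;

	while (offset <= packet_len) {
		const struct opt_hdr *hdr;

		/* the check the patch adds: header must fit before use */
		if (offset + sizeof(*hdr) > packet_len)
			return -EINVAL;

		hdr = (const struct opt_hdr *)(buf + offset);
		if (hdr->nexthdr == 0)	/* 0 terminates our toy chain */
			return (int)offset;
		offset += sizeof(*hdr) + hdr->len;
	}
	return -EINVAL;
}

int main(void)
{
	uint8_t pkt[] = { 6, 2, 0xaa, 0xbb, 0, 0 };	/* one option, then end */

	printf("offset=%d\n", find_last_opt(pkt, sizeof(pkt)));
	return 0;
}
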
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 1737fc0f2988..4c753f4a3712 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -50,7 +50,7 @@ static struct inet_protosw pingv6_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_ICMPV6,
.prot = &pingv6_prot,
- .ops = &inet6_dgram_ops,
+ .ops = &inet6_sockraw_ops,
.flags = INET_PROTOSW_REUSE,
};
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d13ed145e93a..0ef8e114c8ab 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1304,7 +1304,7 @@ void raw6_proc_exit(void)
#endif /* CONFIG_PROC_FS */
/* Same as inet6_dgram_ops, sans udp_poll. */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8532768b4eaa..753f41260bf5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1035,6 +1035,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
+ newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
@@ -1104,6 +1105,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
+ newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 7441e1e63893..01582966ffa0 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
u8 frag_hdr_sz = sizeof(struct frag_hdr);
__wsum csum;
int tnl_hlen;
+ int err;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
@@ -97,7 +98,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ return ERR_PTR(err);
+ unfrag_ip6hlen = err;
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e015906f9ca..07d36573f50b 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
iph = ipv6_hdr(skb);
hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+ if (hdr_len < 0)
+ return hdr_len;
skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
skb_set_network_header(skb, -x->props.header_len);
skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 4e344105b3fd..1d3bbe6e1183 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -28,6 +28,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
iph = ipv6_hdr(skb);
hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+ if (hdr_len < 0)
+ return hdr_len;
skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
skb_set_network_header(skb, -x->props.header_len);
skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index b6493b3f11a9..2d7859c03fd2 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -472,15 +472,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
struct sctp_association **app,
struct sctp_transport **tpp)
{
+ struct sctp_init_chunk *chunkhdr, _chunkhdr;
union sctp_addr saddr;
union sctp_addr daddr;
struct sctp_af *af;
struct sock *sk = NULL;
struct sctp_association *asoc;
struct sctp_transport *transport = NULL;
- struct sctp_init_chunk *chunkhdr;
__u32 vtag = ntohl(sctphdr->vtag);
- int len = skb->len - ((void *)sctphdr - (void *)skb->data);
*app = NULL; *tpp = NULL;
@@ -515,13 +514,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
* discard the packet.
*/
if (vtag == 0) {
- chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
- if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
- + sizeof(__be32) ||
+ /* chunk header + first 4 octets of init header */
+ chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
+ sizeof(struct sctphdr),
+ sizeof(struct sctp_chunkhdr) +
+ sizeof(__be32), &_chunkhdr);
+ if (!chunkhdr ||
chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
- ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
+ ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
goto out;
- }
+
} else if (vtag != asoc->c.peer_vtag) {
goto out;
}
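
sctp_err_lookup() now goes through skb_header_pointer(), so the chunk header is copied into a stack buffer when the needed bytes are not directly available, instead of being read through a raw offset that may run past the data actually present. A userspace analogy of that helper's contract (simplified; the real helper copies only when the skb is nonlinear):

#include <stdio.h>
#include <string.h>

/* Simplified skb_header_pointer(): return a pointer to len bytes at
 * offset if they exist in the buffer, using the caller's scratch space;
 * return NULL if the request runs past the data. */
static const void *header_pointer(const unsigned char *data, size_t data_len,
				  size_t offset, size_t len, void *scratch)
{
	if (offset + len > data_len)
		return NULL;	/* caller must bail out, as the hunk does */
	memcpy(scratch, data + offset, len);
	return scratch;
}

struct chunkhdr {
	unsigned char type;
	unsigned char flags;
	unsigned short length;
};

int main(void)
{
	unsigned char pkt[8] = { 1, 0, 0, 20, 0xde, 0xad, 0xbe, 0xef };
	struct chunkhdr _hdr;
	const struct chunkhdr *hdr;

	hdr = header_pointer(pkt, sizeof(pkt), 0, sizeof(_hdr), &_hdr);
	if (!hdr)
		return 1;
	printf("type=%u\n", hdr->type);
	return 0;
}
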
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ce46f1c7f133..7527c168e471 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -239,12 +239,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
struct sctp_bind_addr *bp;
struct ipv6_pinfo *np = inet6_sk(sk);
struct sctp_sockaddr_entry *laddr;
- union sctp_addr *baddr = NULL;
union sctp_addr *daddr = &t->ipaddr;
union sctp_addr dst_saddr;
struct in6_addr *final_p, final;
__u8 matchlen = 0;
- __u8 bmatchlen;
sctp_scope_t scope;
memset(fl6, 0, sizeof(struct flowi6));
@@ -311,23 +309,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
*/
rcu_read_lock();
list_for_each_entry_rcu(laddr, &bp->address_list, list) {
- if (!laddr->valid)
+ struct dst_entry *bdst;
+ __u8 bmatchlen;
+
+ if (!laddr->valid ||
+ laddr->state != SCTP_ADDR_SRC ||
+ laddr->a.sa.sa_family != AF_INET6 ||
+ scope > sctp_scope(&laddr->a))
continue;
- if ((laddr->state == SCTP_ADDR_SRC) &&
- (laddr->a.sa.sa_family == AF_INET6) &&
- (scope <= sctp_scope(&laddr->a))) {
- bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
- if (!baddr || (matchlen < bmatchlen)) {
- baddr = &laddr->a;
- matchlen = bmatchlen;
- }
- }
- }
- if (baddr) {
- fl6->saddr = baddr->v6.sin6_addr;
- fl6->fl6_sport = baddr->v6.sin6_port;
+
+ fl6->saddr = laddr->a.v6.sin6_addr;
+ fl6->fl6_sport = laddr->a.v6.sin6_port;
final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
- dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+ bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
+
+ if (!IS_ERR(bdst) &&
+ ipv6_chk_addr(dev_net(bdst->dev),
+ &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
+ if (!IS_ERR_OR_NULL(dst))
+ dst_release(dst);
+ dst = bdst;
+ break;
+ }
+
+ bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
+ if (matchlen > bmatchlen)
+ continue;
+
+ if (!IS_ERR_OR_NULL(dst))
+ dst_release(dst);
+ dst = bdst;
+ matchlen = bmatchlen;
}
rcu_read_unlock();
@@ -662,6 +674,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
rcu_read_lock();
opt = rcu_dereference(np->opt);
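
The reworked sctp_v6_get_dst() loop performs the route lookup per candidate source address, keeps the candidate with the longest prefix match against the destination, and stops early when a route's device actually owns the candidate address. The selection skeleton, reduced to integers (a sketch, not the SCTP code):

#include <stdio.h>

/* Keep the candidate with the best score, but accept an exact winner
 * immediately - the shape of the loop in the hunk above. Later
 * candidates win ties, as in the kernel version. */
static int pick_best(const int *score, const int *exact, int n)
{
	int best = -1, best_score = -1;

	for (int i = 0; i < n; i++) {
		if (exact[i])
			return i;	/* device owns the address: done */
		if (best_score > score[i])
			continue;
		best = i;
		best_score = score[i];
	}
	return best;
}

int main(void)
{
	int score[] = { 8, 32, 16 };	/* prefix-match lengths */
	int exact[] = { 0, 0, 0 };

	printf("best=%d\n", pick_best(score, exact, 3));	/* -> 1 */
	return 0;
}
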
diff --git a/security/keys/key.c b/security/keys/key.c
index 534808915371..09c10b181881 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -934,12 +934,11 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
/* the key must be writable */
ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
- goto error;
+ return ret;
/* attempt to update it if supported */
- ret = -EOPNOTSUPP;
if (!key->type->update)
- goto error;
+ return -EOPNOTSUPP;
memset(&prep, 0, sizeof(prep));
prep.data = payload;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 442e350c209d..671709d8610d 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -97,7 +97,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
/* pull the payload in if one was supplied */
payload = NULL;
- if (_payload) {
+ if (plen) {
ret = -ENOMEM;
payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
if (!payload) {
@@ -327,7 +327,7 @@ long keyctl_update_key(key_serial_t id,
/* pull the payload in if one was supplied */
payload = NULL;
- if (_payload) {
+ if (plen) {
ret = -ENOMEM;
payload = kmalloc(plen, GFP_KERNEL);
if (!payload)
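
Both keyctl hunks switch the guard from "was a payload pointer supplied" to "is the payload length nonzero", so a NULL pointer with a nonzero length can no longer skip the copy-in and later be treated as a valid buffer of plen bytes. A minimal sketch of the failure mode (function name hypothetical, memcpy standing in for copy_from_user):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical copy-in path: with the old test (ptr != NULL), a caller
 * passing ptr == NULL but len > 0 skipped allocation entirely, yet len
 * was still handed to the consumer. Testing len instead closes that. */
static int pull_payload(const void *user_ptr, size_t len, void **out)
{
	*out = NULL;
	if (len) {				/* the fixed condition */
		if (!user_ptr)
			return -EFAULT;		/* copy_from_user would fail */
		*out = malloc(len);
		if (!*out)
			return -ENOMEM;
		memcpy(*out, user_ptr, len);
	}
	return 0;
}

int main(void)
{
	void *p;

	printf("NULL,16 -> %d\n", pull_payload(NULL, 16, &p));	/* -EFAULT */
	printf("NULL,0  -> %d\n", pull_payload(NULL, 0, &p));	/* 0, no buffer */
	return 0;
}
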
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 30b28e80c6e6..f0675acecc93 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1625,6 +1625,7 @@ static int snd_timer_user_tselect(struct file *file,
if (err < 0)
goto __err;
+ tu->qhead = tu->qtail = tu->qused = 0;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
@@ -1962,6 +1963,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
+ mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
@@ -1977,7 +1979,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
+ mutex_unlock(&tu->ioctl_lock);
schedule();
+ mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -1997,7 +2001,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
tu->qused--;
spin_unlock_irq(&tu->qlock);
- mutex_lock(&tu->ioctl_lock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
@@ -2007,7 +2010,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
- mutex_unlock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
if (err < 0)
@@ -2017,6 +2019,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
}
_error:
spin_unlock_irq(&tu->qlock);
+ mutex_unlock(&tu->ioctl_lock);
return result > 0 ? result : err;
}
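
The timer hunks hold ioctl_lock for the whole of the read and release it (together with the spinlock) only around schedule(), so an ioctl can no longer resize or reset the queue while a reader is between dropping qlock and copying out an event. The sleep pattern in pthread terms (a sketch of the locking shape, not the ALSA code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~ioctl_lock */
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;	/* ~tu->qlock */
static pthread_cond_t qchange = PTHREAD_COND_INITIALIZER;
static int qused;

/* Wait for data while holding big_lock around every queue access, but
 * release both locks for the actual sleep - the fixed read loop. */
static void read_wait(void)
{
	pthread_mutex_lock(&big_lock);
	pthread_mutex_lock(&qlock);
	while (!qused) {
		pthread_mutex_unlock(&big_lock);
		pthread_cond_wait(&qchange, &qlock);	/* drops qlock to sleep */
		pthread_mutex_unlock(&qlock);
		pthread_mutex_lock(&big_lock);
		pthread_mutex_lock(&qlock);
	}
	qused--;				/* consume one event */
	pthread_mutex_unlock(&qlock);
	pthread_mutex_unlock(&big_lock);
}

static void *producer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&qlock);
	qused = 1;
	pthread_cond_signal(&qchange);
	pthread_mutex_unlock(&qlock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	read_wait();
	pthread_join(t, NULL);
	puts("got event");
	return 0;
}
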
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 37b70f8e878f..0abab7926dca 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1537,6 +1537,8 @@ static const struct snd_pci_quirk stac9200_fixup_tbl[] = {
"Dell Inspiron 1501", STAC_9200_DELL_M26),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6,
"unknown Dell", STAC_9200_DELL_M26),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201,
+ "Dell Latitude D430", STAC_9200_DELL_M22),
/* Panasonic */
SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC),
/* Gateway machines need EAPD to be set on resume */
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 61dd478b97e4..28aaf2172221 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -1999,13 +1999,11 @@ static struct cal_block_data *adm_find_cal_by_path(int cal_index, int path)
if (cal_index == ADM_AUDPROC_CAL) {
audproc_cal_info = cal_block->cal_info;
- if ((audproc_cal_info->path == path) &&
- (cal_block->cal_data.size > 0))
+ if (audproc_cal_info->path == path)
return cal_block;
} else if (cal_index == ADM_AUDVOL_CAL) {
audvol_cal_info = cal_block->cal_info;
- if ((audvol_cal_info->path == path) &&
- (cal_block->cal_data.size > 0))
+ if (audvol_cal_info->path == path)
return cal_block;
}
}
@@ -2032,14 +2030,12 @@ static struct cal_block_data *adm_find_cal_by_app_type(int cal_index, int path,
if (cal_index == ADM_AUDPROC_CAL) {
audproc_cal_info = cal_block->cal_info;
if ((audproc_cal_info->path == path) &&
- (audproc_cal_info->app_type == app_type) &&
- (cal_block->cal_data.size > 0))
+ (audproc_cal_info->app_type == app_type))
return cal_block;
} else if (cal_index == ADM_AUDVOL_CAL) {
audvol_cal_info = cal_block->cal_info;
if ((audvol_cal_info->path == path) &&
- (audvol_cal_info->app_type == app_type) &&
- (cal_block->cal_data.size > 0))
+ (audvol_cal_info->app_type == app_type))
return cal_block;
}
}
@@ -2070,15 +2066,13 @@ static struct cal_block_data *adm_find_cal(int cal_index, int path,
if ((audproc_cal_info->path == path) &&
(audproc_cal_info->app_type == app_type) &&
(audproc_cal_info->acdb_id == acdb_id) &&
- (audproc_cal_info->sample_rate == sample_rate) &&
- (cal_block->cal_data.size > 0))
+ (audproc_cal_info->sample_rate == sample_rate))
return cal_block;
} else if (cal_index == ADM_AUDVOL_CAL) {
audvol_cal_info = cal_block->cal_info;
if ((audvol_cal_info->path == path) &&
(audvol_cal_info->app_type == app_type) &&
- (audvol_cal_info->acdb_id == acdb_id) &&
- (cal_block->cal_data.size > 0))
+ (audvol_cal_info->acdb_id == acdb_id))
return cal_block;
}
}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index cac2d4975a15..e63b4346e82e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1792,6 +1792,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
for (i = 0; i < card->num_aux_devs; i++)
soc_remove_aux_dev(card, i);
+ /* free the ALSA card at first; this syncs with pending operations */
+ snd_card_free(card->snd_card);
+
/* remove and free each DAI */
soc_remove_dai_links(card);
@@ -1803,9 +1806,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
snd_soc_dapm_free(&card->dapm);
- snd_card_free(card->snd_card);
return 0;
-
}
/* removes a socdev */