-rw-r--r--  Documentation/filesystems/f2fs.txt | 18
-rw-r--r--  Makefile | 2
-rw-r--r--  android/configs/README | 15
-rw-r--r--  android/configs/android-base-arm64.cfg | 5
-rw-r--r--  android/configs/android-base.cfg | 164
-rw-r--r--  android/configs/android-recommended.cfg | 140
-rw-r--r--  arch/alpha/include/asm/types.h | 2
-rw-r--r--  arch/alpha/include/uapi/asm/types.h | 12
-rw-r--r--  arch/arc/kernel/entry.S | 6
-rw-r--r--  arch/arc/mm/tlb.c | 3
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-mdss.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts | 19
-rw-r--r--  arch/arm/kvm/mmu.c | 23
-rw-r--r--  arch/arm/mm/fault.c | 5
-rw-r--r--  arch/arm64/configs/msm-auto-gvm-perf_defconfig | 2
-rw-r--r--  arch/arm64/configs/msm-auto-gvm_defconfig | 2
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 2
-rw-r--r--  arch/arm64/mm/fault.c | 5
-rw-r--r--  arch/mips/math-emu/dp_fmax.c | 84
-rw-r--r--  arch/mips/math-emu/dp_fmin.c | 86
-rw-r--r--  arch/mips/math-emu/sp_fmax.c | 84
-rw-r--r--  arch/mips/math-emu/sp_fmin.c | 86
-rw-r--r--  arch/powerpc/kernel/align.c | 119
-rw-r--r--  arch/x86/include/asm/elf.h | 5
-rw-r--r--  arch/x86/include/asm/io.h | 4
-rw-r--r--  block/blk-core.c | 2
-rw-r--r--  crypto/algif_skcipher.c | 13
-rw-r--r--  drivers/android/Kconfig | 10
-rw-r--r--  drivers/android/Makefile | 1
-rw-r--r--  drivers/android/binder.c | 7
-rw-r--r--  drivers/android/binder_alloc.c | 425
-rw-r--r--  drivers/android/binder_alloc.h | 33
-rw-r--r--  drivers/android/binder_alloc_selftest.c | 310
-rw-r--r--  drivers/android/binder_trace.h | 55
-rw-r--r--  drivers/ata/pata_amd.c | 1
-rw-r--r--  drivers/ata/pata_cs5536.c | 1
-rw-r--r--  drivers/base/bus.c | 2
-rw-r--r--  drivers/block/skd_main.c | 21
-rw-r--r--  drivers/bluetooth/btusb.c | 1
-rw-r--r--  drivers/char/diag/diag_memorydevice.c | 13
-rw-r--r--  drivers/char/diag/diagfwd.c | 3
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.c | 124
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.h | 2
-rw-r--r--  drivers/clk/msm/clock-dummy.c | 9
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 7
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c | 8
-rw-r--r--  drivers/crypto/msm/qcedev.c | 2
-rw-r--r--  drivers/gpu/drm/i2c/adv7511.c | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 5
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_plane.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 2
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c | 8
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-jz4780.c | 4
-rw-r--r--  drivers/input/mouse/trackpoint.c | 4
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/input/touchscreen/st/fts.c | 28
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 5
-rw-r--r--  drivers/md/bcache/bcache.h | 1
-rw-r--r--  drivers/md/bcache/request.c | 6
-rw-r--r--  drivers/md/bcache/super.c | 7
-rw-r--r--  drivers/md/bcache/sysfs.c | 4
-rw-r--r--  drivers/md/bcache/util.c | 50
-rw-r--r--  drivers/md/bcache/writeback.c | 20
-rw-r--r--  drivers/md/bcache/writeback.h | 21
-rw-r--r--  drivers/md/bitmap.c | 5
-rw-r--r--  drivers/md/raid5.c | 2
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c | 2
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc.c | 39
-rw-r--r--  drivers/media/usb/uvc/uvc_ctrl.c | 7
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 4
-rw-r--r--  drivers/misc/hdcp.c | 47
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/wireless/cnss/Kconfig | 2
-rw-r--r--  drivers/net/wireless/cnss2/main.c | 39
-rw-r--r--  drivers/net/wireless/cnss2/pci.c | 30
-rw-r--r--  drivers/net/wireless/cnss2/pci.h | 1
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 6
-rw-r--r--  drivers/net/wireless/p54/fwio.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/pci.c | 4
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 1
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c | 2
-rw-r--r--  drivers/power/supply/qcom/fg-core.h | 1
-rw-r--r--  drivers/power/supply/qcom/fg-util.c | 29
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 86
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 33
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 17
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h | 6
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 7
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 16
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 5
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 8
-rw-r--r--  drivers/scsi/sg.c | 233
-rw-r--r--  drivers/scsi/storvsc_drv.c | 2
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-debugfs.c | 12
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 41
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 4
-rw-r--r--  drivers/soc/qcom/icnss.c | 29
-rw-r--r--  drivers/soc/qcom/peripheral-loader.c | 27
-rw-r--r--  drivers/soc/qcom/peripheral-loader.h | 6
-rw-r--r--  drivers/soc/qcom/pil-msa.h | 1
-rw-r--r--  drivers/soc/qcom/pil-q6v5-mss.c | 13
-rw-r--r--  drivers/soc/qcom/subsys-pil-tz.c | 2
-rw-r--r--  drivers/soundwire/swr-wcd-ctrl.c | 24
-rwxr-xr-x  drivers/soundwire/swr-wcd-ctrl.h | 1
-rw-r--r--  drivers/staging/android/fiq_debugger/fiq_debugger.c | 2
-rw-r--r--  drivers/staging/rts5208/rtsx_scsi.c | 2
-rw-r--r--  drivers/tty/tty_buffer.c | 26
-rw-r--r--  drivers/usb/core/devio.c | 4
-rw-r--r--  drivers/usb/core/quirks.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_gsi.c | 230
-rw-r--r--  drivers/usb/gadget/function/f_qdss.c | 13
-rw-r--r--  drivers/usb/host/pci-quirks.c | 35
-rw-r--r--  drivers/usb/serial/option.c | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.c | 75
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.h | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_aux.c | 42
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_util.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_host.c | 54
-rw-r--r--  fs/Kconfig | 2
-rw-r--r--  fs/Makefile | 1
-rw-r--r--  fs/btrfs/super.c | 2
-rw-r--r--  fs/btrfs/volumes.c | 2
-rw-r--r--  fs/ceph/addr.c | 24
-rw-r--r--  fs/ceph/cache.c | 12
-rw-r--r--  fs/cifs/dir.c | 2
-rw-r--r--  fs/cifs/smb2pdu.h | 4
-rw-r--r--  fs/crypto/Kconfig | 18
-rw-r--r--  fs/crypto/Makefile | 3
-rw-r--r--  fs/crypto/crypto.c | 568
-rw-r--r--  fs/crypto/fname.c | 414
-rw-r--r--  fs/crypto/keyinfo.c | 333
-rw-r--r--  fs/crypto/policy.c | 250
-rw-r--r--  fs/dlm/user.c | 4
-rw-r--r--  fs/eventpoll.c | 37
-rw-r--r--  fs/ext4/crypto.c | 6
-rw-r--r--  fs/ext4/ext4.h | 5
-rw-r--r--  fs/ext4/extents.c | 12
-rw-r--r--  fs/ext4/inode.c | 15
-rw-r--r--  fs/ext4/super.c | 38
-rw-r--r--  fs/f2fs/Kconfig | 21
-rw-r--r--  fs/f2fs/Makefile | 2
-rw-r--r--  fs/f2fs/acl.c | 23
-rw-r--r--  fs/f2fs/acl.h | 3
-rw-r--r--  fs/f2fs/checkpoint.c | 547
-rw-r--r--  fs/f2fs/crypto.c | 491
-rw-r--r--  fs/f2fs/crypto_fname.c | 440
-rw-r--r--  fs/f2fs/data.c | 1330
-rw-r--r--  fs/f2fs/debug.c | 88
-rw-r--r--  fs/f2fs/dir.c | 491
-rw-r--r--  fs/f2fs/extent_cache.c | 315
-rw-r--r--  fs/f2fs/f2fs.h | 1112
-rw-r--r--  fs/f2fs/file.c | 1331
-rw-r--r--  fs/f2fs/gc.c | 382
-rw-r--r--  fs/f2fs/gc.h | 8
-rw-r--r--  fs/f2fs/hash.c | 7
-rw-r--r--  fs/f2fs/inline.c | 280
-rw-r--r--  fs/f2fs/inode.c | 199
-rw-r--r--  fs/f2fs/namei.c | 411
-rw-r--r--  fs/f2fs/node.c | 927
-rw-r--r--  fs/f2fs/node.h | 134
-rw-r--r--  fs/f2fs/recovery.c | 258
-rw-r--r--  fs/f2fs/segment.c | 907
-rw-r--r--  fs/f2fs/segment.h | 65
-rw-r--r--  fs/f2fs/shrinker.c | 14
-rw-r--r--  fs/f2fs/super.c | 991
-rw-r--r--  fs/f2fs/trace.c | 6
-rw-r--r--  fs/f2fs/xattr.c | 71
-rw-r--r--  fs/f2fs/xattr.h | 3
-rw-r--r--  fs/namespace.c | 4
-rw-r--r--  fs/nfs/internal.h | 1
-rw-r--r--  fs/nfs/pagelist.c | 26
-rw-r--r--  fs/nfs/pnfs.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 10
-rw-r--r--  fs/sdcardfs/derived_perm.c | 3
-rw-r--r--  fs/sdcardfs/inode.c | 12
-rw-r--r--  fs/sdcardfs/main.c | 7
-rw-r--r--  fs/sdcardfs/sdcardfs.h | 1
-rw-r--r--  fs/sdcardfs/super.c | 2
-rw-r--r--  fs/xfs/xfs_linux.h | 9
-rw-r--r--  include/asm-generic/topology.h | 6
-rw-r--r--  include/linux/cpufreq.h | 10
-rw-r--r--  include/linux/dcache.h | 1
-rw-r--r--  include/linux/f2fs_fs.h | 54
-rw-r--r--  include/linux/fs.h | 7
-rw-r--r--  include/linux/fscrypto.h | 411
-rw-r--r--  include/linux/lightnvm.h | 1
-rw-r--r--  include/linux/mm_types.h | 2
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/sched.h | 5
-rw-r--r--  include/linux/tty_flip.h | 3
-rw-r--r--  include/linux/workqueue.h | 2
-rw-r--r--  include/net/inet_frag.h | 41
-rw-r--r--  include/net/ip6_fib.h | 32
-rw-r--r--  include/sound/apr_audio-v2.h | 67
-rw-r--r--  include/sound/q6core.h | 6
-rw-r--r--  include/trace/events/f2fs.h | 85
-rw-r--r--  include/uapi/linux/fs.h | 18
-rw-r--r--  kernel/cpuset.c | 1
-rw-r--r--  kernel/fork.c | 53
-rw-r--r--  kernel/gcov/base.c | 6
-rw-r--r--  kernel/gcov/gcc_4_7.c | 4
-rw-r--r--  kernel/locking/locktorture.c | 4
-rw-r--r--  kernel/locking/osq_lock.c | 13
-rw-r--r--  kernel/sched/core.c | 8
-rw-r--r--  kernel/sched/cpufreq_sched.c | 14
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 62
-rw-r--r--  kernel/sched/deadline.c | 2
-rw-r--r--  kernel/sched/fair.c | 44
-rw-r--r--  kernel/sched/sched.h | 19
-rw-r--r--  kernel/sched/walt.c | 8
-rw-r--r--  kernel/trace/ftrace.c | 10
-rw-r--r--  kernel/trace/trace.c | 2
-rw-r--r--  kernel/trace/trace_selftest.c | 2
-rw-r--r--  net/bluetooth/l2cap_core.c | 80
-rw-r--r--  net/ieee802154/6lowpan/reassembly.c | 11
-rw-r--r--  net/ipv4/inet_fragment.c | 4
-rw-r--r--  net/ipv4/ip_fragment.c | 12
-rw-r--r--  net/ipv4/tcp.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 56
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 12
-rw-r--r--  net/ipv6/output_core.c | 6
-rw-r--r--  net/ipv6/reassembly.c | 12
-rw-r--r--  net/ipv6/route.c | 17
-rw-r--r--  net/netfilter/xt_qtaguid.c | 20
-rw-r--r--  net/xfrm/xfrm_policy.c | 6
-rw-r--r--  sound/isa/msnd/msnd_midi.c | 30
-rw-r--r--  sound/isa/msnd/msnd_pinnacle.c | 23
-rw-r--r--  sound/pci/au88x0/au88x0_core.c | 14
-rw-r--r--  sound/soc/msm/qdsp6v2/q6core.c | 136
243 files changed, 11844 insertions, 5863 deletions
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index b102b436563e..753dd4f96afe 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -102,14 +102,16 @@ background_gc=%s Turn on/off cleaning operations, namely garbage
collection, triggered in background when I/O subsystem is
idle. If background_gc=on, it will turn on the garbage
collection and if background_gc=off, garbage collection
- will be truned off. If background_gc=sync, it will turn
+ will be turned off. If background_gc=sync, it will turn
on synchronous garbage collection running in background.
Default value for this option is on. So garbage
collection is on by default.
disable_roll_forward Disable the roll-forward recovery routine
norecovery Disable the roll-forward recovery routine, mounted read-
only (i.e., -o ro,disable_roll_forward)
-discard Issue discard/TRIM commands when a segment is cleaned.
+discard/nodiscard Enable/disable real-time discard in f2fs. If discard is
+ enabled, f2fs will issue discard/TRIM commands when a
+ segment is cleaned.
no_heap Disable heap-style segment allocation which finds free
segments for data from the beginning of main area, while
for node from the end of main area.
@@ -129,6 +131,7 @@ inline_dentry Enable the inline dir feature: data in new created
directory entries can be written into inode block. The
space of inode block which is used to store inline
dentries is limited to ~3.4k.
+noinline_dentry Disable the inline dentry feature.
flush_merge Merge concurrent cache_flush commands as much as possible
to eliminate redundant command issues. If the underlying
device handles the cache_flush command relatively slowly,
@@ -145,10 +148,15 @@ extent_cache Enable an extent cache based on rb-tree, it can cache
as many as extent which map between contiguous logical
address and physical address per inode, resulting in
increasing the cache hit ratio. Set by default.
-noextent_cache Diable an extent cache based on rb-tree explicitly, see
+noextent_cache Disable an extent cache based on rb-tree explicitly, see
the above extent_cache mount option.
noinline_data Disable the inline data feature, inline data feature is
enabled by default.
+data_flush Enable data flushing before checkpoint in order to
+ persist data of regular files and symlinks.
+mode=%s Control block allocation mode which supports "adaptive"
+ and "lfs". In "lfs" mode, there should be no random
+ writes towards main area.
================================================================================
DEBUGFS ENTRIES
@@ -192,7 +200,7 @@ Files in /sys/fs/f2fs/<devname>
policy for garbage collection. Setting gc_idle = 0
(default) will disable this option. Setting
gc_idle = 1 will select the Cost Benefit approach
- & setting gc_idle = 2 will select the greedy aproach.
+ & setting gc_idle = 2 will select the greedy approach.
reclaim_segments This parameter controls the number of prefree
segments to be reclaimed. If the number of prefree
@@ -298,7 +306,7 @@ The dump.f2fs shows the information of specific inode and dumps SSA and SIT to
file. Each file is dump_ssa and dump_sit.
The dump.f2fs is used to debug on-disk data structures of the f2fs filesystem.
-It shows on-disk inode information reconized by a given inode number, and is
+It shows on-disk inode information recognized by a given inode number, and is
able to dump all the SSA and SIT entries into predefined files, ./dump_ssa and
./dump_sit respectively.
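
As a minimal user-space illustration of the mount options documented above
(a sketch only; the device and mountpoint paths are placeholders, not taken
from this patch):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Mount f2fs with a few of the options described above;
             * "/dev/sdb1" and "/mnt/f2fs" are example paths. */
            if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
                      "background_gc=sync,discard,mode=lfs") != 0)
                    perror("mount");
            return 0;
    }
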
diff --git a/Makefile b/Makefile
index 919851a86eaa..6229b4d49877 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 85
+SUBLEVEL = 89
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/android/configs/README b/android/configs/README
deleted file mode 100644
index 8798731f8904..000000000000
--- a/android/configs/README
+++ /dev/null
@@ -1,15 +0,0 @@
-The files in this directory are meant to be used as a base for an Android
-kernel config. All devices should have the options in android-base.cfg enabled.
-While not mandatory, the options in android-recommended.cfg enable advanced
-Android features.
-
-Assuming you already have a minimalist defconfig for your device, a possible
-way to enable these options would be:
-
- ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig android/configs/android-base.cfg android/configs/android-recommended.cfg
-
-This will generate a .config that can then be used to save a new defconfig or
-compile a new kernel with Android features enabled.
-
-Because there is no tool to consistently generate these config fragments,
-lets keep them alphabetically sorted instead of random.
diff --git a/android/configs/android-base-arm64.cfg b/android/configs/android-base-arm64.cfg
deleted file mode 100644
index 43f23d6b5391..000000000000
--- a/android/configs/android-base-arm64.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-# KEEP ALPHABETICALLY SORTED
-CONFIG_ARMV8_DEPRECATED=y
-CONFIG_CP15_BARRIER_EMULATION=y
-CONFIG_SETEND_EMULATION=y
-CONFIG_SWP_EMULATION=y
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
deleted file mode 100644
index 419dca62542a..000000000000
--- a/android/configs/android-base.cfg
+++ /dev/null
@@ -1,164 +0,0 @@
-# KEEP ALPHABETICALLY SORTED
-# CONFIG_DEVKMEM is not set
-# CONFIG_DEVMEM is not set
-# CONFIG_FHANDLE is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_NFSD is not set
-# CONFIG_NFS_FS is not set
-# CONFIG_OABI_COMPAT is not set
-# CONFIG_SYSVIPC is not set
-# CONFIG_USELIB is not set
-CONFIG_ANDROID=y
-CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
-CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ASHMEM=y
-CONFIG_AUDIT=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_DEFAULT_SECURITY_SELINUX=y
-CONFIG_EMBEDDED=y
-CONFIG_FB=y
-CONFIG_HARDENED_USERCOPY=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-CONFIG_INET=y
-CONFIG_INET_DIAG_DESTROY=y
-CONFIG_INET_ESP=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_MANGLE=y
-CONFIG_IP6_NF_RAW=y
-CONFIG_IP6_NF_TARGET_REJECT=y
-CONFIG_IPV6=y
-CONFIG_IPV6_MIP6=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_NF_ARPFILTER=y
-CONFIG_IP_NF_ARPTABLES=y
-CONFIG_IP_NF_ARP_MANGLE=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_MANGLE=y
-CONFIG_IP_NF_MATCH_AH=y
-CONFIG_IP_NF_MATCH_ECN=y
-CONFIG_IP_NF_MATCH_TTL=y
-CONFIG_IP_NF_NAT=y
-CONFIG_IP_NF_RAW=y
-CONFIG_IP_NF_SECURITY=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
-CONFIG_IP_NF_TARGET_NETMAP=y
-CONFIG_IP_NF_TARGET_REDIRECT=y
-CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_NET=y
-CONFIG_NETDEVICES=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_XT_MATCH_COMMENT=y
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_HELPER=y
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-CONFIG_NETFILTER_XT_MATCH_LENGTH=y
-CONFIG_NETFILTER_XT_MATCH_LIMIT=y
-CONFIG_NETFILTER_XT_MATCH_MAC=y
-CONFIG_NETFILTER_XT_MATCH_MARK=y
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
-CONFIG_NETFILTER_XT_MATCH_POLICY=y
-CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA=y
-CONFIG_NETFILTER_XT_MATCH_SOCKET=y
-CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
-CONFIG_NETFILTER_XT_MATCH_STRING=y
-CONFIG_NETFILTER_XT_MATCH_TIME=y
-CONFIG_NETFILTER_XT_MATCH_U32=y
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_MARK=y
-CONFIG_NETFILTER_XT_TARGET_NFLOG=y
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_SECMARK=y
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
-CONFIG_NETFILTER_XT_TARGET_TPROXY=y
-CONFIG_NETFILTER_XT_TARGET_TRACE=y
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_CLS_U32=y
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_U32=y
-CONFIG_NET_KEY=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_HTB=y
-CONFIG_NF_CONNTRACK=y
-CONFIG_NF_CONNTRACK_AMANDA=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CONNTRACK_FTP=y
-CONFIG_NF_CONNTRACK_H323=y
-CONFIG_NF_CONNTRACK_IPV4=y
-CONFIG_NF_CONNTRACK_IPV6=y
-CONFIG_NF_CONNTRACK_IRC=y
-CONFIG_NF_CONNTRACK_NETBIOS_NS=y
-CONFIG_NF_CONNTRACK_PPTP=y
-CONFIG_NF_CONNTRACK_SANE=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_TFTP=y
-CONFIG_NF_CT_NETLINK=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_NAT=y
-CONFIG_NO_HZ=y
-CONFIG_PACKET=y
-CONFIG_PM_AUTOSLEEP=y
-CONFIG_PM_WAKELOCKS=y
-CONFIG_PPP=y
-CONFIG_PPPOLAC=y
-CONFIG_PPPOPNS=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_MPPE=y
-CONFIG_PREEMPT=y
-CONFIG_PROFILING=y
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_RTC_CLASS=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_SECCOMP=y
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_STAGING=y
-CONFIG_SYNC=y
-CONFIG_TUN=y
-CONFIG_UID_SYS_STATS=y
-CONFIG_UNIX=y
-CONFIG_USB_CONFIGFS=y
-CONFIG_USB_CONFIGFS_F_ACC=y
-CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
-CONFIG_USB_CONFIGFS_UEVENT=y
-CONFIG_USB_GADGET=y
-CONFIG_XFRM_USER=y
diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg
deleted file mode 100644
index 6550d0423f50..000000000000
--- a/android/configs/android-recommended.cfg
+++ /dev/null
@@ -1,140 +0,0 @@
-# KEEP ALPHABETICALLY SORTED
-# CONFIG_AIO is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_NF_CONNTRACK_SIP is not set
-# CONFIG_PM_WAKELOCKS_GC is not set
-# CONFIG_VT is not set
-CONFIG_ANDROID_TIMED_GPIO=y
-CONFIG_ARM64_SW_TTBR0_PAN=y
-CONFIG_ARM_KERNMEM_PERMS=y
-CONFIG_ARM64_SW_TTBR0_PAN=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_CC_STACKPROTECTOR_STRONG=y
-CONFIG_COMPACTION=y
-CONFIG_CPU_SW_DOMAIN_PAN=y
-CONFIG_DEBUG_RODATA=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_UEVENT=y
-CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
-CONFIG_DRAGONRISE_FF=y
-CONFIG_ENABLE_DEFAULT_TRACERS=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_FUSE_FS=y
-CONFIG_GREENASIA_FF=y
-CONFIG_HIDRAW=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_ACRUX=y
-CONFIG_HID_ACRUX_FF=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_DRAGONRISE=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_EMS_FF=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GREENASIA=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_HOLTEK=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_KEYTOUCH=y
-CONFIG_HID_KYE=y
-CONFIG_HID_LCPOWER=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_LOGITECH_DJ=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_ORTEK=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_PICOLCD=y
-CONFIG_HID_PRIMAX=y
-CONFIG_HID_PRODIKEYS=y
-CONFIG_HID_ROCCAT=y
-CONFIG_HID_SAITEK=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SMARTJOYPLUS=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SPEEDLINK=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_HID_THRUSTMASTER=y
-CONFIG_HID_TIVO=y
-CONFIG_HID_TOPSEED=y
-CONFIG_HID_TWINHAN=y
-CONFIG_HID_UCLOGIC=y
-CONFIG_HID_WACOM=y
-CONFIG_HID_WALTOP=y
-CONFIG_HID_WIIMOTE=y
-CONFIG_HID_ZEROPLUS=y
-CONFIG_HID_ZYDACRON=y
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_GPIO=y
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_INPUT_KEYCHORD=y
-CONFIG_INPUT_KEYRESET=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_TABLET=y
-CONFIG_INPUT_UINPUT=y
-CONFIG_ION=y
-CONFIG_JOYSTICK_XPAD=y
-CONFIG_JOYSTICK_XPAD_FF=y
-CONFIG_JOYSTICK_XPAD_LEDS=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_KSM=y
-CONFIG_LOGIG940_FF=y
-CONFIG_LOGIRUMBLEPAD2_FF=y
-CONFIG_LOGITECH_FF=y
-CONFIG_MD=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEMORY_STATE_TIME=y
-CONFIG_MSDOS_FS=y
-CONFIG_PANIC_TIMEOUT=5
-CONFIG_PANTHERLORD_FF=y
-CONFIG_PERF_EVENTS=y
-CONFIG_PM_DEBUG=y
-CONFIG_PM_RUNTIME=y
-CONFIG_PM_WAKELOCKS_LIMIT=0
-CONFIG_POWER_SUPPLY=y
-CONFIG_PSTORE=y
-CONFIG_PSTORE_CONSOLE=y
-CONFIG_PSTORE_RAM=y
-CONFIG_QFMT_V2=y
-CONFIG_QUOTA=y
-CONFIG_QUOTACTL=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_QUOTA_TREE=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SMARTJOYPLUS_FF=y
-CONFIG_SND=y
-CONFIG_SOUND=y
-CONFIG_SUSPEND_TIME=y
-CONFIG_TABLET_USB_ACECAD=y
-CONFIG_TABLET_USB_AIPTEK=y
-CONFIG_TABLET_USB_GTCO=y
-CONFIG_TABLET_USB_HANWANG=y
-CONFIG_TABLET_USB_KBTAB=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_TASK_XACCT=y
-CONFIG_TIMER_STATS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_UHID=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB_USBNET=y
-CONFIG_VFAT_FS=y
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index 4cb4b6d3452c..0bc66e1d3a7e 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -1,6 +1,6 @@
#ifndef _ALPHA_TYPES_H
#define _ALPHA_TYPES_H
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
#endif /* _ALPHA_TYPES_H */
diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h
index 9fd3cd459777..8d1024d7be05 100644
--- a/arch/alpha/include/uapi/asm/types.h
+++ b/arch/alpha/include/uapi/asm/types.h
@@ -9,8 +9,18 @@
* need to be careful to avoid a name clashes.
*/
-#ifndef __KERNEL__
+/*
+ * This is here because we used to use l64 for alpha
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ *
+ * However, some user programs are fine with this. They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
#include <asm-generic/int-l64.h>
+#else
+#include <asm-generic/int-ll64.h>
#endif
#endif /* _UAPI_ALPHA_TYPES_H */
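
A hypothetical user-space sketch of the opt-in described in the comment
above: defining __SANE_USERSPACE_TYPES__ before the include selects
int-ll64.h, so __u64 becomes unsigned long long.

    #define __SANE_USERSPACE_TYPES__   /* opt in to int-ll64.h */
    #include <stdio.h>
    #include <asm/types.h>

    int main(void)
    {
            __u64 v = 1;
            /* With ll64 types, %llu matches __u64 directly. */
            printf("%llu\n", (unsigned long long)v);
            return 0;
    }
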
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 2efb0625331d..db1eee5fe502 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -104,6 +104,12 @@ ENTRY(EV_MachineCheck)
lr r0, [efa]
mov r1, sp
+ ; hardware auto-disables MMU, re-enable it to allow kernel vaddr
+ ; access for say stack unwinding of modules for crash dumps
+ lr r3, [ARC_REG_PID]
+ or r3, r3, MMU_ENABLE
+ sr r3, [ARC_REG_PID]
+
lsr r3, r2, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index daf2bf52b984..97e9582dcf99 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -885,9 +885,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
local_irq_save(flags);
- /* re-enable the MMU */
- write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
-
/* loop thru all sets of TLB */
for (set = 0; set < mmu->sets; set++) {
diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
index 5708fce44378..fbde8d21f22c 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
@@ -497,12 +497,16 @@
<&clock_mmss clk_mmss_mdss_dp_link_clk>,
<&clock_mmss clk_mmss_mdss_dp_link_intf_clk>,
<&clock_mmss clk_mmss_mdss_dp_crypto_clk>,
- <&clock_mmss clk_mmss_mdss_dp_pixel_clk>;
+ <&clock_mmss clk_mmss_mdss_dp_pixel_clk>,
+ <&mdss_dp_pll clk_vco_divided_clk_src_mux>,
+ <&mdss_dp_pll clk_vco_divsel_two_clk_src>,
+ <&mdss_dp_pll clk_vco_divsel_four_clk_src>;
clock-names = "core_mnoc_clk", "core_iface_clk", "core_bus_clk",
"core_mdp_core_clk", "core_alt_iface_clk",
"core_aux_clk", "core_ref_clk_src", "core_ref_clk",
"ctrl_link_clk", "ctrl_link_iface_clk",
- "ctrl_crypto_clk", "ctrl_pixel_clk";
+ "ctrl_crypto_clk", "ctrl_pixel_clk", "pixel_parent",
+ "pixel_clk_two_div", "pixel_clk_four_div";
qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index 76e3282d327e..132f3e7ce332 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -292,7 +292,7 @@
dev = "/dev/block/platform/soc/1da4000.ufshc/by-name/vendor";
type = "ext4";
mnt_flags = "ro,barrier=1,discard";
- fsmgr_flags = "wait,slotselect";
+ fsmgr_flags = "wait,slotselect,verify";
status = "ok";
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index e918864a3df7..ae110fe22535 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -309,7 +309,7 @@
dev = "/dev/block/platform/soc/c0c4000.sdhci/by-name/vendor";
type = "ext4";
mnt_flags = "ro,barrier=1,discard";
- fsmgr_flags = "wait,slotselect";
+ fsmgr_flags = "wait,slotselect,verify";
status = "ok";
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index c626698ffd51..48437ac9d31a 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -307,7 +307,7 @@
dev = "/dev/block/platform/soc/c0c4000.sdhci/by-name/vendor";
type = "ext4";
mnt_flags = "ro,barrier=1,discard";
- fsmgr_flags = "wait,slotselect";
+ fsmgr_flags = "wait,slotselect,verify";
status = "ok";
};
};
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
index e6d9f7b7d2f2..8e0cb4e4efaf 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
@@ -91,6 +91,25 @@
ranges = <0 0 0 0xffffffff>;
compatible = "simple-bus";
+ qcom,mpm2-sleep-counter@4a3000 {
+ compatible = "qcom,mpm2-sleep-counter";
+ reg = <0x004a3000 0x1000>;
+ clock-frequency = <32768>;
+ };
+
+ qcom,msm-imem@66bf000 {
+ compatible = "qcom,msm-imem";
+ reg = <0x66bf000 0x1000>; /* Address and size of IMEM */
+ ranges = <0x0 0x66bf000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ boot_stats@6b0 {
+ compatible = "qcom,msm-imem-boot_stats";
+ reg = <0x6b0 32>;
+ };
+ };
+
sound-adp-agave {
compatible = "qcom,apq8096-asoc-snd-adp-agave";
qcom,model = "apq8096-adp-agave-snd-card";
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 360cea172b06..4c055a63c9c6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -831,24 +831,25 @@ void stage2_unmap_vm(struct kvm *kvm)
* Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
* underlying level-2 and level-3 tables before freeing the actual level-1 table
* and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
*/
void kvm_free_stage2_pgd(struct kvm *kvm)
{
- if (kvm->arch.pgd == NULL)
- return;
+ void *pgd = NULL;
+ void *hwpgd = NULL;
spin_lock(&kvm->mmu_lock);
- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ if (kvm->arch.pgd) {
+ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ pgd = READ_ONCE(kvm->arch.pgd);
+ hwpgd = kvm_get_hwpgd(kvm);
+ kvm->arch.pgd = NULL;
+ }
spin_unlock(&kvm->mmu_lock);
- kvm_free_hwpgd(kvm_get_hwpgd(kvm));
- if (KVM_PREALLOC_LEVEL > 0)
- kfree(kvm->arch.pgd);
-
- kvm->arch.pgd = NULL;
+ if (hwpgd)
+ kvm_free_hwpgd(hwpgd);
+ if (KVM_PREALLOC_LEVEL > 0 && pgd)
+ kfree(pgd);
}
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index c73f10c1984f..83519afe3254 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -314,8 +314,11 @@ retry:
* signal first. We do not need to release the mmap_sem because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Major/minor page fault accounting is only done on the
diff --git a/arch/arm64/configs/msm-auto-gvm-perf_defconfig b/arch/arm64/configs/msm-auto-gvm-perf_defconfig
index 2e551218af2d..650b9d7bc127 100644
--- a/arch/arm64/configs/msm-auto-gvm-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-gvm-perf_defconfig
@@ -256,6 +256,8 @@ CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_QMI_INTERFACE=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_BOOT_TIME_MARKER=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
diff --git a/arch/arm64/configs/msm-auto-gvm_defconfig b/arch/arm64/configs/msm-auto-gvm_defconfig
index a6d36c314a4a..5b0fb5cd910b 100644
--- a/arch/arm64/configs/msm-auto-gvm_defconfig
+++ b/arch/arm64/configs/msm-auto-gvm_defconfig
@@ -261,6 +261,8 @@ CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_QMI_INTERFACE=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_BOOT_TIME_MARKER=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 790d27e3b997..7950df171d86 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -202,9 +202,11 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
+ preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
+ preempt_enable();
}
/*
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2720d47da366..1f9a2620c692 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -370,8 +370,11 @@ retry:
* signal first. We do not need to release the mmap_sem because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Major/minor page fault accounting is only done on the initial
diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c
index fd71b8daaaf2..5bec64f2884e 100644
--- a/arch/mips/math-emu/dp_fmax.c
+++ b/arch/mips/math-emu/dp_fmax.c
@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
return ys ? x : y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
else if (xs < ys)
return x;
- /* Compare exponent */
- if (xe > ye)
- return x;
- else if (xe < ye)
- return y;
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return y;
- return x;
+ return x;
+ return y;
}
union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,6 +202,9 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs & ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
@@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
return y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
return y;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
return y;
- return x;
+ else if (xm > ym)
+ return x;
+ else if (xs == 0)
+ return x;
+ return y;
}
diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c
index c1072b0dfb95..a287b23818d8 100644
--- a/arch/mips/math-emu/dp_fmin.c
+++ b/arch/mips/math-emu/dp_fmin.c
@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
return ys ? y : x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
else if (xs < ys)
return y;
- /* Compare exponent */
- if (xe > ye)
- return y;
- else if (xe < ye)
- return x;
+ /* Signs of inputs are the same, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return x;
- return y;
+ return y;
+ return x;
}
union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs | ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
- return x;
+ return y;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
- return y;
+ return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
return x;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
+ return x;
+ else if (xm > ym)
+ return y;
+ else if (xs == 1)
return x;
return y;
}
diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c
index 4d000844e48e..74a5a00d2f22 100644
--- a/arch/mips/math-emu/sp_fmax.c
+++ b/arch/mips/math-emu/sp_fmax.c
@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
return ys ? x : y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
else if (xs < ys)
return x;
- /* Compare exponent */
- if (xe > ye)
- return x;
- else if (xe < ye)
- return y;
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return y;
- return x;
+ return x;
+ return y;
}
union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs & ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
@@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
return y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
return y;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
return y;
- return x;
+ else if (xm > ym)
+ return x;
+ else if (xs == 0)
+ return x;
+ return y;
}
diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c
index 4eb1bb9e9dec..c51385f46b09 100644
--- a/arch/mips/math-emu/sp_fmin.c
+++ b/arch/mips/math-emu/sp_fmin.c
@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
return ys ? y : x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
else if (xs < ys)
return y;
- /* Compare exponent */
- if (xe > ye)
- return y;
- else if (xe < ye)
- return x;
+ /* Signs of inputs are the same, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return x;
- return y;
+ return y;
+ return x;
}
union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,25 +202,25 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs | ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
- return x;
+ return y;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
- return y;
+ return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
return x;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
+ return x;
+ else if (xm > ym)
+ return y;
+ else if (xs == 1)
return x;
return y;
}
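
The common bug in all four fmin/fmax files above: the old code compared
exponents and mantissas without taking the shared sign into account, so for
two negative inputs it returned the operand with the larger magnitude. A
standalone sketch of the expected IEEE 754 behaviour, using the C library
rather than the MIPS emulator:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            /* |-2.0| has the larger exponent, so a sign-blind comparison
             * picks -2.0; IEEE 754 maxNum must return -1.0. */
            printf("fmax(-2.0, -1.0) = %g\n", fmax(-2.0, -1.0)); /* -1 */
            printf("fmin(-2.0, -1.0) = %g\n", fmin(-2.0, -1.0)); /* -2 */
            return 0;
    }
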
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 91e5c1758b5c..64e016abb2a5 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -236,6 +236,28 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
+#define __get_user_or_set_dar(_regs, _dest, _addr) \
+ ({ \
+ int rc = 0; \
+ typeof(_addr) __addr = (_addr); \
+ if (__get_user_inatomic(_dest, __addr)) { \
+ _regs->dar = (unsigned long)__addr; \
+ rc = -EFAULT; \
+ } \
+ rc; \
+ })
+
+#define __put_user_or_set_dar(_regs, _src, _addr) \
+ ({ \
+ int rc = 0; \
+ typeof(_addr) __addr = (_addr); \
+ if (__put_user_inatomic(_src, __addr)) { \
+ _regs->dar = (unsigned long)__addr; \
+ rc = -EFAULT; \
+ } \
+ rc; \
+ })
+
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb,
unsigned int flags, unsigned int instr,
@@ -264,9 +286,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
} else {
unsigned long pc = regs->nip ^ (swiz & 4);
- if (__get_user_inatomic(instr,
- (unsigned int __user *)pc))
+ if (__get_user_or_set_dar(regs, instr,
+ (unsigned int __user *)pc))
return -EFAULT;
+
if (swiz == 0 && (flags & SW))
instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f;
@@ -310,31 +333,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
((nb0 + 3) / 4) * sizeof(unsigned long));
for (i = 0; i < nb; ++i, ++p)
- if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
- if (__get_user_inatomic(REG_BYTE(rptr,
- i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__get_user_or_set_dar(regs,
+ REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
} else {
for (i = 0; i < nb; ++i, ++p)
- if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
- if (__put_user_inatomic(REG_BYTE(rptr,
- i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__put_user_or_set_dar(regs,
+ REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
}
@@ -346,29 +369,32 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
* Only POWER6 has these instructions, and it does true little-endian,
* so we don't need the address swizzling.
*/
-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
- unsigned int flags)
+static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
+ unsigned int reg, unsigned int flags)
{
char *ptr0 = (char *) &current->thread.TS_FPR(reg);
char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
- int i, ret, sw = 0;
+ int i, sw = 0;
if (reg & 1)
return 0; /* invalid form: FRS/FRT must be even */
if (flags & SW)
sw = 7;
- ret = 0;
+
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
- ret |= __get_user(ptr0[i^sw], addr + i);
- ret |= __get_user(ptr1[i^sw], addr + i + 8);
+ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
} else {
- ret |= __put_user(ptr0[i^sw], addr + i);
- ret |= __put_user(ptr1[i^sw], addr + i + 8);
+ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
}
}
- if (ret)
- return -EFAULT;
+
return 1; /* exception handled and fixed up */
}
@@ -378,24 +404,27 @@ static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
{
char *ptr0 = (char *)&regs->gpr[reg];
char *ptr1 = (char *)&regs->gpr[reg+1];
- int i, ret, sw = 0;
+ int i, sw = 0;
if (reg & 1)
return 0; /* invalid form: GPR must be even */
if (flags & SW)
sw = 7;
- ret = 0;
+
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
- ret |= __get_user(ptr0[i^sw], addr + i);
- ret |= __get_user(ptr1[i^sw], addr + i + 8);
+ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
} else {
- ret |= __put_user(ptr0[i^sw], addr + i);
- ret |= __put_user(ptr1[i^sw], addr + i + 8);
+ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
}
}
- if (ret)
- return -EFAULT;
+
return 1; /* exception handled and fixed up */
}
#endif /* CONFIG_PPC64 */
@@ -688,9 +717,14 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
for (j = 0; j < length; j += elsize) {
for (i = 0; i < elsize; ++i) {
if (flags & ST)
- ret |= __put_user(ptr[i^sw], addr + i);
+ ret = __put_user_or_set_dar(regs, ptr[i^sw],
+ addr + i);
else
- ret |= __get_user(ptr[i^sw], addr + i);
+ ret = __get_user_or_set_dar(regs, ptr[i^sw],
+ addr + i);
+
+ if (ret)
+ return ret;
}
ptr += elsize;
#ifdef __LITTLE_ENDIAN__
@@ -740,7 +774,7 @@ int fix_alignment(struct pt_regs *regs)
unsigned int dsisr;
unsigned char __user *addr;
unsigned long p, swiz;
- int ret, i;
+ int i;
union data {
u64 ll;
double dd;
@@ -923,7 +957,7 @@ int fix_alignment(struct pt_regs *regs)
if (flags & F) {
/* Special case for 16-byte FP loads and stores */
PPC_WARN_ALIGNMENT(fp_pair, regs);
- return emulate_fp_pair(addr, reg, flags);
+ return emulate_fp_pair(regs, addr, reg, flags);
} else {
#ifdef CONFIG_PPC64
/* Special case for 16-byte loads and stores */
@@ -953,15 +987,12 @@ int fix_alignment(struct pt_regs *regs)
}
data.ll = 0;
- ret = 0;
p = (unsigned long)addr;
for (i = 0; i < nb; i++)
- ret |= __get_user_inatomic(data.v[start + i],
- SWIZ_PTR(p++));
-
- if (unlikely(ret))
- return -EFAULT;
+ if (__get_user_or_set_dar(regs, data.v[start + i],
+ SWIZ_PTR(p++)))
+ return -EFAULT;
} else if (flags & F) {
data.ll = current->thread.TS_FPR(reg);
@@ -1031,15 +1062,13 @@ int fix_alignment(struct pt_regs *regs)
break;
}
- ret = 0;
p = (unsigned long)addr;
for (i = 0; i < nb; i++)
- ret |= __put_user_inatomic(data.v[start + i],
- SWIZ_PTR(p++));
+ if (__put_user_or_set_dar(regs, data.v[start + i],
+ SWIZ_PTR(p++)))
+ return -EFAULT;
- if (unlikely(ret))
- return -EFAULT;
} else if (flags & F)
current->thread.TS_FPR(reg) = data.ll;
else
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index bcd3d6199464..bb16a58cf7e4 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -204,6 +204,7 @@ void set_personality_ia32(bool);
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
+ unsigned long base; \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
@@ -226,8 +227,8 @@ do { \
(pr_reg)[18] = (regs)->flags; \
(pr_reg)[19] = (regs)->sp; \
(pr_reg)[20] = (regs)->ss; \
- (pr_reg)[21] = current->thread.fs; \
- (pr_reg)[22] = current->thread.gs; \
+ rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base; \
+ rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base; \
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index de25aad07853..9016b4b70375 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(int port) \
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
asm volatile("rep; outs" #bwl \
- : "+S"(addr), "+c"(count) : "d"(port)); \
+ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
asm volatile("rep; ins" #bwl \
- : "+D"(addr), "+c"(count) : "d"(port)); \
+ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
}
BUILDIO(b, b, char)
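The added "memory" clobber matters because the string I/O instruction reads or writes memory the compiler cannot see through the register operands. A minimal sketch of the failure it prevents, where fill() is a hypothetical helper:

	static void send_buffer(int port)
	{
		char buf[64];

		fill(buf, sizeof(buf));		/* plain C stores */
		outsb(port, buf, sizeof(buf));	/* without the clobber, the
						 * compiler may keep fill()'s
						 * stores in registers or sink
						 * them past the asm, so the
						 * device streams stale bytes */
	}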
diff --git a/block/blk-core.c b/block/blk-core.c
index 3115494c4cb2..1bc9f212b3a2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -247,7 +247,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
**/
void blk_start_queue(struct request_queue *q)
{
- WARN_ON(!irqs_disabled());
+ WARN_ON(!in_interrupt() && !irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index f5e9f9310b48..d12782dc9683 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -86,8 +86,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
}
sgl = sreq->tsg;
n = sg_nents(sgl);
- for_each_sg(sgl, sg, n, i)
- put_page(sg_page(sg));
+ for_each_sg(sgl, sg, n, i) {
+ struct page *page = sg_page(sg);
+
+ /* some SGs may not have a page mapped */
+ if (page && atomic_read(&page->_count))
+ put_page(page);
+ }
kfree(sreq->tsg);
}
@@ -138,8 +143,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
sgl->cur = 0;
- if (sg)
+ if (sg) {
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+ sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
+ }
list_add_tail(&sgl->list, &ctx->tsgl);
}
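For context on the second hunk: sg_chain() turns the last slot of the previous table into a link entry, but a stale end mark left on the final data slot (set when an earlier write terminated there) would still stop iteration at the table boundary. A minimal sketch of the pattern, with an illustrative N:

	#define N 4
	struct scatterlist prev[N + 1], next[N + 1];

	sg_init_table(prev, N + 1);	/* slot N carries the end mark  */
	sg_init_table(next, N + 1);
	sg_chain(prev, N + 1, next);	/* slot N becomes the chain link */
	sg_unmark_end(&prev[N - 1]);	/* clear any stale end mark so
					 * for_each_sg() crosses the chain
					 * instead of stopping early */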
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 4d4cdc1a6e25..01de42c8b74b 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -44,6 +44,16 @@ config ANDROID_BINDER_IPC_32BIT
Note that enabling this will break newer Android user-space.
+config ANDROID_BINDER_IPC_SELFTEST
+ bool "Android Binder IPC Driver Selftest"
+ depends on ANDROID_BINDER_IPC
+ ---help---
+ This feature allows the binder selftest to run.
+
+ Binder selftest checks the allocation and freeing of binder buffers
+ exhaustively with combinations of various buffer sizes and
+ alignments.
+
endif # if ANDROID
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 4b7c726bb560..a01254c43ee3 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 34f45abe0181..3419cb0b4447 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2481,7 +2481,6 @@ static int binder_translate_handle(struct flat_binder_object *fp,
(u64)node->ptr);
binder_node_unlock(node);
} else {
- int ret;
struct binder_ref_data dest_rdata;
binder_node_unlock(node);
@@ -4581,6 +4580,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -5426,6 +5427,8 @@ static void print_binder_proc_stats(struct seq_file *m,
count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
+ binder_alloc_print_pages(m, &proc->alloc);
+
count = 0;
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
@@ -5622,6 +5625,8 @@ static int __init binder_init(void)
struct binder_device *device;
struct hlist_node *tmp;
+ binder_alloc_shrinker_init();
+
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
binder_deferred_workqueue = create_singlethread_workqueue("binder");
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index aabfebac6e57..b95da16fd938 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -27,9 +27,12 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"
+struct list_lru binder_alloc_lru;
+
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
@@ -48,14 +51,23 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
pr_info(x); \
} while (0)
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer +
- alloc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ return (u8 *)alloc->buffer +
+ alloc->buffer_size - (u8 *)buffer->data;
+ return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -105,9 +117,9 @@ static void binder_insert_allocated_buffer_locked(
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (new_buffer < buffer)
+ if (new_buffer->data < buffer->data)
p = &parent->rb_left;
- else if (new_buffer > buffer)
+ else if (new_buffer->data > buffer->data)
p = &parent->rb_right;
else
BUG();
@@ -122,18 +134,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
+ void *kern_ptr;
- kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
+ kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (kern_ptr < buffer)
+ if (kern_ptr < buffer->data)
n = n->rb_left;
- else if (kern_ptr > buffer)
+ else if (kern_ptr > buffer->data)
n = n->rb_right;
else {
/*
@@ -175,13 +186,14 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+ void *start, void *end)
{
void *page_addr;
unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
+ struct binder_lru_page *page;
+ struct vm_area_struct *vma = NULL;
+ struct mm_struct *mm = NULL;
+ bool need_mm = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: %s pages %pK-%pK\n", alloc->pid,
@@ -192,25 +204,27 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_update_page_range(alloc, allocate, start, end);
- if (vma)
- mm = NULL;
- else
- mm = get_task_mm(alloc->tsk);
+ if (allocate == 0)
+ goto free_range;
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (!page->page_ptr) {
+ need_mm = true;
+ break;
+ }
+ }
+
+ /* Same as mmget_not_zero() in later kernel versions */
+ if (need_mm && atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+ mm = alloc->vma_vm_mm;
if (mm) {
down_write(&mm->mmap_sem);
vma = alloc->vma;
- if (vma && mm != alloc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- alloc->pid);
- vma = NULL;
- }
}
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
+ if (!vma && need_mm) {
pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
alloc->pid);
goto err_no_vma;
@@ -218,18 +232,40 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
+ bool on_lru;
+ size_t index;
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
+ if (page->page_ptr) {
+ trace_binder_alloc_lru_start(alloc, index);
+
+ on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+ WARN_ON(!on_lru);
+
+ trace_binder_alloc_lru_end(alloc, index);
+ continue;
+ }
+
+ if (WARN_ON(!vma))
+ goto err_page_ptr_cleared;
+
+ trace_binder_alloc_page_start(alloc, index);
+ page->page_ptr = alloc_page(GFP_KERNEL |
+ __GFP_HIGHMEM |
+ __GFP_ZERO);
+ if (!page->page_ptr) {
pr_err("%d: binder_alloc_buf failed for page at %pK\n",
alloc->pid, page_addr);
goto err_alloc_page_failed;
}
+ page->alloc = alloc;
+ INIT_LIST_HEAD(&page->lru);
+
ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
+ PAGE_SIZE, PAGE_KERNEL,
+ &page->page_ptr);
flush_cache_vmap((unsigned long)page_addr,
(unsigned long)page_addr + PAGE_SIZE);
if (ret != 1) {
@@ -239,12 +275,14 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
user_page_addr =
(uintptr_t)page_addr + alloc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
+ ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
alloc->pid, user_page_addr);
goto err_vm_insert_page_failed;
}
+
+ trace_binder_alloc_page_end(alloc, index);
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
@@ -256,16 +294,27 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- alloc->user_buffer_offset, PAGE_SIZE, NULL);
+ bool ret;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ trace_binder_free_lru_start(alloc, index);
+
+ ret = list_lru_add(&binder_alloc_lru, &page->lru);
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
+ continue;
+
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
err_alloc_page_failed:
+err_page_ptr_cleared:
;
}
err_no_vma:
@@ -321,6 +370,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
return ERR_PTR(-ENOSPC);
}
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
@@ -380,32 +432,35 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
has_page_addr =
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
- else
- buffer_size = size + sizeof(struct binder_buffer);
- }
+ WARN_ON(n && buffer_size != size);
end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
if (ret)
return ERR_PTR(ret);
- rb_erase(best_fit, &alloc->free_buffers);
- buffer->free = 0;
- buffer->free_in_progress = 0;
- binder_insert_allocated_buffer_locked(alloc, buffer);
if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ struct binder_buffer *new_buffer;
+ new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!new_buffer) {
+ pr_err("%s: %d failed to alloc new buffer struct\n",
+ __func__, alloc->pid);
+ goto err_alloc_buf_struct_failed;
+ }
+ new_buffer->data = (u8 *)buffer->data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
}
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
alloc->pid, size, buffer);
@@ -420,6 +475,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
alloc->pid, size, alloc->free_async_space);
}
return buffer;
+
+err_alloc_buf_struct_failed:
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ end_page_addr);
+ return ERR_PTR(-ENOMEM);
}
/**
@@ -454,57 +515,58 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
static void *buffer_start_page(struct binder_buffer *buffer)
{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+ return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
+ bool to_free = true;
BUG_ON(alloc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer->data, prev->data);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
+ next = binder_buffer_next(buffer);
+ if (buffer_start_page(next) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid,
+ buffer->data,
+ next->data);
}
}
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
+
+ if (PAGE_ALIGNED(buffer->data)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
- alloc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(alloc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ "%d: merge free, buffer start %pK is page aligned\n",
+ alloc->pid, buffer->data);
+ to_free = false;
}
+
+ if (to_free) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+ alloc->pid, buffer->data,
+ prev->data, next->data);
+ binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE);
+ }
+ list_del(&buffer->entry);
+ kfree(buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -525,8 +587,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < alloc->buffer);
- BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->data < alloc->buffer);
+ BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -538,14 +600,12 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
+ struct binder_buffer *next = binder_buffer_next(buffer);
if (next->free) {
rb_erase(&next->rb_node, &alloc->free_buffers);
@@ -553,8 +613,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
}
}
if (alloc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
+ struct binder_buffer *prev = binder_buffer_prev(buffer);
if (prev->free) {
binder_delete_free_buffer(alloc, buffer);
@@ -640,14 +699,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
}
alloc->buffer_size = vma->vm_end - vma->vm_start;
- if (binder_update_page_range(alloc, 1, alloc->buffer,
- alloc->buffer + PAGE_SIZE, vma)) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
+ failure_string = "alloc buffer struct";
+ goto err_alloc_buf_struct_failed;
}
- buffer = alloc->buffer;
- INIT_LIST_HEAD(&alloc->buffers);
+
+ buffer->data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
@@ -655,10 +714,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
+ /* Same as mmgrab() in later kernel versions */
+ atomic_inc(&alloc->vma_vm_mm->mm_count);
return 0;
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
@@ -678,14 +739,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
struct rb_node *n;
int buffers, page_count;
+ struct binder_buffer *buffer;
BUG_ON(alloc->vma);
buffers = 0;
mutex_lock(&alloc->mutex);
while ((n = rb_first(&alloc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
buffer = rb_entry(n, struct binder_buffer, rb_node);
/* Transaction should already have been freed */
@@ -695,28 +755,44 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers++;
}
+ while (!list_empty(&alloc->buffers)) {
+ buffer = list_first_entry(&alloc->buffers,
+ struct binder_buffer, entry);
+ WARN_ON(!buffer->free);
+
+ list_del(&buffer->entry);
+ WARN_ON_ONCE(!list_empty(&alloc->buffers));
+ kfree(buffer);
+ }
+
page_count = 0;
if (alloc->pages) {
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
void *page_addr;
+ bool on_lru;
- if (!alloc->pages[i])
+ if (!alloc->pages[i].page_ptr)
continue;
+ on_lru = list_lru_del(&binder_alloc_lru,
+ &alloc->pages[i].lru);
page_addr = alloc->buffer + i * PAGE_SIZE;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %pK not freed\n",
- __func__, alloc->pid, i, page_addr);
+ "%s: %d: page %d at %pK %s\n",
+ __func__, alloc->pid, i, page_addr,
+ on_lru ? "on lru" : "active");
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(alloc->pages[i]);
+ __free_page(alloc->pages[i].page_ptr);
page_count++;
}
kfree(alloc->pages);
vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
+ if (alloc->vma_vm_mm)
+ mmdrop(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@@ -754,6 +830,34 @@ void binder_alloc_print_allocated(struct seq_file *m,
}
/**
+ * binder_alloc_print_pages() - print page usage
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct binder_lru_page *page;
+ int i;
+ int active = 0;
+ int lru = 0;
+ int free = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+}
+
+/**
* binder_alloc_get_allocated_count() - return count of buffers
* @alloc: binder_alloc for this proc
*
@@ -783,10 +887,112 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
- WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item: item to free
+ * @lru: the list_lru the item appears on
+ * @lock: lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock,
+ void *cb_arg)
+{
+ struct mm_struct *mm = NULL;
+ struct binder_lru_page *page = container_of(item,
+ struct binder_lru_page,
+ lru);
+ struct binder_alloc *alloc;
+ uintptr_t page_addr;
+ size_t index;
+ struct vm_area_struct *vma;
+
+ alloc = page->alloc;
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ if (!page->page_ptr)
+ goto err_page_already_freed;
+
+ index = page - alloc->pages;
+ page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+ vma = alloc->vma;
+ if (vma) {
+ /* Same as mmget_not_zero() in later kernel versions */
+ if (!atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+ goto err_mmget;
+ mm = alloc->vma_vm_mm;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+ }
+
+ list_lru_isolate(lru, item);
+ spin_unlock(lock);
+
+ if (vma) {
+ trace_binder_unmap_user_start(alloc, index);
+
+ zap_page_range(vma,
+ page_addr +
+ alloc->user_buffer_offset,
+ PAGE_SIZE, NULL);
+
+ trace_binder_unmap_user_end(alloc, index);
+
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+
+ trace_binder_unmap_kernel_start(alloc, index);
+
+ unmap_kernel_range(page_addr, PAGE_SIZE);
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+
+ trace_binder_unmap_kernel_end(alloc, index);
+
+ spin_lock(lock);
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED_RETRY;
+
+err_down_write_mmap_sem_failed:
+ mmput_async(mm);
+err_mmget:
+err_page_already_freed:
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret = list_lru_count(&binder_alloc_lru);
+ return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret;
+
+ ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, sc->nr_to_scan);
+ return ret;
+}
+
+struct shrinker binder_shrinker = {
+ .count_objects = binder_shrink_count,
+ .scan_objects = binder_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/**
* binder_alloc_init() - called by binder_open() for per-proc initialization
* @alloc: binder_alloc for this proc
*
@@ -795,8 +1001,13 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
}
+void binder_alloc_shrinker_init(void)
+{
+ list_lru_init(&binder_alloc_lru);
+ register_shrinker(&binder_shrinker);
+}
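Taken together, the binder_alloc.c hunks give each page a three-state lifecycle: active, parked on the LRU, or freed. A condensed sketch, not drop-in code, using only the symbols introduced above:

	static void lru_lifecycle_sketch(struct binder_lru_page *page)
	{
		/* buffer freed: keep the page mapped, park it on the LRU */
		WARN_ON(!list_lru_add(&binder_alloc_lru, &page->lru));

		/* same range reallocated: pull the page back off the LRU,
		 * skipping alloc_page()/map_kernel_range_noflush() */
		WARN_ON(!list_lru_del(&binder_alloc_lru, &page->lru));

		/* memory pressure: the registered shrinker walks the LRU
		 * and binder_alloc_free_page() unmaps and frees entries */
	}

Pages are only truly released under shrinker pressure, which is why binder_update_page_range()'s free path now just adds to the LRU and the zap/unmap work moved into binder_alloc_free_page().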
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 088e4ffc6230..2dd33b6df104 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -21,7 +21,9 @@
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/list_lru.h>
+extern struct list_lru binder_alloc_lru;
struct binder_transaction;
/**
@@ -57,7 +59,19 @@ struct binder_buffer {
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
- uint8_t data[0];
+ void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru: entry in binder_alloc_lru
+ * @alloc: binder_alloc for a proc
+ */
+struct binder_lru_page {
+ struct list_head lru;
+ struct page *page_ptr;
+ struct binder_alloc *alloc;
};
/**
@@ -75,8 +89,7 @@ struct binder_buffer {
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of physical page addresses for each
- * page of mmap'd space
+ * @pages: array of binder_lru_page
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
*
@@ -87,7 +100,6 @@ struct binder_buffer {
*/
struct binder_alloc {
struct mutex mutex;
- struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
@@ -96,18 +108,27 @@ struct binder_alloc {
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct page **pages;
+ struct binder_lru_page *pages;
size_t buffer_size;
uint32_t buffer_free;
int pid;
};
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock, void *cb_arg);
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
@@ -120,6 +141,8 @@ extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
extern void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc);
/**
* binder_alloc_get_free_async_space() - get free space available for async
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 000000000000..8bd7bcef967d
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,310 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to the previous buffer by
+ * address. The symbol [ marks the start of a buffer,
+ * ] marks the end of a buffer, and | marks a page
+ * boundary.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+ int i;
+
+ pr_err("alloc sizes: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%zu]", sizes[i]);
+ pr_cont("\n");
+ pr_err("free seq: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%d]", seq[i]);
+ pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ void *page_addr, *end;
+ int page_index;
+
+ end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ page_addr = buffer->data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ if (!alloc->pages[page_index].page_ptr ||
+ !list_empty(&alloc->pages[page_index].lru)) {
+ pr_err("expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index].page_ptr ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+ pr_err_size_seq(sizes, seq);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i < end / PAGE_SIZE; i++) {
+ /*
+ * An error message on a free page can be a false positive
+ * if the binder shrinker ran during the binder_alloc_free_buf
+ * calls above.
+ */
+ if (list_empty(&alloc->pages[i].lru)) {
+ pr_err_size_seq(sizes, seq);
+ pr_err("expect lru but is %s at page index %d\n",
+ alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+ int i;
+ unsigned long count;
+
+ while ((count = list_lru_count(&binder_alloc_lru))) {
+ list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i].page_ptr) {
+ pr_err("expect free but is %s at page index %d\n",
+ list_empty(&alloc->pages[i].lru) ?
+ "alloc" : "lru", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+ size_t *sizes, int *seq, size_t end)
+{
+ struct binder_buffer *buffers[BUFFER_NUM];
+
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+ /* Allocate from lru. */
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ if (list_lru_count(&binder_alloc_lru))
+ pr_err("lru list should be empty but is not\n");
+
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+ binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+ size_t *sizes, int *seq,
+ int index, size_t end)
+{
+ int i;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_free(alloc, sizes, seq, end);
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(seq, index, i))
+ continue;
+ seq[index] = i;
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+ }
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+ size_t *end_offset)
+{
+ int i;
+ int seq[BUFFER_NUM] = {0};
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ size_t last_offset, offset = 0;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+ end_offset[BUFFER_NUM - 1]);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+ size_t *end_offset, int index)
+{
+ int align;
+ size_t end, prev;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_size(alloc, end_offset);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+ }
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+ size_t end_offset[BUFFER_NUM];
+
+ if (!binder_selftest_run)
+ return;
+ mutex_lock(&binder_selftest_lock);
+ if (!binder_selftest_run || !alloc->vma)
+ goto done;
+ pr_info("STARTED\n");
+ binder_selftest_alloc_offset(alloc, end_offset, 0);
+ binder_selftest_run = false;
+ if (binder_selftest_failures > 0)
+ pr_info("%d tests FAILED\n", binder_selftest_failures);
+ else
+ pr_info("PASSED\n");
+
+done:
+ mutex_unlock(&binder_selftest_lock);
+}
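binder_selftest_free_seq() enumerates all BUFFER_NUM! free orders by backtracking over not-yet-used indices. The same idea as a standalone userspace program, with hypothetical names and the count shrunk to 3:

	#include <stdio.h>

	#define NUM 3

	static int is_used(const int *seq, int len, int val)
	{
		int i;

		for (i = 0; i < len; i++)
			if (seq[i] == val)
				return 1;
		return 0;
	}

	static void permute(int *seq, int index)
	{
		int i;

		if (index == NUM) {
			for (i = 0; i < NUM; i++)
				printf("%d ", seq[i]);
			printf("\n");	/* one of NUM! = 6 orders */
			return;
		}
		for (i = 0; i < NUM; i++) {
			if (is_used(seq, index, i))
				continue;
			seq[index] = i;
			permute(seq, index + 1);
		}
	}

	int main(void)
	{
		int seq[NUM];

		permute(seq, 0);
		return 0;
	}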
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7967db16ba5a..76e3b9c8a8a2 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -291,6 +291,61 @@ TRACE_EVENT(binder_update_page_range,
__entry->offset, __entry->size)
);
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index),
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(size_t, page_index)
+ ),
+ TP_fast_assign(
+ __entry->proc = alloc->pid;
+ __entry->page_index = page_index;
+ ),
+ TP_printk("proc=%d page_index=%zu",
+ __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
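Each DEFINE_EVENT above expands to a trace_<name>() stub, and the allocator brackets each slow path with a start/end pair, as in the binder_alloc.c hunk earlier in this patch:

	bool on_lru;

	trace_binder_alloc_lru_start(alloc, index);
	on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
	WARN_ON(!on_lru);
	trace_binder_alloc_lru_end(alloc, index);

Having a shared event class keeps the ten events cheap to define while still letting each be enabled individually.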
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 8d4d959a821c..8706533db57b 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = {
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },
{ },
};
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 6c15a554efbe..dc1255294628 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
static const struct pci_device_id cs5536[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
{ },
};
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 500592486e88..0346e46e2871 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -737,7 +737,7 @@ int bus_add_driver(struct device_driver *drv)
out_unregister:
kobject_put(&priv->kobj);
- kfree(drv->p);
+ /* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
bus_put(bus);
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 586f9168ffa4..47d1e834f3f4 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2214,6 +2214,9 @@ static void skd_send_fitmsg(struct skd_device *skdev,
*/
qcmd |= FIT_QCMD_MSGSIZE_64;
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -2260,6 +2263,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
qcmd = skspcl->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -4679,15 +4685,16 @@ static void skd_free_disk(struct skd_device *skdev)
{
struct gendisk *disk = skdev->disk;
- if (disk != NULL) {
- struct request_queue *q = disk->queue;
+ if (disk && (disk->flags & GENHD_FL_UP))
+ del_gendisk(disk);
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (q)
- blk_cleanup_queue(q);
- put_disk(disk);
+ if (skdev->queue) {
+ blk_cleanup_queue(skdev->queue);
+ skdev->queue = NULL;
+ disk->queue = NULL;
}
+
+ put_disk(disk);
skdev->disk = NULL;
}
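The intent of the two smp_wmb() calls, in outline; msg and its fields are illustrative stand-ins, the rest comes from the hunks above:

	msg->opcode = FIT_CMD;			/* 1: build the message    */
	msg->length = nbytes;
	smp_wmb();				/* 2: publish the message
						 * before the doorbell     */
	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);	/* 3: ring the doorbell;
						 * the device may DMA the
						 * message immediately     */

Without the barrier, the message-buffer stores could still be in flight when the device observes the doorbell write and fetches a stale command.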
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index cd6b141b9825..7bb8055bd10c 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -333,6 +333,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index a27f12883c8d..986aeed169f5 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -155,15 +155,20 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
return -EIO;
ch = &diag_md[id];
+ if (!ch)
+ return -EINVAL;
spin_lock_irqsave(&ch->lock, flags);
for (i = 0; i < ch->num_tbl_entries && !found; i++) {
if (ch->tbl[i].buf != buf)
continue;
found = 1;
- pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, ctxt: %d len: %d at i: %d back to the table, proc: %d, mode: %d\n",
- buf, ctx, ch->tbl[i].len,
- i, id, driver->logging_mode);
+ pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, len: %d, back to the table for p: %d, t: %d, buf_num: %d, proc: %d, i: %d\n",
+ buf, ch->tbl[i].len, GET_BUF_PERIPHERAL(ctx),
+ GET_BUF_TYPE(ctx), GET_BUF_NUM(ctx), id, i);
+ ch->tbl[i].buf = NULL;
+ ch->tbl[i].len = 0;
+ ch->tbl[i].ctx = 0;
}
spin_unlock_irqrestore(&ch->lock, flags);
@@ -227,7 +232,7 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
ch = &diag_md[i];
for (j = 0; j < ch->num_tbl_entries && !err; j++) {
entry = &ch->tbl[j];
- if (entry->len <= 0)
+ if (entry->len <= 0 || entry->buf == NULL)
continue;
peripheral = diag_md_get_peripheral(entry->ctx);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 7dc2eabf1bb9..ef08f939c36e 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1572,6 +1572,9 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
switch (type) {
case TYPE_DATA:
if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
+ peripheral, type, num);
diagfwd_write_done(peripheral, type, num);
diag_ws_on_copy(DIAG_WS_MUX);
} else if (peripheral == APPS_DATA) {
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 0f94bab3bf84..b7dff47623de 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -31,6 +31,7 @@
#include "diag_mux.h"
#include "diag_ipc_logging.h"
#include "diagfwd_glink.h"
+#include "diag_memorydevice.h"
struct data_header {
uint8_t control_char;
@@ -188,8 +189,10 @@ static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len,
static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
{
+ int i, ctx = 0;
uint32_t max_size = 0;
unsigned char *temp_buf = NULL;
+ struct diag_md_info *ch = NULL;
if (!buf || len == 0)
return -EINVAL;
@@ -203,11 +206,31 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
}
if (buf->len < max_size) {
+ if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE) {
+ ch = &diag_md[DIAG_LOCAL_PROC];
+ for (i = 0; ch != NULL &&
+ i < ch->num_tbl_entries; i++) {
+ if (ch->tbl[i].buf == buf->data) {
+ ctx = ch->tbl[i].ctx;
+ ch->tbl[i].buf = NULL;
+ ch->tbl[i].len = 0;
+ ch->tbl[i].ctx = 0;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Flushed mdlog table entries before reallocating data buffer, p:%d, t:%d\n",
+ GET_BUF_PERIPHERAL(ctx),
+ GET_BUF_TYPE(ctx));
+ break;
+ }
+ }
+ }
temp_buf = krealloc(buf->data, max_size +
APF_DIAG_PADDING,
GFP_KERNEL);
if (!temp_buf)
return -ENOMEM;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Reallocated data buffer: %pK with size: %d\n",
+ temp_buf, max_size);
buf->data = temp_buf;
buf->len = max_size;
}
@@ -360,6 +383,10 @@ end:
mutex_unlock(&fwd_info->data_mutex);
mutex_unlock(&driver->hdlc_disable_mutex);
if (buf) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(buf->ctxt));
diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
GET_BUF_NUM(buf->ctxt));
}
@@ -572,6 +599,10 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
end:
diag_ws_release();
if (temp_ptr_cpd) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_ptr_cpd->ctxt));
diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
GET_BUF_NUM(temp_ptr_cpd->ctxt));
}
@@ -692,6 +723,10 @@ end:
mutex_unlock(&fwd_info->data_mutex);
mutex_unlock(&driver->hdlc_disable_mutex);
if (temp_buf) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_buf->ctxt));
diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
GET_BUF_NUM(temp_buf->ctxt));
}
@@ -772,6 +807,16 @@ static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info,
else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
atomic_set(&fwd_info->buf_2->in_busy, 0);
}
+ if (fwd_info->buf_1 && !atomic_read(&(fwd_info->buf_1->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2 && !atomic_read(&(fwd_info->buf_2->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
}
int diagfwd_peripheral_init(void)
@@ -1158,10 +1203,18 @@ static void __diag_fwd_open(struct diagfwd_info *fwd_info)
if ((driver->logging_mode != DIAG_USB_MODE) ||
driver->usb_connected) {
- if (fwd_info->buf_1)
+ if (fwd_info->buf_1) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
- if (fwd_info->buf_2)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2) {
atomic_set(&fwd_info->buf_2->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
}
if (fwd_info->p_ops && fwd_info->p_ops->open)
@@ -1285,10 +1338,18 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info)
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
fwd_info->c_ops->close(fwd_info);
- if (fwd_info->buf_1 && fwd_info->buf_1->data)
+ if (fwd_info->buf_1 && fwd_info->buf_1->data) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
- if (fwd_info->buf_2 && fwd_info->buf_2->data)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2 && fwd_info->buf_2->data) {
atomic_set(&fwd_info->buf_2->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
if (fwd_info->buf_ptr[i])
@@ -1314,6 +1375,9 @@ int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
* in_busy flags. No need to queue read in this case.
*/
if (len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Read Length is 0, resetting the diag buffers p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
diagfwd_reset_buffers(fwd_info, buf);
diag_ws_release();
return 0;
@@ -1326,7 +1390,7 @@ int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
return 0;
}
-void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num)
{
struct diagfwd_info *fwd_info = NULL;
@@ -1334,8 +1398,10 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
return;
fwd_info = &peripheral_info[type][peripheral];
+ if (!fwd_info)
+ return;
- if (ctxt == 1 && fwd_info->buf_1) {
+ if (buf_num == 1 && fwd_info->buf_1) {
/* Buffer 1 for core PD is freed */
fwd_info->cpd_len_1 = 0;
@@ -1349,7 +1415,12 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
} else {
atomic_set(&fwd_info->buf_1->in_busy, 0);
}
- } else if (ctxt == 2 && fwd_info->buf_2) {
+ if (!atomic_read(&(fwd_info->buf_1->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ } else if (buf_num == 2 && fwd_info->buf_2) {
/* Buffer 2 for core PD is freed */
fwd_info->cpd_len_2 = 0;
@@ -1363,8 +1434,12 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
} else {
atomic_set(&fwd_info->buf_2->in_busy, 0);
}
-
- } else if (ctxt == 3 && fwd_info->buf_upd_1_a) {
+ if (!atomic_read(&(fwd_info->buf_2->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ } else if (buf_num == 3 && fwd_info->buf_upd_1_a && fwd_info->buf_1) {
/* Buffer 1 for user pd 1 is freed */
atomic_set(&fwd_info->buf_upd_1_a->in_busy, 0);
@@ -1382,9 +1457,14 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
if (!fwd_info->cpd_len_1)
atomic_set(&fwd_info->buf_1->in_busy, 0);
}
+ if (!atomic_read(&(fwd_info->buf_1->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
fwd_info->upd_len_1_a = 0;
- } else if (ctxt == 4 && fwd_info->buf_upd_1_b) {
+ } else if (buf_num == 4 && fwd_info->buf_upd_1_b && fwd_info->buf_2) {
/* Buffer 2 for user pd 1 is freed */
atomic_set(&fwd_info->buf_upd_1_b->in_busy, 0);
if (peripheral == PERIPHERAL_LPASS) {
@@ -1401,34 +1481,46 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
if (!fwd_info->cpd_len_2)
atomic_set(&fwd_info->buf_2->in_busy, 0);
}
+ if (!atomic_read(&(fwd_info->buf_2->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
fwd_info->upd_len_1_b = 0;
- } else if (ctxt == 5 && fwd_info->buf_upd_2_a) {
+ } else if (buf_num == 5 && fwd_info->buf_upd_2_a && fwd_info->buf_1) {
/* Buffer 1 for user pd 2 is freed */
atomic_set(&fwd_info->buf_upd_2_a->in_busy, 0);
/* if there is no data in the cpd and the other user pd,
* free the core pd buffer for LPASS
*/
if (!fwd_info->cpd_len_1 &&
- !fwd_info->upd_len_1_a)
+ !fwd_info->upd_len_1_a) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
fwd_info->upd_len_2_a = 0;
- } else if (ctxt == 6 && fwd_info->buf_upd_2_b) {
+ } else if (buf_num == 6 && fwd_info->buf_upd_2_b && fwd_info->buf_2) {
/* Buffer 2 for user pd 2 is freed */
atomic_set(&fwd_info->buf_upd_2_b->in_busy, 0);
/* if there is no data in the cpd and the other user pd,
* free the core pd buffer for LPASS
*/
if (!fwd_info->cpd_len_2 &&
- !fwd_info->upd_len_1_b)
+ !fwd_info->upd_len_1_b) {
atomic_set(&fwd_info->buf_2->in_busy, 0);
-
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
fwd_info->upd_len_2_b = 0;
} else
- pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
+ pr_err("diag: In %s, invalid buf_num: %d\n", __func__, buf_num);
diagfwd_queue_read(fwd_info);
}
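The ctxt to buf_num rename makes the parameter's meaning explicit. The if/else ladder above encodes the following mapping, written here as a hypothetical enum for illustration only:

	enum diag_buf_num {
		DIAG_BUF_CPD_1   = 1,	/* buf_1:       core PD, buffer 1   */
		DIAG_BUF_CPD_2   = 2,	/* buf_2:       core PD, buffer 2   */
		DIAG_BUF_UPD_1_A = 3,	/* buf_upd_1_a: user PD 1, buffer 1 */
		DIAG_BUF_UPD_1_B = 4,	/* buf_upd_1_b: user PD 1, buffer 2 */
		DIAG_BUF_UPD_2_A = 5,	/* buf_upd_2_a: user PD 2, buffer 1 */
		DIAG_BUF_UPD_2_B = 6,	/* buf_upd_2_b: user PD 2, buffer 2 */
	};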
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index eda70dcfdcd9..00621c178906 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -117,7 +117,7 @@ int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt);
int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len);
-void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt);
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num);
void diagfwd_buffers_init(struct diagfwd_info *fwd_info);
/*
diff --git a/drivers/clk/msm/clock-dummy.c b/drivers/clk/msm/clock-dummy.c
index caa6a6ab7565..e874fccc7f6c 100644
--- a/drivers/clk/msm/clock-dummy.c
+++ b/drivers/clk/msm/clock-dummy.c
@@ -64,7 +64,6 @@ struct clk dummy_clk = {
static void *dummy_clk_dt_parser(struct device *dev, struct device_node *np)
{
struct clk *c;
- u32 rate;
c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c) {
@@ -73,9 +72,6 @@ static void *dummy_clk_dt_parser(struct device *dev, struct device_node *np)
}
c->ops = &clk_ops_dummy;
- if (!of_property_read_u32(np, "clock-frequency", &rate))
- c->rate = rate;
-
return msmclk_generic_clk_init(dev, np, c);
}
MSMCLK_PARSER(dummy_clk_dt_parser, "qcom,dummy-clk", 0);
@@ -83,6 +79,11 @@ MSMCLK_PARSER(dummy_clk_dt_parser, "qcom,dummy-clk", 0);
static struct clk *of_dummy_get(struct of_phandle_args *clkspec,
void *data)
{
+ u32 rate;
+
+ if (!of_property_read_u32(clkspec->np, "clock-frequency", &rate))
+ dummy_clk.rate = rate;
+
return &dummy_clk;
}
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f951f911786e..a72ae98b4838 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -279,6 +279,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
+ /*
+ * Android: set default parameters for parity between schedutil and
+ * schedfreq
+ */
+ policy->up_transition_delay_us = transition_latency / NSEC_PER_USEC;
+ policy->down_transition_delay_us = 50000; /* 50ms */
+
return 0;
out_free_cpufreq_table:
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index abbee61c99c8..ae65fbc3ceac 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -1825,6 +1825,7 @@ struct cpufreq_governor cpufreq_gov_interactive = {
static int __init cpufreq_interactive_init(void)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ int ret = 0;
spin_lock_init(&speedchange_cpumask_lock);
mutex_init(&gov_lock);
@@ -1841,7 +1842,12 @@ static int __init cpufreq_interactive_init(void)
/* NB: wake up so the thread does not look hung to the freezer */
wake_up_process_no_notif(speedchange_task);
- return cpufreq_register_governor(&cpufreq_gov_interactive);
+ ret = cpufreq_register_governor(&cpufreq_gov_interactive);
+ if (ret) {
+ kthread_stop(speedchange_task);
+ put_task_struct(speedchange_task);
+ }
+ return ret;
}
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
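The fix follows the usual create/register/unwind pattern: whatever was set up before the failing call is torn down in reverse. A generic sketch, where worker_fn() and register_thing() are hypothetical stand-ins:

	static int init_sketch(void)
	{
		struct task_struct *task;
		int ret;

		task = kthread_create(worker_fn, NULL, "worker");
		if (IS_ERR(task))
			return PTR_ERR(task);
		get_task_struct(task);
		wake_up_process(task);

		ret = register_thing();	/* stands in for
					 * cpufreq_register_governor() */
		if (ret) {
			kthread_stop(task);	/* undo the creation...      */
			put_task_struct(task);	/* ...and drop the extra ref */
		}
		return ret;
	}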
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 20bf034bb193..a55f236961b8 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -273,8 +273,6 @@ void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
if (authdata) {
handle->sha_ctxt.auth_data[0] = auth32[0];
handle->sha_ctxt.auth_data[1] = auth32[1];
- handle->sha_ctxt.auth_data[2] = auth32[2];
- handle->sha_ctxt.auth_data[3] = auth32[3];
}
tasklet_schedule(&pdev->done_tasklet);
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f23b5cb..dba5c0ea0827 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -36,7 +36,10 @@ struct adv7511 {
bool edid_read;
wait_queue_head_t wq;
+ struct work_struct hpd_work;
+
struct drm_encoder *encoder;
+ struct drm_connector connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
@@ -48,6 +51,10 @@ struct adv7511 {
struct gpio_desc *gpio_pd;
};
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
{
return to_encoder_slave(encoder)->slave_priv;
@@ -362,12 +369,19 @@ static void adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ /*
+ * Documentation says the INT_ENABLE registers are reset in
+ * POWER_DOWN mode. My 7511w preserved the bits, however.
+ * Still, let's be safe and stick to the documentation.
+ */
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
/*
	 * Per spec it is allowed to pulse the HPD signal to indicate that the
@@ -422,7 +436,27 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static void adv7511_hpd_work(struct work_struct *work)
+{
+ struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
+ enum drm_connector_status status;
+ unsigned int val;
+ int ret;
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ status = connector_status_disconnected;
+ else if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ if (adv7511->connector.status != status) {
+ adv7511->connector.status = status;
+ drm_kms_helper_hotplug_event(adv7511->connector.dev);
+ }
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
@@ -438,8 +472,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
+ if (process_hpd && irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+ schedule_work(&adv7511->hpd_work);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
adv7511->edid_read = true;
@@ -456,7 +490,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
@@ -473,7 +507,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
@@ -567,13 +601,18 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
adv7511->current_edid_segment = -1;
+ /* Reset the EDID_I2C_ADDR register as it might be cleared */
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+ edid_i2c_addr);
}
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
@@ -849,10 +888,6 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct adv7511_link_config link_config;
@@ -913,6 +948,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511->i2c_edid)
return -ENOMEM;
+ INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
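
The HPD rework above is the usual pattern of deferring sleeping work out of
interrupt context: the handler only schedules a work item, and the work
callback does the I2C status read and the DRM notification. Reduced sketch
(struct and function names illustrative; INIT_WORK() is assumed done at
probe, as the hunk adds):

    struct hpd_ctx {
            struct work_struct hpd_work;
    };

    static void hpd_work_fn(struct work_struct *work)
    {
            struct hpd_ctx *ctx = container_of(work, struct hpd_ctx, hpd_work);

            /* may sleep: read the status register over I2C, then call
             * drm_kms_helper_hotplug_event() if the state changed
             */
            pr_debug("hpd change on %p\n", ctx);
    }

    static irqreturn_t hpd_irq(int irq, void *devid)
    {
            struct hpd_ctx *ctx = devid;

            schedule_work(&ctx->hpd_work);
            return IRQ_HANDLED;
    }
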
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index cc91ae832ffb..6fd7b50c5747 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -635,7 +635,8 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- i915.mmio_debug = mmio_debug_once--;
+ i915.mmio_debug = mmio_debug_once;
+ mmio_debug_once = false;
}
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c61753311771..b245a4c7c826 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -185,14 +185,9 @@ static void vblank_ctrl_worker(struct kthread_work *work)
struct msm_kms *kms = priv->kms;
struct vblank_event *vbl_ev, *tmp;
unsigned long flags;
- struct kthread_worker *worker = work->worker;
- struct msm_drm_commit *commit = container_of(worker,
- struct msm_drm_commit, worker);
spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
- if (vbl_ev->crtc_id != commit->crtc_id)
- continue;
list_del(&vbl_ev->node);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 30e9d688396f..e99eba0dadb7 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -204,10 +204,15 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
idx = left_crtc_zpos_cnt[pstate->stage]++;
}
+ /* stage plane on right LM if it crosses the boundary */
+ lm_right = (lm_idx == LEFT_MIXER) &&
+ (plane->state->crtc_x + plane->state->crtc_w >
+ crtc_split_width);
+
/*
	 * program each mixer with two hw pipes in dual mixer mode.
*/
- if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS) {
+ if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS && lm_right) {
stage_cfg->stage[LEFT_MIXER][pstate->stage][1] =
sde_plane_pipe(plane, 1);
@@ -218,10 +223,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
flush_mask |= ctl->ops.get_bitmask_sspp(ctl,
sde_plane_pipe(plane, lm_idx ? 1 : 0));
- /* stage plane on right LM if it crosses the boundary */
- lm_right = (lm_idx == LEFT_MIXER) &&
- (plane->state->crtc_x + plane->state->crtc_w >
- crtc_split_width);
+
stage_cfg->stage[lm_idx][pstate->stage][idx] =
sde_plane_pipe(plane, lm_idx ? 1 : 0);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 6e2ccfa8e428..9cbee5243e6d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1237,8 +1237,10 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
bool q16_data = true;
int idx;
struct sde_phy_plane *pp;
- uint32_t num_of_phy_planes = 0, maxlinewidth = 0xFFFF;
+ uint32_t num_of_phy_planes = 0;
int mode = 0;
+ uint32_t crtc_split_width;
+ bool is_across_mixer_boundary = false;
if (!plane) {
SDE_ERROR("invalid plane\n");
@@ -1252,6 +1254,7 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
pstate = to_sde_plane_state(plane->state);
crtc = state->crtc;
+ crtc_split_width = get_crtc_split_width(crtc);
fb = state->fb;
if (!crtc || !fb) {
SDE_ERROR_PLANE(psde, "invalid crtc %d or fb %d\n",
@@ -1348,17 +1351,17 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
}
}
- list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
- if (maxlinewidth > pp->pipe_sblk->maxlinewidth)
- maxlinewidth = pp->pipe_sblk->maxlinewidth;
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
num_of_phy_planes++;
- }
/*
* Only need to use one physical plane if plane width is still within
* the limitation.
*/
- if (maxlinewidth >= (src.x + src.w))
+ is_across_mixer_boundary = (plane->state->crtc_x < crtc_split_width) &&
+ (plane->state->crtc_x + plane->state->crtc_w >
+ crtc_split_width);
+ if (crtc_split_width >= (src.x + src.w) && !is_across_mixer_boundary)
num_of_phy_planes = 1;
if (num_of_phy_planes > 1) {
@@ -1369,9 +1372,10 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
/* Adjust offset for multi-pipe */
- src.x += src.w * pp->index;
- dst.x += dst.w * pp->index;
-
+ if (num_of_phy_planes > 1) {
+ src.x += src.w * pp->index;
+ dst.x += dst.w * pp->index;
+ }
pp->pipe_cfg.src_rect = src;
pp->pipe_cfg.dst_rect = dst;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index d671dcfaff3c..4896474da320 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -180,6 +180,10 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
}
}
+#ifdef __BIG_ENDIAN
+ pci->msi = false;
+#endif
+
pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
if (pci->msi && func->msi_rearm) {
pci->msi = pci_enable_msi(pci->pdev) == 0;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 025c429050c0..5d8dfe027b30 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &pool->list, lru) {
+ list_for_each_entry(p, &new_pages, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 8c998a5d791b..c3b3ccf48a7b 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -2181,9 +2181,11 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq, clocks[0]);
- kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[6],
- clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ),
- clocks[6]);
+ if (pwr->grp_clks[6] != NULL)
+ kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[6],
+ clk_round_rate(pwr->grp_clks[6],
+ KGSL_RBBMTIMER_CLK_FREQ),
+ clocks[6]);
_isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1);
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index d57a2f75dccf..32c6a40a408f 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -72,6 +72,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Cannon Lake H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
+ /* Cannon Lake LP */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..639d1a9c8793 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -339,8 +339,10 @@ static int ismt_process_desc(const struct ismt_desc *desc,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_I2C_BLOCK_DATA:
- memcpy(&data->block[1], dma_buffer, desc->rxbytes);
- data->block[0] = desc->rxbytes;
+ if (desc->rxbytes != dma_buffer[0] + 1)
+ return -EMSGSIZE;
+
+ memcpy(data->block, dma_buffer, desc->rxbytes);
break;
}
return 0;
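
The added check guards a device-controlled length. For SMBus block reads the
first byte of the DMA buffer is the block length, so the descriptor's rxbytes
must equal that length plus one before anything is copied into the fixed-size
block buffer. Sketch of the invariant (the explicit upper bound is my
addition for illustration):

    static int copy_smbus_block(u8 *block, const u8 *dma_buffer, u8 rxbytes)
    {
            /* dma_buffer[0] holds the device-reported block length */
            if (rxbytes != dma_buffer[0] + 1 ||
                rxbytes > I2C_SMBUS_BLOCK_MAX + 1)
                    return -EMSGSIZE;

            memcpy(block, dma_buffer, rxbytes);
            return 0;
    }
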
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663c27c5..4b58e8aaf5c5 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -786,10 +786,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
- i2c->cmd = 0;
- memset(i2c->cmd_buf, 0, BUFSIZE);
- memset(i2c->data_buf, 0, BUFSIZE);
-
i2c->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index ce6ff9b301bb..7e2dc5e56632 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -381,8 +381,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
return 0;
if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data\n");
- button_info = 0;
+ psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
}
psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5be14ad29d46..dbf09836ff30 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -905,6 +905,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
},
},
{
+ /* Gigabyte P57 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ },
+ },
+ {
/* Schenker XMG C504 - Elantech touchpad */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index 08bfb83a9447..06f7f569f417 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -105,7 +105,7 @@ static void fts_interrupt_enable(struct fts_ts_info *info);
static int fts_init_hw(struct fts_ts_info *info);
static int fts_mode_handler(struct fts_ts_info *info, int force);
static int fts_command(struct fts_ts_info *info, unsigned char cmd);
-
+static void fts_unblank(struct fts_ts_info *info);
static int fts_chip_initialization(struct fts_ts_info *info);
void touch_callback(unsigned int status)
@@ -1487,8 +1487,12 @@ static int fts_init(struct fts_ts_info *info)
error = fts_interrupt_install(info);
- if (error != OK)
+ if (error != OK) {
		logError(1, "%s Init (1) error (ERROR = %08X)\n", tag, error);
+ return error;
+ }
+
+ fts_unblank(info);
return error;
}
@@ -1773,6 +1777,26 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb, unsigned long va
}
+static void fts_unblank(struct fts_ts_info *info)
+{
+ int i;
+
+ for (i = 0; i < TOUCH_ID_MAX; i++) {
+ input_mt_slot(info->input_dev, i);
+ input_mt_report_slot_state(info->input_dev,
+ (i < FINGER_MAX) ? MT_TOOL_FINGER : MT_TOOL_PEN, 0);
+ }
+ input_sync(info->input_dev);
+
+ info->resume_bit = 1;
+
+ fts_mode_handler(info, 0);
+
+ info->sensor_sleep = false;
+
+ fts_enableInterrupt();
+}
+
static struct notifier_block fts_noti_block = {
.notifier_call = fts_fb_state_chg_callback,
};
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9e17ef27a183..6f1dbd52ec91 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -915,8 +915,11 @@ static int __init gic_of_init(struct device_node *node,
gic_len = resource_size(&res);
}
- if (mips_cm_present())
+ if (mips_cm_present()) {
write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+ /* Ensure GIC region is enabled before trying to access it */
+ __sync();
+ }
gic_present = true;
__gic_init(gic_base, gic_len, cpu_vec, 0, node);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index c3ea03c9a1a8..02619cabda8b 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -333,6 +333,7 @@ struct cached_dev {
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
struct task_struct *writeback_thread;
+ struct workqueue_struct *writeback_write_wq;
struct keybuf writeback_keys;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 2410df1c2a05..6c4c7caea693 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
- wake_up_gc(op->c);
-
if (op->bypass)
return bch_data_invalidate(cl);
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+ wake_up_gc(op->c);
+
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 7b5880b8874c..c5ceea9222ff 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1023,7 +1023,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
}
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- bch_sectors_dirty_init(dc);
+ bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
atomic_inc(&dc->count);
bch_writeback_queue(dc);
@@ -1056,6 +1056,8 @@ static void cached_dev_free(struct closure *cl)
cancel_delayed_work_sync(&dc->writeback_rate_update);
if (!IS_ERR_OR_NULL(dc->writeback_thread))
kthread_stop(dc->writeback_thread);
+ if (dc->writeback_write_wq)
+ destroy_workqueue(dc->writeback_write_wq);
mutex_lock(&bch_register_lock);
@@ -1227,6 +1229,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
goto err;
bcache_device_attach(d, c, u - c->uuids);
+ bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
add_disk(d->disk);
@@ -1959,6 +1962,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
else
err = "device busy";
mutex_unlock(&bch_register_lock);
+ if (!IS_ERR(bdev))
+ bdput(bdev);
if (attr == &ksysfs_register_quiet)
goto out;
}
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index b3ff57d61dde..4fbb5532f24c 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -191,7 +191,7 @@ STORE(__cached_dev)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
- unsigned v = size;
+ ssize_t v = size;
struct cache_set *c;
struct kobj_uevent_env *env;
@@ -226,7 +226,7 @@ STORE(__cached_dev)
bch_cached_dev_run(dc);
if (attr == &sysfs_cache_mode) {
- ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
+ v = bch_read_string_list(buf, bch_cache_modes + 1);
if (v < 0)
return v;
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index db3ae4c2b223..6c18e3ec3e48 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -73,24 +73,44 @@ STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
+/**
+ * bch_hprint() - format @v into a human-readable string for sysfs
+ * @buf: buffer (at least 8 bytes) to format the result into
+ * @v: signed 64-bit integer
+ *
+ * Returns the number of bytes used by the format.
+ */
ssize_t bch_hprint(char *buf, int64_t v)
{
static const char units[] = "?kMGTPEZY";
- char dec[4] = "";
- int u, t = 0;
-
- for (u = 0; v >= 1024 || v <= -1024; u++) {
- t = v & ~(~0 << 10);
- v >>= 10;
- }
-
- if (!u)
- return sprintf(buf, "%llu", v);
-
- if (v < 100 && v > -100)
- snprintf(dec, sizeof(dec), ".%i", t / 100);
-
- return sprintf(buf, "%lli%s%c", v, dec, units[u]);
+ int u = 0, t;
+
+ uint64_t q;
+
+ if (v < 0)
+ q = -v;
+ else
+ q = v;
+
+ /* For as long as the number is more than 3 digits, but at least
+ * once, shift right / divide by 1024. Keep the remainder for
+ * a digit after the decimal point.
+ */
+ do {
+ u++;
+
+ t = q & ~(~0 << 10);
+ q >>= 10;
+ } while (q >= 1000);
+
+ if (v < 0)
+ /* '-', up to 3 digits, '.', 1 digit, 1 character, null;
+ * yields 8 bytes.
+ */
+ return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
+ else
+ return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
}
ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
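
Worked example for the rewritten loop (my numbers, not from the patch):

    char buf[8];

    bch_hprint(buf, 1536);   /* "1.5k":  q = 1, t = 512, 512 * 10 / 1024 = 5 */
    bch_hprint(buf, -1536);  /* "-1.5k": same digits via the negative branch */

The worst case is a sign, three digits, a decimal point, one fractional
digit, a unit letter and the terminator, which is why the kernel-doc above
requires an 8-byte buffer.
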
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b9346cd9cda1..bbb1dc9e1639 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -21,7 +21,8 @@
static void __update_writeback_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
+ uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
+ bcache_flash_devs_sectors_dirty(c);
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
@@ -190,7 +191,7 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
- continue_at(cl, write_dirty_finish, system_wq);
+ continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
static void read_dirty_endio(struct bio *bio)
@@ -210,7 +211,7 @@ static void read_dirty_submit(struct closure *cl)
closure_bio_submit(&io->bio, cl);
- continue_at(cl, write_dirty, system_wq);
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}
static void read_dirty(struct cached_dev *dc)
@@ -488,17 +489,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
return MAP_CONTINUE;
}
-void bch_sectors_dirty_init(struct cached_dev *dc)
+void bch_sectors_dirty_init(struct bcache_device *d)
{
struct sectors_dirty_init op;
bch_btree_op_init(&op.op, -1);
- op.inode = dc->disk.id;
+ op.inode = d->id;
- bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+ bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
- dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
+ d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -522,6 +523,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
+ dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
+ WQ_MEM_RECLAIM, 0);
+ if (!dc->writeback_write_wq)
+ return -ENOMEM;
+
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
"bcache_writeback");
if (IS_ERR(dc->writeback_thread))
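
A condensed view of the lifecycle these hunks introduce (sketch; dc stands
for the cached_dev as in the driver): writeback sits on the memory-reclaim
path, so queueing its closures on system_wq can stall when reclaim itself is
waiting on writeback. A dedicated WQ_MEM_RECLAIM workqueue guarantees a
rescuer thread, and it must be destroyed on the device-free path.

    /* start: fail before the writeback thread is created */
    dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
                                             WQ_MEM_RECLAIM, 0);
    if (!dc->writeback_write_wq)
            return -ENOMEM;

    /* free: after the writeback thread has been stopped */
    if (dc->writeback_write_wq)
            destroy_workqueue(dc->writeback_write_wq);
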
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 073a042aed24..daec4fd782ea 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
+static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
+{
+ uint64_t i, ret = 0;
+
+ mutex_lock(&bch_register_lock);
+
+ for (i = 0; i < c->nr_uuids; i++) {
+ struct bcache_device *d = c->devices[i];
+
+ if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ ret += bcache_dev_sectors_dirty(d);
+ }
+
+ mutex_unlock(&bch_register_lock);
+
+ return ret;
+}
+
static inline unsigned offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
@@ -85,7 +104,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_sectors_dirty_init(struct cached_dev *dc);
+void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4f22e919787a..7a50728b9389 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1960,6 +1960,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
long pages;
struct bitmap_page *new_bp;
+ if (bitmap->storage.file && !init) {
+ pr_info("md: cannot resize file-based bitmap\n");
+ return -EINVAL;
+ }
+
if (chunksize == 0) {
/* If there is enough space, leave the chunk size unchanged,
* else increase by factor of two until there is enough space.
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8f60520c8392..5eac08ffc697 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5822,6 +5822,8 @@ static void raid5_do_work(struct work_struct *work)
spin_unlock_irq(&conf->device_lock);
+ r5l_flush_stripe_to_raid(conf->log);
+
async_tx_issue_pending_all();
blk_finish_plug(&plug);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 66c5ce11ea3d..5bcb3034b82a 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -2916,6 +2916,8 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev,
* those state transitions instead of directly forcing stream to
* be INACTIVE
*/
+ memset(&stream_info->sw_skip, 0,
+ sizeof(struct msm_isp_sw_framskip));
intf = SRC_TO_INTF(stream_info->stream_src);
if (stream_info->lpm_mode == 0 &&
stream_info->state != PAUSED) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 2eaae18bc2e9..733aa4769941 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1198,6 +1198,8 @@ int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
{
struct msm_vidc_inst *inst = instance;
struct msm_vidc_capability *capability = NULL;
+ enum hal_video_codec codec;
+ int i;
if (!inst || !fsize) {
dprintk(VIDC_ERR, "%s: invalid parameter: %pK %pK\n",
@@ -1206,15 +1208,36 @@ int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
}
if (!inst->core)
return -EINVAL;
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ codec = get_hal_codec(fsize->pixel_format);
+ if (codec == HAL_UNUSED_CODEC)
+ return -EINVAL;
- capability = &inst->capability;
- fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise.min_width = capability->width.min;
- fsize->stepwise.max_width = capability->width.max;
- fsize->stepwise.step_width = capability->width.step_size;
- fsize->stepwise.min_height = capability->height.min;
- fsize->stepwise.max_height = capability->height.max;
- fsize->stepwise.step_height = capability->height.step_size;
+ for (i = 0; i < VIDC_MAX_SESSIONS; i++) {
+ if (inst->core->capabilities[i].codec == codec) {
+ capability = &inst->core->capabilities[i];
+ break;
+ }
+ }
+
+ if (capability) {
+ fsize->type = capability->width.step_size == 1 &&
+ capability->height.step_size == 1 ?
+ V4L2_FRMSIZE_TYPE_CONTINUOUS :
+ V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = capability->width.min;
+ fsize->stepwise.max_width = capability->width.max;
+ fsize->stepwise.step_width = capability->width.step_size;
+ fsize->stepwise.min_height = capability->height.min;
+ fsize->stepwise.max_height = capability->height.max;
+ fsize->stepwise.step_height = capability->height.step_size;
+ } else {
+ dprintk(VIDC_ERR, "%s: Invalid Pixel Fmt %#x\n",
+ __func__, fsize->pixel_format);
+ return -EINVAL;
+ }
return 0;
}
EXPORT_SYMBOL(msm_vidc_enum_framesizes);
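
From userspace, a hedged sketch of how the reworked ioctl is consumed (fd is
an already-open video node and the pixel format is only an example):

    struct v4l2_frmsizeenum fsize = {
            .index = 0,
            .pixel_format = V4L2_PIX_FMT_H264,
    };

    if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0) {
            /* CONTINUOUS is reported when both step sizes are 1;
             * the stepwise bounds are valid in either case
             */
            printf("%ux%u .. %ux%u\n",
                   fsize.stepwise.min_width, fsize.stepwise.min_height,
                   fsize.stepwise.max_width, fsize.stepwise.max_height);
    }
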
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 57d2f89350d2..9532235b07de 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -2004,6 +2004,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
goto done;
}
+ /* Validate the user-provided bit-size and offset */
+ if (mapping->size > 32 ||
+ mapping->offset + mapping->size > ctrl->info.size * 8) {
+ ret = -EINVAL;
+ goto done;
+ }
+
list_for_each_entry(map, &ctrl->info.mappings, list) {
if (mapping->id == map->id) {
uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 2f1c03783414..18045a7e24e0 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -788,7 +788,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
put_user(kp->pending, &up->pending) ||
put_user(kp->sequence, &up->sequence) ||
- compat_put_timespec(&kp->timestamp, &up->timestamp) ||
+ put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+ put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
put_user(kp->id, &up->id) ||
copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
return -EFAULT;
@@ -879,6 +880,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
int compatible_arg = 1;
long err = 0;
+ memset(&karg, 0, sizeof(karg));
/* First, convert the command. */
switch (cmd) {
case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index c6f2dbfe573d..687f55bd5afd 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -116,7 +116,7 @@
#define HDCP_CREATE_DEVICE_ID(x) (HDCP_DEVICE_ID | (x))
#define HDCP_TXMTR_HDMI HDCP_CREATE_DEVICE_ID(1)
-
+#define HDCP_TXMTR_DP HDCP_CREATE_DEVICE_ID(2)
#define HDCP_TXMTR_SERVICE_ID 0x0001000
#define SERVICE_CREATE_CMD(x) (HDCP_TXMTR_SERVICE_ID | x)
@@ -639,7 +639,8 @@ static int hdcp_lib_get_next_message(struct hdcp_lib_handle *handle,
case LC_SEND_L_PRIME_MESSAGE_ID:
return SKE_SEND_EKS_MESSAGE_ID;
case SKE_SEND_EKS_MESSAGE_ID:
- if (!handle->repeater_flag)
+ if (!handle->repeater_flag &&
+ handle->device_type == HDCP_TXMTR_DP)
return SKE_SEND_TYPE_ID;
case SKE_SEND_TYPE_ID:
case REPEATER_AUTH_STREAM_READY_MESSAGE_ID:
@@ -1778,6 +1779,19 @@ exit:
return rc;
}
+static void hdcp_lib_prep_type_id(struct hdcp_lib_handle *handle,
+ struct hdmi_hdcp_wakeup_data *cdata)
+{
+ memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+ handle->listener_buf[0] = SKE_SEND_TYPE_ID;
+ handle->msglen = 2;
+ cdata->cmd = HDMI_HDCP_WKUP_CMD_SEND_MESSAGE;
+ cdata->send_msg_buf = handle->listener_buf;
+ cdata->send_msg_len = handle->msglen;
+	handle->last_msg = hdcp_lib_get_next_message(handle, cdata);
+}
+
static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle)
{
struct hdmi_hdcp_wakeup_data cdata = { HDMI_HDCP_WKUP_CMD_INVALID };
@@ -1802,18 +1816,29 @@ static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle)
cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
break;
case SKE_SEND_EKS_MESSAGE_ID:
+ /*
+		 * a) if it is a repeater, irrespective of device type,
+		 * start CMD_LINK_POLL to trigger repeater auth
+		 * b) if it is not a repeater and the device is DP,
+		 * first send SKE_SEND_TYPE_ID and, upon success,
+		 * enable encryption
+		 * c) if it is not a repeater and the device is HDMI,
+		 * don't send SKE_SEND_TYPE_ID; enable encryption and
+		 * start part III of authentication
+ */
if (handle->repeater_flag) {
/* poll for link check */
cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
- } else {
- memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
- handle->listener_buf[0] = SKE_SEND_TYPE_ID;
- handle->msglen = 2;
- cdata.cmd = HDMI_HDCP_WKUP_CMD_SEND_MESSAGE;
- cdata.send_msg_buf = handle->listener_buf;
- cdata.send_msg_len = handle->msglen;
- handle->last_msg = hdcp_lib_get_next_message(handle,
- &cdata);
+ } else if (handle->device_type == HDCP_TXMTR_DP) {
+ hdcp_lib_prep_type_id(handle, &cdata);
+ } else if (handle->device_type == HDCP_TXMTR_HDMI) {
+ if (!hdcp_lib_enable_encryption(handle)) {
+ handle->authenticated = true;
+ cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS;
+ hdcp_lib_wakeup_client(handle, &cdata);
+ }
+ /* poll for link check */
+ cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
}
break;
case REPEATER_AUTH_SEND_ACK_MESSAGE_ID:
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4cd2a7d0124f..7923bfdc9b30 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3676,7 +3676,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
- u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
+ u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 829be21f97b2..be258d90de9e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
seg_hdr->segNum = seg_number;
seg_hdr->segSize = seg_size;
- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
/*
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 49d9f0a789fe..7d0690433ee0 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -541,9 +541,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
-
- /* Now we can run the state machine synchronously */
- phy_state_machine(&phydev->state_queue.work);
}
/**
diff --git a/drivers/net/wireless/cnss/Kconfig b/drivers/net/wireless/cnss/Kconfig
index 863f766bccdb..051b709f53f0 100644
--- a/drivers/net/wireless/cnss/Kconfig
+++ b/drivers/net/wireless/cnss/Kconfig
@@ -12,7 +12,7 @@ config CNSS
config CNSS_ASYNC
bool "Enable/disable cnss pci platform driver asynchronous probe"
- depends on CNSS
+ depends on CNSS || CNSS2
---help---
If enabled, CNSS PCI platform driver would do asynchronous probe.
Using asynchronous probe will allow CNSS PCI platform driver to
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 715a92d52a4e..80e2c8d7c86a 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -564,9 +564,15 @@ out:
static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
{
- int ret;
+ int ret = 0;
struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ cnss_pr_dbg("Skip driver probe\n");
+ goto out;
+ }
+
if (!plat_priv->driver_ops) {
cnss_pr_err("driver_ops is NULL\n");
ret = -EINVAL;
@@ -604,6 +610,13 @@ static int cnss_driver_call_remove(struct cnss_plat_data *plat_priv)
{
struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+ test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Skip driver remove\n");
+ return 0;
+ }
+
if (!plat_priv->driver_ops) {
cnss_pr_err("driver_ops is NULL\n");
return -EINVAL;
@@ -997,11 +1010,6 @@ static int cnss_qca6174_powerup(struct cnss_plat_data *plat_priv)
return -ENODEV;
}
- if (!plat_priv->driver_ops) {
- cnss_pr_err("driver_ops is NULL!\n");
- return -EINVAL;
- }
-
ret = cnss_power_on_device(plat_priv);
if (ret) {
cnss_pr_err("Failed to power on device, err = %d\n", ret);
@@ -1035,15 +1043,8 @@ static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
if (!pci_priv)
return -ENODEV;
- if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state))
- goto skip_driver_remove;
-
- if (!plat_priv->driver_ops)
- return -EINVAL;
-
cnss_driver_call_remove(plat_priv);
-skip_driver_remove:
cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
cnss_pci_set_monitor_wake_intr(pci_priv, false);
cnss_pci_set_auto_suspended(pci_priv, 0);
@@ -1140,17 +1141,8 @@ static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
if (!pci_priv)
return -ENODEV;
- if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
- test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
- test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state))
- goto skip_driver_remove;
-
- if (!plat_priv->driver_ops)
- return -EINVAL;
-
cnss_driver_call_remove(plat_priv);
-skip_driver_remove:
cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
cnss_pci_set_monitor_wake_intr(pci_priv, false);
cnss_pci_set_auto_suspended(pci_priv, 0);
@@ -2318,6 +2310,9 @@ static struct platform_driver cnss_platform_driver = {
.name = "cnss2",
.owner = THIS_MODULE,
.of_match_table = cnss_of_match_table,
+#ifdef CONFIG_CNSS_ASYNC
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+#endif
},
};
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 2c297fba5c34..22fad9210945 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -60,7 +60,6 @@ MODULE_PARM_DESC(fbc_bypass,
static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
{
- int ret = 0;
struct pci_dev *pci_dev = pci_priv->pci_dev;
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
bool link_down_or_recovery;
@@ -80,12 +79,8 @@ static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
}
} else {
if (link_down_or_recovery) {
- ret = msm_pcie_recover_config(pci_dev);
- if (ret) {
- cnss_pr_err("Failed to recover PCI config space, err = %d\n",
- ret);
- return ret;
- }
+ pci_load_saved_state(pci_dev, pci_priv->default_state);
+ pci_restore_state(pci_dev);
} else if (pci_priv->saved_state) {
pci_load_and_free_saved_state(pci_dev,
&pci_priv->saved_state);
@@ -100,27 +95,15 @@ static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
{
int ret = 0;
struct pci_dev *pci_dev = pci_priv->pci_dev;
- struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
- bool link_down_or_recovery;
-
- if (!plat_priv)
- return -ENODEV;
-
- link_down_or_recovery = pci_priv->pci_link_down_ind ||
- (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
ret = msm_pcie_pm_control(link_up ? MSM_PCIE_RESUME :
MSM_PCIE_SUSPEND,
pci_dev->bus->number,
pci_dev, NULL,
- link_down_or_recovery ?
- PM_OPTIONS_LINK_DOWN :
PM_OPTIONS_DEFAULT);
if (ret) {
- cnss_pr_err("Failed to %s PCI link with %s option, err = %d\n",
- link_up ? "resume" : "suspend",
- link_down_or_recovery ? "link down" : "default",
- ret);
+ cnss_pr_err("Failed to %s PCI link with default option, err = %d\n",
+ link_up ? "resume" : "suspend", ret);
return ret;
}
@@ -1443,6 +1426,9 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
if (ret)
goto dereg_pci_event;
+ pci_save_state(pci_dev);
+ pci_priv->default_state = pci_store_saved_state(pci_dev);
+
switch (pci_dev->device) {
case QCA6174_DEVICE_ID:
pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
@@ -1514,6 +1500,8 @@ static void cnss_pci_remove(struct pci_dev *pci_dev)
break;
}
+ pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
+
cnss_pci_disable_bus(pci_priv);
cnss_dereg_pci_event(pci_priv);
if (pci_priv->smmu_mapping)
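
Taken together, the pci.c hunks give the driver a save/restore lifecycle for
PCI config space in place of the MSM-specific recovery call. Condensed
sketch (pdev and pci_priv as in the driver):

    /* probe: snapshot pristine config space */
    pci_save_state(pdev);
    pci_priv->default_state = pci_store_saved_state(pdev);

    /* link-down recovery: rewind to that snapshot */
    pci_load_saved_state(pdev, pci_priv->default_state);
    pci_restore_state(pdev);

    /* remove: drop any runtime-saved state */
    pci_load_and_free_saved_state(pdev, &pci_priv->saved_state);
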
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 4dc29c3c1f10..e26e331e2f8a 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -65,6 +65,7 @@ struct cnss_pci_data {
bool pci_link_state;
bool pci_link_down_ind;
struct pci_saved_state *saved_state;
+ struct pci_saved_state *default_state;
struct msm_pcie_register_event msm_pci_event;
atomic_t auto_suspended;
bool monitor_wake_intr;
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index c3331d6201c3..9a8982f581c5 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -3740,7 +3740,7 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
if (adapter->config_bands & BAND_A)
n_channels_a = mwifiex_band_5ghz.n_channels;
- adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
+ adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
adapter->num_in_chan_stats);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index c20017ced566..fb98f42cb5e7 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -2170,6 +2170,12 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_stats);
for (i = 0 ; i < num_chan; i++) {
+ if (adapter->survey_idx >= adapter->num_in_chan_stats) {
+ mwifiex_dbg(adapter, WARN,
+ "FW reported too many channel results (max %d)\n",
+ adapter->num_in_chan_stats);
+ return;
+ }
chan_stats.chan_num = fw_chan_stats->chan_num;
chan_stats.bandcfg = fw_chan_stats->bandcfg;
chan_stats.flags = fw_chan_stats->flags;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 257a9eadd595..4ac6764f4897 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
entry += sizeof(__le16);
chan->pa_points_per_curve = 8;
- memset(chan->curve_data, 0, sizeof(*chan->curve_data));
+ memset(chan->curve_data, 0, sizeof(chan->curve_data));
memcpy(chan->curve_data, entry,
sizeof(struct p54_pa_curve_data_sample) *
min((u8)8, curve_data->points_per_channel));
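
The one-character fix above is the classic sizeof pitfall: curve_data is an
array member, so the old expression zeroed only its first element.
Illustration with assumed types:

    struct sample { u8 data[4]; };
    struct chan_example { struct sample curve_data[8]; } c;

    memset(c.curve_data, 0, sizeof(c.curve_data));
    /* sizeof(c.curve_data)  == 32 : the whole array    */
    /* sizeof(*c.curve_data) ==  4 : first element only */
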
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index a52230377e2c..c48b7e8ee0d6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2269,7 +2269,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
/* find adapter */
if (!_rtl_pci_find_adapter(pdev, hw)) {
err = -ENODEV;
- goto fail3;
+ goto fail2;
}
/* Init IO handler */
@@ -2339,10 +2339,10 @@ fail3:
pci_set_drvdata(pdev, NULL);
rtl_deinit_core(hw);
+fail2:
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
-fail2:
pci_release_regions(pdev);
complete(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index cd4777954f87..9bee3f11898a 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1567,6 +1567,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
+ spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 7d223e9080ef..77dddee2753a 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
if (rc) {
ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
+ } else {
+ pci_set_master(pdev);
}
rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 720a6c1945f6..b75d7db57c3e 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -412,6 +412,7 @@ struct fg_chip {
int batt_id_ohms;
int ki_coeff_full_soc;
int charge_status;
+ int prev_charge_status;
int charge_done;
int charge_type;
int online_status;
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index 0cb1dea7113b..23dd9131d402 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -264,8 +264,7 @@ static inline bool fg_sram_address_valid(u16 address, int len)
int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
u8 *val, int len, int flags)
{
- int rc = 0;
- bool tried_again = false;
+ int rc = 0, tries = 0;
bool atomic_access = false;
if (!chip)
@@ -292,7 +291,7 @@ int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
} else {
flags = FG_IMA_DEFAULT;
}
-wait:
+
/*
	 * Atomic access means waiting for the SOC_UPDATE interrupt from
	 * FG_ALG and doing the transaction after that. This is to make
@@ -301,16 +300,20 @@ wait:
* FG cycle (~1.47 seconds).
*/
if (atomic_access) {
- /* Wait for SOC_UPDATE completion */
- rc = wait_for_completion_interruptible_timeout(
- &chip->soc_update,
- msecs_to_jiffies(SOC_UPDATE_WAIT_MS));
-
- /* If we were interrupted wait again one more time. */
- if (rc == -ERESTARTSYS && !tried_again) {
- tried_again = true;
- goto wait;
- } else if (rc <= 0) {
+ for (tries = 0; tries < 2; tries++) {
+ /* Wait for SOC_UPDATE completion */
+ rc = wait_for_completion_interruptible_timeout(
+ &chip->soc_update,
+ msecs_to_jiffies(SOC_UPDATE_WAIT_MS));
+ if (rc > 0) {
+ rc = 0;
+ break;
+ } else if (!rc) {
+ rc = -ETIMEDOUT;
+ }
+ }
+
+ if (rc < 0) {
pr_err("wait for soc_update timed out rc=%d\n", rc);
goto out;
}
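
The rewritten wait relies on the three-way return value of
wait_for_completion_interruptible_timeout(): positive remaining jiffies on
completion, 0 on timeout, -ERESTARTSYS when a signal arrived. Reduced
sketch of the bounded retry (done and timeout_ms are assumed locals):

    long rc = -ETIMEDOUT;
    int tries;

    for (tries = 0; tries < 2; tries++) {
            rc = wait_for_completion_interruptible_timeout(&done,
                            msecs_to_jiffies(timeout_ms));
            if (rc > 0)
                    return 0;               /* completed */
            if (rc == 0)
                    rc = -ETIMEDOUT;        /* timed out; one more try */
    }
    return rc;  /* -ETIMEDOUT or -ERESTARTSYS after both attempts */
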
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 79a80b6e6c7c..256d9ed8ada5 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -562,6 +562,21 @@ static int fg_get_charge_raw(struct fg_chip *chip, int *val)
return 0;
}
+#define BATT_SOC_32BIT GENMASK(31, 0)
+static int fg_get_charge_counter_shadow(struct fg_chip *chip, int *val)
+{
+ int rc, batt_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+ if (rc < 0) {
+ pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_u64((u32)batt_soc * chip->cl.learned_cc_uah, BATT_SOC_32BIT);
+ return 0;
+}
+
static int fg_get_charge_counter(struct fg_chip *chip, int *val)
{
int rc, cc_soc;
@@ -1148,7 +1163,7 @@ static int fg_batt_miss_irq_en_cb(struct votable *votable, void *data,
enable_irq_wake(chip->irqs[BATT_MISSING_IRQ].irq);
} else {
disable_irq_wake(chip->irqs[BATT_MISSING_IRQ].irq);
- disable_irq(chip->irqs[BATT_MISSING_IRQ].irq);
+ disable_irq_nosync(chip->irqs[BATT_MISSING_IRQ].irq);
}
return 0;
@@ -1167,7 +1182,7 @@ static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data,
enable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
} else {
disable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
- disable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+ disable_irq_nosync(chip->irqs[BSOC_DELTA_IRQ].irq);
}
return 0;
@@ -1249,6 +1264,21 @@ static bool is_parallel_charger_available(struct fg_chip *chip)
return true;
}
+static int fg_prime_cc_soc_sw(struct fg_chip *chip, int cc_soc_sw)
+{
+ int rc;
+
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+ chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+ chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+ if (rc < 0)
+ pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+ else
+ fg_dbg(chip, FG_STATUS, "cc_soc_sw: %x\n", cc_soc_sw);
+
+ return rc;
+}
+
static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
{
int16_t cc_mah;
@@ -1434,7 +1464,6 @@ static int fg_cap_learning_process_full_data(struct fg_chip *chip)
return 0;
}
-#define BATT_SOC_32BIT GENMASK(31, 0)
static int fg_cap_learning_begin(struct fg_chip *chip, u32 batt_soc)
{
int rc, cc_soc_sw, batt_soc_msb;
@@ -1453,16 +1482,13 @@ static int fg_cap_learning_begin(struct fg_chip *chip, u32 batt_soc)
/* Prime cc_soc_sw with battery SOC when capacity learning begins */
cc_soc_sw = div64_s64((int64_t)batt_soc * CC_SOC_30BIT,
BATT_SOC_32BIT);
- rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
- chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
- chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+ rc = fg_prime_cc_soc_sw(chip, cc_soc_sw);
if (rc < 0) {
pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
goto out;
}
chip->cl.init_cc_soc_sw = cc_soc_sw;
- chip->cl.active = true;
fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
batt_soc_msb, chip->cl.init_cc_soc_sw);
out:
@@ -1482,9 +1508,7 @@ static int fg_cap_learning_done(struct fg_chip *chip)
/* Write a FULL value to cc_soc_sw */
cc_soc_sw = CC_SOC_30BIT;
- rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
- chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
- chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+ rc = fg_prime_cc_soc_sw(chip, cc_soc_sw);
if (rc < 0) {
pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
goto out;
@@ -1497,8 +1521,9 @@ out:
static void fg_cap_learning_update(struct fg_chip *chip)
{
- int rc, batt_soc, batt_soc_msb;
+ int rc, batt_soc, batt_soc_msb, cc_soc_sw;
bool input_present = is_input_present(chip);
+ bool prime_cc = false;
mutex_lock(&chip->cl.lock);
@@ -1511,6 +1536,9 @@ static void fg_cap_learning_update(struct fg_chip *chip)
goto out;
}
+ if (chip->charge_status == chip->prev_charge_status)
+ goto out;
+
rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
if (rc < 0) {
pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
@@ -1526,8 +1554,12 @@ static void fg_cap_learning_update(struct fg_chip *chip)
if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
rc = fg_cap_learning_begin(chip, batt_soc);
chip->cl.active = (rc == 0);
+ } else {
+ if ((chip->charge_status ==
+ POWER_SUPPLY_STATUS_DISCHARGING) ||
+ chip->charge_done)
+ prime_cc = true;
}
-
} else {
if (chip->charge_done) {
rc = fg_cap_learning_done(chip);
@@ -1545,6 +1577,7 @@ static void fg_cap_learning_update(struct fg_chip *chip)
batt_soc_msb);
chip->cl.active = false;
chip->cl.init_cc_uah = 0;
+ prime_cc = true;
}
}
@@ -1561,10 +1594,29 @@ static void fg_cap_learning_update(struct fg_chip *chip)
batt_soc_msb);
chip->cl.active = false;
chip->cl.init_cc_uah = 0;
+ prime_cc = true;
}
}
}
+ /*
+ * Prime CC_SOC_SW when the device is not charging or during charge
+ * termination when the capacity learning is not active.
+ */
+
+ if (prime_cc) {
+ if (chip->charge_done)
+ cc_soc_sw = CC_SOC_30BIT;
+ else
+ cc_soc_sw = div_u64((u32)batt_soc *
+ CC_SOC_30BIT, BATT_SOC_32BIT);
+
+ rc = fg_prime_cc_soc_sw(chip, cc_soc_sw);
+ if (rc < 0)
+ pr_err("Error in writing cc_soc_sw, rc=%d\n",
+ rc);
+ }
+
out:
mutex_unlock(&chip->cl.lock);
}
@@ -1779,7 +1831,8 @@ static int fg_charge_full_update(struct fg_chip *chip)
fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
msoc);
}
- } else if (msoc_raw <= recharge_soc && chip->charge_full) {
+ } else if ((msoc_raw <= recharge_soc || !chip->charge_done)
+ && chip->charge_full) {
if (chip->dt.linearize_soc) {
chip->delta_soc = FULL_CAPACITY - msoc;
@@ -2563,7 +2616,7 @@ static void status_change_work(struct work_struct *work)
}
fg_ttf_update(chip);
-
+ chip->prev_charge_status = chip->charge_status;
out:
fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
chip->charge_status, chip->charge_type, chip->charge_done);
@@ -3552,6 +3605,9 @@ static int fg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
rc = fg_get_charge_counter(chip, &pval->intval);
break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
+ rc = fg_get_charge_counter_shadow(chip, &pval->intval);
+ break;
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
rc = fg_get_time_to_full(chip, &pval->intval);
break;
@@ -3762,6 +3818,7 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
POWER_SUPPLY_PROP_SOC_REPORTING_READY,
@@ -4958,6 +5015,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
chip->debug_mask = &fg_gen3_debug_mask;
chip->irqs = fg_irqs;
chip->charge_status = -EINVAL;
+ chip->prev_charge_status = -EINVAL;
chip->ki_coeff_full_soc = -EINVAL;
chip->online_status = -EINVAL;
chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index d5bf36ec8a75..34367d172961 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -447,6 +447,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
struct scatterlist *resp_entry = ct_els->resp;
+ struct fc_ct_hdr *resph;
struct fc_gpn_ft_resp *acc;
int max_entries, x, last = 0;
@@ -473,6 +474,13 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
return len; /* not GPN_FT response so do not cap */
acc = sg_virt(resp_entry);
+
+	/* cap all non-accept CT responses, but keep at least the CT header */
+ resph = (struct fc_ct_hdr *)acc;
+ if ((ct_els->status) ||
+ (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
+ return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
+
max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
* to account for header as 1st pseudo "entry" */;
@@ -555,8 +563,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
- /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
rec->scsi_lun = (u32)sc->device->lun;
+ rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (unsigned long)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
@@ -564,19 +572,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
if (fsf) {
rec->fsf_req_id = fsf->req_id;
+ rec->pl_len = FCP_RESP_WITH_EXT;
fcp_rsp = (struct fcp_resp_with_ext *)
&(fsf->qtcb->bottom.io.fcp_rsp);
+ /* mandatory parts of FCP_RSP IU in this SCSI record */
memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
}
if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
- rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
- (u16)ZFCP_DBF_PAY_MAX_REC);
- zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
- "fcp_sns", fsf->req_id);
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
}
+ /* complete FCP_RSP IU in associated PAYload record
+ * but only if there are optional parts
+ */
+ if (fcp_rsp->resp.fr_flags != 0)
+ zfcp_dbf_pl_write(
+ dbf, fcp_rsp,
+ /* at least one full PAY record
+ * but not beyond hardware response field
+ */
+ min_t(u16, max_t(u16, rec->pl_len,
+ ZFCP_DBF_PAY_MAX_REC),
+ FSF_FCP_RSP_SIZE),
+ "fcp_riu", fsf->req_id);
}
debug_event(dbf->scsi, level, rec, sizeof(*rec));
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index db186d44cfaf..b60667c145fd 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2016
+ * Copyright IBM Corp. 2008, 2017
*/
#ifndef ZFCP_DBF_H
@@ -204,7 +204,7 @@ enum zfcp_dbf_scsi_id {
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @scsi_id: scsi device id
- * @scsi_lun: scsi device logical unit number
+ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
* @scsi_result: scsi result
* @scsi_retries: current retry number of scsi request
* @scsi_allowed: allowed retries
@@ -214,6 +214,7 @@ enum zfcp_dbf_scsi_id {
* @host_scribble: LLD specific data attached to SCSI request
 * @pl_len: length of payload stored as zfcp_dbf_pay
* @fsf_rsp: response for fsf request
+ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
*/
struct zfcp_dbf_scsi {
u8 id;
@@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
u64 host_scribble;
u16 pl_len;
struct fcp_resp_with_ext fcp_rsp;
+ u32 scsi_lun_64_hi;
} __packed;
/**
@@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
{
struct fsf_qtcb *qtcb = req->qtcb;
- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
+ ZFCP_STATUS_FSFREQ_ERROR))) {
+ zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
+
+ } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
(qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
@@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
*/
static inline
-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
+ struct zfcp_fsf_req *fsf_req)
{
char tmp_tag[ZFCP_DBF_TAG_LEN];
@@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
memcpy(tmp_tag, "lr_", 3);
memcpy(&tmp_tag[3], tag, 4);
- _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
+ _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
}
/**
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index df2b541c8287..a2275825186f 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -4,7 +4,7 @@
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2017
*/
#ifndef ZFCP_FC_H
@@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
!(rsp_flags & FCP_SNS_LEN_VAL) &&
fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
+ } else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
+ /* FCP_DL was not sufficient for SCSI data length */
+ if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ set_host_byte(scsi, DID_ERROR);
}
}
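
[Editor's note] The zfcp_fc.h hunk flags an FCP residual overrun as a transport error when the SCSI status is otherwise GOOD: FCP_DL was too small for the data the target wanted to move, so the "good" completion cannot be trusted. A hedged sketch of that decision (the flag bit is an assumption for illustration):

#include <stdio.h>

#define FCP_RESID_OVER 0x04  /* assumed flag bit, for illustration only */
#define SAM_STAT_GOOD  0x00
#define DID_ERROR      0x07

static int host_byte_for(unsigned int rsp_flags, unsigned char status)
{
	/* a data overrun with a GOOD status cannot be trusted */
	if ((rsp_flags & FCP_RESID_OVER) && status == SAM_STAT_GOOD)
		return DID_ERROR;
	return 0; /* DID_OK */
}

int main(void)
{
	printf("%d %d\n", host_byte_for(FCP_RESID_OVER, SAM_STAT_GOOD),
	       host_byte_for(0, SAM_STAT_GOOD)); /* 7 0 */
	return 0;
}
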
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 27ff38f839fc..1964391db904 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -928,8 +928,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
+ zfcp_dbf_san_res("fsscth2", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -1109,8 +1109,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsselh1", req);
send_els->status = 0;
+ zfcp_dbf_san_res("fsselh1", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -2258,7 +2258,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
- if (scsi_prot_sg_count(scsi_cmnd)) {
+ if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
+ scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 07ffdbb5107f..9bd9b9a29dfc 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
- if (ret)
+ if (ret) {
+ zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
return ret;
+ }
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
- zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
return SUCCESS;
}
}
- if (!fsf_req)
+ if (!fsf_req) {
+ zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
return FAILED;
+ }
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
- zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
retval = FAILED;
} else {
- zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100638a2..00602abec0ea 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;
+ if (state >= ARRAY_SIZE(strings))
+ return "UNKNOWN";
+
return strings[state];
}
#undef C
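
[Editor's note] The isci change is the classic guard for table-driven state names: validate the index against the table size before dereferencing. A small standalone sketch (state names are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum rnc_state { RNC_INITIAL, RNC_POSTING, RNC_READY, RNC_STATE_MAX };

static const char *rnc_state_name(unsigned int state)
{
	static const char *const strings[] = { "INITIAL", "POSTING", "READY" };

	if (state >= ARRAY_SIZE(strings))
		return "UNKNOWN";  /* out-of-range state, no wild read */
	return strings[state];
}

int main(void)
{
	printf("%s %s\n", rnc_state_name(RNC_READY), rnc_state_name(42));
	return 0;
}
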
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 0e6aaef9a038..c74f74ab981c 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1054,7 +1054,10 @@ stop_rr_fcf_flogi:
lpfc_sli4_unreg_all_rpis(vport);
}
}
- lpfc_issue_reg_vfi(vport);
+
+ /* Do not register VFI if the driver aborted FLOGI */
+ if (!lpfc_error_lost_link(irsp))
+ lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
goto out;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 17c440b9d086..6835bae33ec4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1824,9 +1824,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->sync_cmd &&
- cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+ (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
+ cmd_mfi->frame->hdr.cmd_status =
+ MFI_STAT_WRONG_STATE;
megasas_complete_cmd(instance,
cmd_mfi, DID_OK);
+ }
}
}
} else {
@@ -5094,6 +5097,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
prev_aen.word =
le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
+ if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
+ (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
+ dev_info(&instance->pdev->dev,
+ "%s %d out of range class %d send by application\n",
+ __func__, __LINE__, curr_aen.members.class);
+ return 0;
+ }
+
/*
* A class whose enum value is smaller is inclusive of all
* higher values. If a PROGRESS (= -1) was previously
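
[Editor's note] The megaraid_sas hunk stops trusting the event class supplied by an application ioctl and rejects out-of-range values before they are used. A sketch of the same validation, assuming the MFI class range spans DEBUG (-2) through DEAD (4), which is what the bounds check above implies:

#include <stdio.h>

enum evt_class {
	EVT_CLASS_DEBUG    = -2,  /* assumed enum values, for illustration */
	EVT_CLASS_PROGRESS = -1,
	EVT_CLASS_INFO     = 0,
	EVT_CLASS_WARNING  = 1,
	EVT_CLASS_CRITICAL = 2,
	EVT_CLASS_FATAL    = 3,
	EVT_CLASS_DEAD     = 4,
};

static int register_aen(int class)
{
	if (class < EVT_CLASS_DEBUG || class > EVT_CLASS_DEAD) {
		fprintf(stderr, "out of range class %d sent by application\n",
			class);
		return 0;  /* ignore the request, as the patch does */
	}
	printf("class %d accepted\n", class);
	return 0;
}

int main(void)
{
	register_aen(1);
	register_aen(99);  /* untrusted ioctl input: rejected */
	return 0;
}
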
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1ed85dfc008d..ac12ee844bfc 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -404,6 +404,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (start > ha->optrom_size)
return -EINVAL;
+ if (size > ha->optrom_size - start)
+ size = ha->optrom_size - start;
mutex_lock(&ha->optrom_mutex);
switch (val) {
@@ -429,8 +431,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size > ha->optrom_size ?
- ha->optrom_size - start : size;
+ ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -503,8 +504,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size > ha->optrom_size ?
- ha->optrom_size - start : size;
+ ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
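
[Editor's note] The qla2xxx change clamps the user-supplied size once, up front, so that start + size can never run past the option ROM and the later region arithmetic is simply start + size. A minimal sketch of the early clamp (sizes are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Clamp a user-supplied (start, size) window to the flash size before
 * any region math, so start + size cannot overflow the device bounds. */
static uint32_t clamp_region(uint32_t start, uint32_t size, uint32_t total)
{
	if (start > total)
		return 0;               /* caller rejects with -EINVAL */
	if (size > total - start)
		size = total - start;   /* truncate instead of overflowing */
	return size;
}

int main(void)
{
	printf("%u\n", clamp_region(0x1000, 0x2000, 0x2800)); /* 6144 (0x1800) */
	return 0;
}
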
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 29d8c74e85e3..b0e2e292e3cb 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -133,7 +133,7 @@ struct sg_device; /* forward declarations */
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
- struct sg_request *nextrp; /* NULL -> tail request (slist) */
+ struct list_head entry; /* list entry */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
@@ -153,11 +153,11 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
+ struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
- unsigned save_scat_len; /* original length of trunc. scat. element */
- Sg_request *headrp; /* head of request slist, NULL->empty */
+ struct list_head rq_list; /* head of request list */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char low_dma; /* as in parent but possibly overridden to 1 */
@@ -166,6 +166,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
+ char res_in_use; /* 1 -> 'reserve' array in use */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
@@ -209,7 +210,6 @@ static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
-static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
@@ -625,6 +625,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
+ mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */
@@ -633,6 +634,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
+ mutex_unlock(&sfp->f_mutex);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
@@ -732,7 +734,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
- if (sg_res_in_use(sfp)) {
+ if (sfp->res_in_use) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
@@ -837,6 +839,39 @@ static int max_sectors_bytes(struct request_queue *q)
return max_sectors << 9;
}
+static void
+sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
+{
+ Sg_request *srp;
+ int val;
+ unsigned int ms;
+
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ if (val > SG_MAX_QUEUE)
+ break;
+ rinfo[val].req_state = srp->done + 1;
+ rinfo[val].problem =
+ srp->header.masked_status &
+ srp->header.host_status &
+ srp->header.driver_status;
+ if (srp->done)
+ rinfo[val].duration =
+ srp->header.duration;
+ else {
+ ms = jiffies_to_msecs(jiffies);
+ rinfo[val].duration =
+ (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
+ }
+ rinfo[val].orphan = srp->orphan;
+ rinfo[val].sg_io_owned = srp->sg_io_owned;
+ rinfo[val].pack_id = srp->header.pack_id;
+ rinfo[val].usr_ptr = srp->header.usr_ptr;
+ val++;
+ }
+}
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -902,7 +937,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return result;
if (val) {
sfp->low_dma = 1;
- if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+ if ((0 == sfp->low_dma) && !sfp->res_in_use) {
val = (int) sfp->reserve.bufflen;
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
@@ -948,7 +983,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
@@ -961,7 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
@@ -977,12 +1013,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
+ mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
- if (sg_res_in_use(sfp) || sfp->mmap_called)
+ if (sfp->mmap_called ||
+ sfp->res_in_use) {
+ mutex_unlock(&sfp->f_mutex);
return -EBUSY;
+ }
+
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
+ mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
@@ -1023,42 +1065,15 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EFAULT;
else {
sg_req_info_t *rinfo;
- unsigned int ms;
- rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
- GFP_KERNEL);
+ rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+ GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
- ++val, srp = srp ? srp->nextrp : srp) {
- memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
- if (srp) {
- rinfo[val].req_state = srp->done + 1;
- rinfo[val].problem =
- srp->header.masked_status &
- srp->header.host_status &
- srp->header.driver_status;
- if (srp->done)
- rinfo[val].duration =
- srp->header.duration;
- else {
- ms = jiffies_to_msecs(jiffies);
- rinfo[val].duration =
- (ms > srp->header.duration) ?
- (ms - srp->header.duration) : 0;
- }
- rinfo[val].orphan = srp->orphan;
- rinfo[val].sg_io_owned =
- srp->sg_io_owned;
- rinfo[val].pack_id =
- srp->header.pack_id;
- rinfo[val].usr_ptr =
- srp->header.usr_ptr;
- }
- }
+ sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- result = __copy_to_user(p, rinfo,
+ result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
@@ -1164,7 +1179,7 @@ sg_poll(struct file *filp, poll_table * wait)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
@@ -1245,6 +1260,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
+ int ret = 0;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
@@ -1255,8 +1271,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
- if (req_sz > rsv_schp->bufflen)
- return -ENOMEM; /* cannot map more than reserved buffer */
+ mutex_lock(&sfp->f_mutex);
+ if (req_sz > rsv_schp->bufflen) {
+ ret = -ENOMEM; /* cannot map more than reserved buffer */
+ goto out;
+ }
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
@@ -1270,7 +1289,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
- return 0;
+out:
+ mutex_unlock(&sfp->f_mutex);
+ return ret;
}
static void
@@ -1734,13 +1755,25 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md = &map_data;
if (md) {
- if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+ mutex_lock(&sfp->f_mutex);
+ if (dxfer_len <= rsv_schp->bufflen &&
+ !sfp->res_in_use) {
+ sfp->res_in_use = 1;
sg_link_reserve(sfp, srp, dxfer_len);
- else {
+ } else if (hp->flags & SG_FLAG_MMAP_IO) {
+ res = -EBUSY; /* sfp->res_in_use == 1 */
+ if (dxfer_len > rsv_schp->bufflen)
+ res = -ENOMEM;
+ mutex_unlock(&sfp->f_mutex);
+ return res;
+ } else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
- if (res)
+ if (res) {
+ mutex_unlock(&sfp->f_mutex);
return res;
+ }
}
+ mutex_unlock(&sfp->f_mutex);
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
@@ -2029,8 +2062,9 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
- sfp->save_scat_len = 0;
srp->res_used = 0;
+ /* Called without mutex lock to avoid deadlock */
+ sfp->res_in_use = 0;
}
static Sg_request *
@@ -2040,7 +2074,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (resp = sfp->headrp; resp; resp = resp->nextrp) {
+ list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
@@ -2058,70 +2092,45 @@ sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
- Sg_request *resp;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- resp = sfp->headrp;
- if (!resp) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- resp = rp;
- sfp->headrp = resp;
- } else {
- if (0 == sfp->cmd_q)
- resp = NULL; /* command queuing disallowed */
- else {
- for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
- if (!rp->parentfp)
- break;
- }
- if (k < SG_MAX_QUEUE) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- while (resp->nextrp)
- resp = resp->nextrp;
- resp->nextrp = rp;
- resp = rp;
- } else
- resp = NULL;
+ if (!list_empty(&sfp->rq_list)) {
+ if (!sfp->cmd_q)
+ goto out_unlock;
+
+ for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
+ if (!rp->parentfp)
+ break;
}
+ if (k >= SG_MAX_QUEUE)
+ goto out_unlock;
}
- if (resp) {
- resp->nextrp = NULL;
- resp->header.duration = jiffies_to_msecs(jiffies);
- }
+ memset(rp, 0, sizeof (Sg_request));
+ rp->parentfp = sfp;
+ rp->header.duration = jiffies_to_msecs(jiffies);
+ list_add_tail(&rp->entry, &sfp->rq_list);
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ return rp;
+out_unlock:
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return NULL;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
- Sg_request *prev_rp;
- Sg_request *rp;
unsigned long iflags;
int res = 0;
- if ((!sfp) || (!srp) || (!sfp->headrp))
+ if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- prev_rp = sfp->headrp;
- if (srp == prev_rp) {
- sfp->headrp = prev_rp->nextrp;
- prev_rp->parentfp = NULL;
+ if (!list_empty(&srp->entry)) {
+ list_del(&srp->entry);
+ srp->parentfp = NULL;
res = 1;
- } else {
- while ((rp = prev_rp->nextrp)) {
- if (srp == rp) {
- prev_rp->nextrp = rp->nextrp;
- rp->parentfp = NULL;
- res = 1;
- break;
- }
- prev_rp = rp;
- }
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
@@ -2140,8 +2149,9 @@ sg_add_sfp(Sg_device * sdp)
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
-
+ INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
+ mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
@@ -2180,10 +2190,13 @@ sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
+ Sg_request *srp;
/* Cleanup any responses which were never read(). */
- while (sfp->headrp)
- sg_finish_rem_req(sfp->headrp);
+ while (!list_empty(&sfp->rq_list)) {
+ srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
+ sg_finish_rem_req(srp);
+ }
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
@@ -2217,20 +2230,6 @@ sg_remove_sfp(struct kref *kref)
schedule_work(&sfp->ew.work);
}
-static int
-sg_res_in_use(Sg_fd * sfp)
-{
- const Sg_request *srp;
- unsigned long iflags;
-
- read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp)
- if (srp->res_used)
- break;
- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return srp ? 1 : 0;
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
@@ -2600,7 +2599,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
- int k, m, new_interface, blen, usg;
+ int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
@@ -2620,13 +2619,11 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
- for (m = 0, srp = fp->headrp;
- srp != NULL;
- ++m, srp = srp->nextrp) {
+ list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
- if (new_interface &&
+ if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
@@ -2657,7 +2654,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
- if (0 == m)
+ if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
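
[Editor's note] The sg rework replaces the hand-rolled singly linked request list (headrp/nextrp) with the kernel's intrusive doubly linked list_head, giving O(1) removal and the list_for_each_entry-style iteration used throughout the new code. A self-contained userspace miniature of that API (simplified, no locking):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n;  /* safe against repeated emptiness tests */
}

static int list_empty(const struct list_head *h) { return h->next == h; }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct request { int id; struct list_head entry; };

int main(void)
{
	struct list_head rq_list;
	struct request a = { .id = 1 }, b = { .id = 2 };
	struct list_head *p;

	INIT_LIST_HEAD(&rq_list);
	list_add_tail(&a.entry, &rq_list);
	list_add_tail(&b.entry, &rq_list);

	for (p = rq_list.next; p != &rq_list; p = p->next)
		printf("req %d\n", container_of(p, struct request, entry)->id);

	list_del(&a.entry);                         /* O(1), no list walk */
	printf("empty=%d\n", list_empty(&rq_list)); /* 0: b remains */
	return 0;
}
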
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index cd5c1c060481..6df2841cb7f9 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1511,6 +1511,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
ret = storvsc_do_io(dev, cmd_request);
if (ret == -EAGAIN) {
+ if (payload_sz > sizeof(cmd_request->mpb))
+ kfree(payload);
/* no more space */
return SCSI_MLQUEUE_DEVICE_BUSY;
}
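
[Editor's note] The storvsc hunk plugs a memory leak: the payload is heap-allocated only for large transfers, and the early -EAGAIN return never freed it. A hedged sketch of the allocate-inline-or-heap pattern with the added cleanup (error values are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct request { char mpb[32]; };  /* inline payload for small transfers */

static int do_io(void) { return -11; /* pretend EAGAIN: ring full */ }

static int queuecommand(size_t payload_sz)
{
	struct request req;
	void *payload = req.mpb;
	int ret;

	if (payload_sz > sizeof(req.mpb)) {
		payload = malloc(payload_sz);   /* large transfer: heap */
		if (!payload)
			return -1;
	}
	memset(payload, 0, payload_sz);

	ret = do_io();
	if (ret == -11) {
		if (payload_sz > sizeof(req.mpb))
			free(payload);  /* the leak the patch plugs */
		return -2;              /* requeue: DEVICE_BUSY analogue */
	}
	return ret;
}

int main(void)
{
	printf("%d\n", queuecommand(4096));
	return 0;
}
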
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c
index 4547a6dbdb23..da670c0e31fa 100644
--- a/drivers/scsi/ufs/ufs-qcom-debugfs.c
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -117,6 +117,9 @@ static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
int ret = 0;
int major;
int minor;
+ unsigned long flags;
+ struct ufs_hba *hba = host->hba;
+
ret = simple_write_to_buffer(configuration, TESTBUS_CFG_BUFF_LINE_SIZE,
&buff_pos, ubuf, cnt);
@@ -142,8 +145,15 @@ static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
goto out;
}
+ if (!ufs_qcom_testbus_cfg_is_ok(host, major, minor)) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
host->testbus.select_major = (u8)major;
host->testbus.select_minor = (u8)minor;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* Sanity check of the {major, minor} tuple is done in the
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 47106f937371..f429547aef7b 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -2563,12 +2563,13 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
host->testbus.select_minor = 37;
}
-static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
+ u8 select_major, u8 select_minor)
{
- if (host->testbus.select_major >= TSTBUS_MAX) {
+ if (select_major >= TSTBUS_MAX) {
dev_err(host->hba->dev,
"%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
- __func__, host->testbus.select_major);
+ __func__, select_major);
return false;
}
@@ -2577,10 +2578,10 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
* mappings of select_minor, since there is no harm in
* configuring a non-existent select_minor
*/
- if (host->testbus.select_minor > 0xFF) {
+ if (select_minor > 0xFF) {
dev_err(host->hba->dev,
"%s: 0x%05X is not a legal testbus option\n",
- __func__, host->testbus.select_minor);
+ __func__, select_minor);
return false;
}
@@ -2594,16 +2595,16 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
*/
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
- int reg;
- int offset;
+ int reg = 0;
+ int offset, ret = 0, testbus_sel_offset = 19;
u32 mask = TEST_BUS_SUB_SEL_MASK;
+ unsigned long flags;
+ struct ufs_hba *hba;
if (!host)
return -EINVAL;
-
- if (!ufs_qcom_testbus_cfg_is_ok(host))
- return -EPERM;
-
+ hba = host->hba;
+ spin_lock_irqsave(hba->host->host_lock, flags);
switch (host->testbus.select_major) {
case TSTBUS_UAWM:
reg = UFS_TEST_BUS_CTRL_0;
@@ -2661,21 +2662,27 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
*/
}
mask <<= offset;
-
- ufshcd_rmwl(host->hba, TEST_BUS_SEL,
- (u32)host->testbus.select_major << 19,
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (reg) {
+ ufshcd_rmwl(host->hba, TEST_BUS_SEL,
+ (u32)host->testbus.select_major << testbus_sel_offset,
REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, mask,
+ ufshcd_rmwl(host->hba, mask,
(u32)host->testbus.select_minor << offset,
reg);
+ } else {
+ dev_err(hba->dev, "%s: Problem setting minor\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
ufs_qcom_enable_test_bus(host);
/*
* Make sure the test bus configuration is
* committed before returning.
*/
mb();
-
- return 0;
+out:
+ return ret;
}
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 9d532691f001..fd98a3381d61 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -100,7 +100,7 @@ enum {
/* bit definitions for REG_UFS_CFG1 register */
#define QUNIPRO_SEL UFS_BIT(0)
#define TEST_BUS_EN BIT(18)
-#define TEST_BUS_SEL GENMASK(22, 19)
+#define TEST_BUS_SEL 0x780000
#define UFS_REG_TEST_BUS_EN BIT(30)
/* bit definitions for REG_UFS_CFG2 register */
@@ -391,6 +391,8 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host, u8 select_major,
+ u8 select_minor);
int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 2326487302fd..e9a097151141 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -73,6 +73,8 @@ module_param(qmi_timeout, ulong, 0600);
#define ICNSS_THRESHOLD_LOW 3450000
#define ICNSS_THRESHOLD_GUARD 20000
+#define ICNSS_MAX_PROBE_CNT 2
+
#define icnss_ipc_log_string(_x...) do { \
if (icnss_ipc_log_context) \
ipc_log_string(icnss_ipc_log_context, _x); \
@@ -2105,7 +2107,8 @@ static int icnss_driver_event_server_exit(void *data)
static int icnss_call_driver_probe(struct icnss_priv *priv)
{
- int ret;
+ int ret = 0;
+ int probe_cnt = 0;
if (!priv->ops || !priv->ops->probe)
return 0;
@@ -2117,10 +2120,15 @@ static int icnss_call_driver_probe(struct icnss_priv *priv)
icnss_hw_power_on(priv);
- ret = priv->ops->probe(&priv->pdev->dev);
+ while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
+ ret = priv->ops->probe(&priv->pdev->dev);
+ probe_cnt++;
+ if (ret != -EPROBE_DEFER)
+ break;
+ }
if (ret < 0) {
- icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
- ret, priv->state);
+ icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
+ ret, priv->state, probe_cnt);
goto out;
}
@@ -2228,6 +2236,7 @@ out:
static int icnss_driver_event_register_driver(void *data)
{
int ret = 0;
+ int probe_cnt = 0;
if (penv->ops)
return -EEXIST;
@@ -2247,11 +2256,15 @@ static int icnss_driver_event_register_driver(void *data)
if (ret)
goto out;
- ret = penv->ops->probe(&penv->pdev->dev);
-
+ while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
+ ret = penv->ops->probe(&penv->pdev->dev);
+ probe_cnt++;
+ if (ret != -EPROBE_DEFER)
+ break;
+ }
if (ret) {
- icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
- ret, penv->state);
+ icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
+ ret, penv->state, probe_cnt);
goto power_off;
}
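
[Editor's note] Both icnss probe sites now retry a bounded number of times while the driver callback returns -EPROBE_DEFER, instead of failing on the first deferral. A standalone sketch (EPROBE_DEFER's value is the kernel-internal one, defined here only for illustration):

#include <stdio.h>

#define EPROBE_DEFER   517  /* kernel-internal errno value, illustrative */
#define MAX_PROBE_CNT  2

static int calls;
static int fake_probe(void)  /* stand-in driver probe: defers once */
{
	return ++calls == 1 ? -EPROBE_DEFER : 0;
}

int main(void)
{
	int ret = 0, probe_cnt = 0;

	while (probe_cnt < MAX_PROBE_CNT) {
		ret = fake_probe();
		probe_cnt++;
		if (ret != -EPROBE_DEFER)
			break;  /* success or a real error: stop retrying */
	}
	printf("ret=%d after %d attempt(s)\n", ret, probe_cnt);
	return 0;
}
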
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index d77a12626330..d82c36480159 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -164,12 +164,17 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
int ss_mdump_seg_cnt;
int ret, i;
+ if (!ramdump_dev)
+ return -ENODEV;
+
memcpy(&offset, &priv->minidump, sizeof(priv->minidump));
offset = offset + sizeof(priv->minidump->md_ss_smem_regions_baseptr);
/* There are 3 encryption keys which also need to be dumped */
ss_mdump_seg_cnt = readb_relaxed(offset) +
NUM_OF_ENCRYPTED_KEY;
+ pr_debug("SMEM base to read minidump segments is 0x%x\n",
+ __raw_readl(priv->minidump));
subsys_smem_base = ioremap(__raw_readl(priv->minidump),
ss_mdump_seg_cnt * sizeof(*region_info));
region_info =
@@ -191,6 +196,9 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
s->address = __raw_readl(offset);
offset = offset + sizeof(region_info->region_base_address);
s->size = __raw_readl(offset);
+ pr_debug("Dumping segment %s with address %pK and size 0x%x\n",
+ s->name, (void *)s->address,
+ (unsigned int)s->size);
s++;
region_info++;
}
@@ -199,7 +207,6 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
if (ret)
pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
__func__, desc->name, ret);
- writel_relaxed(0, &priv->minidump->md_ss_smem_regions_baseptr);
writeb_relaxed(1, &priv->minidump->md_ss_ssr_cause);
if (desc->subsys_vmid > 0)
@@ -216,16 +223,28 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
* Calls the ramdump API with a list of segments generated from the addresses
* that the descriptor corresponds to.
*/
-int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+int pil_do_ramdump(struct pil_desc *desc,
+ void *ramdump_dev, void *minidump_dev)
{
struct pil_priv *priv = desc->priv;
struct pil_seg *seg;
int count = 0, ret;
struct ramdump_segment *ramdump_segs, *s;
+ void __iomem *offset;
- if (priv->minidump && (__raw_readl(priv->minidump) > 0))
- return pil_do_minidump(desc, ramdump_dev);
+ memcpy(&offset, &priv->minidump, sizeof(priv->minidump));
+ /*
+ * Collect a minidump if the SMEM base is initialized and the SSR
+ * cause is 0. There is no need to check the encryption status.
+ */
+ if (priv->minidump
+ && (__raw_readl(priv->minidump) != 0)
+ && (readb_relaxed(offset + sizeof(u32) + 2 * sizeof(u8)) == 0)) {
+ pr_debug("Dumping Minidump for %s\n", desc->name);
+ return pil_do_minidump(desc, minidump_dev);
+ }
+ pr_debug("Continuing with full SSR dump for %s\n", desc->name);
list_for_each_entry(seg, &priv->segs, list)
count++;
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index 908ab78124f7..6e74743c8c21 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -134,7 +134,8 @@ extern void pil_shutdown(struct pil_desc *desc);
extern void pil_free_memory(struct pil_desc *desc);
extern void pil_desc_release(struct pil_desc *desc);
extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
-extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev,
+ void *minidump_dev);
extern int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
size_t size);
extern int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
@@ -154,7 +155,8 @@ static inline phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
{
return 0;
}
-static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+static inline int pil_do_ramdump(struct pil_desc *desc,
+ void *ramdump_dev, void *minidump_dev)
{
return 0;
}
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
index 896f0c7c232b..b1a5311859ff 100644
--- a/drivers/soc/qcom/pil-msa.h
+++ b/drivers/soc/qcom/pil-msa.h
@@ -24,6 +24,7 @@ struct modem_data {
struct subsys_device *subsys;
struct subsys_desc subsys_desc;
void *ramdump_dev;
+ void *minidump_dev;
bool crash_shutdown;
u32 pas_id;
bool ignore_errors;
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index bbcff5923c53..45712457de73 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -171,7 +171,8 @@ static int modem_ramdump(int enable, const struct subsys_desc *subsys)
if (ret)
return ret;
- ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
+ ret = pil_do_ramdump(&drv->q6->desc,
+ drv->ramdump_dev, drv->minidump_dev);
if (ret < 0)
pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
@@ -230,9 +231,18 @@ static int pil_subsys_init(struct modem_data *drv,
ret = -ENOMEM;
goto err_ramdump;
}
+ drv->minidump_dev = create_ramdump_device("md_modem", &pdev->dev);
+ if (!drv->minidump_dev) {
+ pr_err("%s: Unable to create a modem minidump device.\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_minidump;
+ }
return 0;
+err_minidump:
+ destroy_ramdump_device(drv->ramdump_dev);
err_ramdump:
subsys_unregister(drv->subsys);
err_subsys:
@@ -414,6 +424,7 @@ static int pil_mss_driver_exit(struct platform_device *pdev)
subsys_unregister(drv->subsys);
destroy_ramdump_device(drv->ramdump_dev);
+ destroy_ramdump_device(drv->minidump_dev);
pil_desc_release(&drv->q6->desc);
return 0;
}
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index f85c4ba06b47..8bf5f8eb64ad 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -850,7 +850,7 @@ static int subsys_ramdump(int enable, const struct subsys_desc *subsys)
if (!enable)
return 0;
- return pil_do_ramdump(&d->desc, d->ramdump_dev);
+ return pil_do_ramdump(&d->desc, d->ramdump_dev, NULL);
}
static void subsys_free_memory(const struct subsys_desc *subsys)
diff --git a/drivers/soundwire/swr-wcd-ctrl.c b/drivers/soundwire/swr-wcd-ctrl.c
index cdaf009c5b1f..1dcaba2e79f6 100644
--- a/drivers/soundwire/swr-wcd-ctrl.c
+++ b/drivers/soundwire/swr-wcd-ctrl.c
@@ -397,11 +397,17 @@ static int swrm_clk_request(struct swr_mstr_ctrl *swrm, bool enable)
return -EINVAL;
if (enable) {
- swrm->clk(swrm->handle, true);
- swrm->state = SWR_MSTR_UP;
- } else {
+ swrm->clk_ref_count++;
+ if (swrm->clk_ref_count == 1) {
+ swrm->clk(swrm->handle, true);
+ swrm->state = SWR_MSTR_UP;
+ }
+ } else if (--swrm->clk_ref_count == 0) {
swrm->clk(swrm->handle, false);
swrm->state = SWR_MSTR_DOWN;
+ } else if (swrm->clk_ref_count < 0) {
+ pr_err("%s: swrm clk count mismatch\n", __func__);
+ swrm->clk_ref_count = 0;
}
return 0;
}
@@ -1169,7 +1175,10 @@ static irqreturn_t swr_mstr_interrupt(int irq, void *dev)
u8 devnum = 0;
int ret = IRQ_HANDLED;
- pm_runtime_get_sync(&swrm->pdev->dev);
+ mutex_lock(&swrm->reslock);
+ swrm_clk_request(swrm, true);
+ mutex_unlock(&swrm->reslock);
+
intr_sts = swrm->read(swrm->handle, SWRM_INTERRUPT_STATUS);
intr_sts &= SWRM_INTERRUPT_STATUS_RMSK;
for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
@@ -1257,8 +1266,10 @@ static irqreturn_t swr_mstr_interrupt(int irq, void *dev)
break;
}
}
- pm_runtime_mark_last_busy(&swrm->pdev->dev);
- pm_runtime_put_autosuspend(&swrm->pdev->dev);
+
+ mutex_lock(&swrm->reslock);
+ swrm_clk_request(swrm, false);
+ mutex_unlock(&swrm->reslock);
return ret;
}
@@ -1448,6 +1459,7 @@ static int swrm_probe(struct platform_device *pdev)
swrm->wcmd_id = 0;
swrm->slave_status = 0;
swrm->num_rx_chs = 0;
+ swrm->clk_ref_count = 0;
swrm->state = SWR_MSTR_RESUME;
init_completion(&swrm->reset);
init_completion(&swrm->broadcast);
diff --git a/drivers/soundwire/swr-wcd-ctrl.h b/drivers/soundwire/swr-wcd-ctrl.h
index b7a3edac3e00..104ac8f6f510 100755
--- a/drivers/soundwire/swr-wcd-ctrl.h
+++ b/drivers/soundwire/swr-wcd-ctrl.h
@@ -78,6 +78,7 @@ struct swr_mstr_ctrl {
struct device *dev;
struct resource *supplies;
struct clk *mclk;
+ int clk_ref_count;
struct completion reset;
struct completion broadcast;
struct mutex mlock;
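
[Editor's note] The soundwire change converts an unconditional clock on/off into a reference count, so nested users (here, the interrupt handler) keep the clock up until the last release, and an unbalanced release is reported and clamped. A userspace miniature of the same counting:

#include <stdio.h>

static int clk_ref_count;

static void hw_clk(int on) { printf("hw clock %s\n", on ? "on" : "off"); }

static int clk_request(int enable)
{
	if (enable) {
		if (++clk_ref_count == 1)
			hw_clk(1);           /* first user powers the clock */
	} else if (--clk_ref_count == 0) {
		hw_clk(0);                   /* last user gates it */
	} else if (clk_ref_count < 0) {
		fprintf(stderr, "clk count mismatch\n");
		clk_ref_count = 0;           /* clamp, as the patch does */
	}
	return 0;
}

int main(void)
{
	clk_request(1);   /* on */
	clk_request(1);   /* nested user: no hardware action */
	clk_request(0);   /* still one user left */
	clk_request(0);   /* off */
	clk_request(0);   /* unbalanced: reported and clamped */
	return 0;
}
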
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
index 15ff4e149d75..08113a342eed 100644
--- a/drivers/staging/android/fiq_debugger/fiq_debugger.c
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c
@@ -402,7 +402,7 @@ static void fiq_debugger_work(struct work_struct *work)
cmd += 6;
while (*cmd == ' ')
cmd++;
- if ((cmd != '\0') && sysrq_on())
+ if ((*cmd != '\0') && sysrq_on())
kernel_restart(cmd);
else
kernel_restart(NULL);
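
[Editor's note] The fiq_debugger fix is a one-character pointer bug: `cmd != '\0'` compared the pointer against NULL, which is always true after the preceding increments, so an empty command string was still passed to kernel_restart(). The corrected test dereferences the pointer. A tiny illustration:

#include <stdio.h>

static void restart(const char *cmd)
{
	if (cmd && *cmd != '\0')  /* test the character, not the pointer */
		printf("restart with arg \"%s\"\n", cmd);
	else
		printf("restart with no arg\n");
}

int main(void)
{
	restart("");          /* empty arg now behaves like no arg */
	restart("recovery");
	return 0;
}
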
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index 60871f3022b1..12a3893b98fd 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -414,7 +414,7 @@ void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
sense->ascq = ascq;
if (sns_key_info0 != 0) {
sense->sns_key_info[0] = SKSV | sns_key_info0;
- sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
+ sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
sense->sns_key_info[2] = sns_key_info1 & 0x0f;
}
}
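
[Editor's note] The rts5208 fix corrects a nibble extraction: shifting a u8 masked with 0xf0 right by 8 always yields zero, so the upper sense-key-info nibble was lost; shifting by 4 is what the mask implies. In isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t info = 0xA5;

	/* >> 8 always produced 0 for a u8; >> 4 extracts the high nibble */
	uint8_t high = (info & 0xf0) >> 4;  /* 0x0A */
	uint8_t low  = info & 0x0f;         /* 0x05 */

	printf("high=0x%X low=0x%X\n", high, low);
	return 0;
}
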
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index fb31eecb708d..8f3566cde3eb 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -362,6 +362,32 @@ int tty_insert_flip_string_flags(struct tty_port *port,
EXPORT_SYMBOL(tty_insert_flip_string_flags);
/**
+ * __tty_insert_flip_char - Add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
+ *
+ * Queue a single byte to the tty buffering, with an optional flag.
+ * This is the slow path of tty_insert_flip_char.
+ */
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
+{
+ struct tty_buffer *tb;
+ int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
+
+ if (!__tty_buffer_request_room(port, 1, flags))
+ return 0;
+
+ tb = port->buf.tail;
+ if (~tb->flags & TTYB_NORMAL)
+ *flag_buf_ptr(tb, tb->used) = flag;
+ *char_buf_ptr(tb, tb->used++) = ch;
+
+ return 1;
+}
+EXPORT_SYMBOL(__tty_insert_flip_char);
+
+/**
* tty_schedule_flip - push characters to ldisc
* @port: tty port to push from
*
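
[Editor's note] __tty_insert_flip_char above is the out-of-line slow path for a one-byte insert: request room, store the flag byte only when the buffer is not marked all-normal, then store the character. A toy flip buffer showing the same shape (no locking, sizes made up):

#include <stdio.h>

struct flip_buf {
	unsigned char data[64];
	unsigned char flags[64];  /* parallel flag bytes, optional */
	int used;
	int store_flags;  /* 0: buffer marked all-normal, flags unused */
};

static int insert_flip_char(struct flip_buf *b, unsigned char ch,
			    unsigned char flag)
{
	if (b->used >= (int)sizeof(b->data))
		return 0;  /* no room; the real code grows the buffer */
	if (b->store_flags)
		b->flags[b->used] = flag;
	b->data[b->used] = ch;
	b->used++;
	return 1;
}

int main(void)
{
	struct flip_buf b = { .store_flags = 1 };

	insert_flip_char(&b, 'x', 0 /* TTY_NORMAL analogue */);
	printf("used=%d ch=%c flag=%u\n", b.used, b.data[0],
	       (unsigned)b.flags[0]);
	return 0;
}
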
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 54d2d6b604c0..873ba02d59e6 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -519,6 +519,8 @@ static void async_completed(struct urb *urb)
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);
+
+ wake_up(&ps->wait);
spin_unlock(&ps->lock);
if (signr) {
@@ -526,8 +528,6 @@ static void async_completed(struct urb *urb)
put_pid(pid);
put_cred(cred);
}
-
- wake_up(&ps->wait);
}
static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
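
[Editor's note] The devio change moves the wake_up() inside the spinlock-protected region, so a woken waiter cannot tear down the shared state while the completion handler is still touching it. A userspace analogue using a condition variable signalled while the lock is still held (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;
static int done;

static void *completer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&wait_q);  /* wake while the state is protected */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completer, NULL);
	pthread_mutex_lock(&lock);
	while (!done)
		pthread_cond_wait(&wait_q, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("completion observed\n");
	return 0;
}
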
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 574da2b4529c..82806e311202 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -57,8 +57,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920 and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
@@ -217,6 +218,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Corsair Strafe RGB */
+ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 3b925d9b000e..18e3c5cdcc24 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -40,6 +40,15 @@ MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data");
static struct workqueue_struct *ipa_usb_wq;
+static struct gsi_inst_status {
+ struct mutex gsi_lock;
+ bool inst_exist;
+ struct gsi_opts *opts;
+} inst_status[IPA_USB_MAX_TETH_PROT_SIZE];
+
+/* Deregister misc device and free instance structures */
+static void gsi_inst_clean(struct gsi_opts *opts);
+
static void ipa_disconnect_handler(struct gsi_data_port *d_port);
static int gsi_ctrl_send_notification(struct f_gsi *gsi);
static int gsi_alloc_trb_buffer(struct f_gsi *gsi);
@@ -946,38 +955,71 @@ static int gsi_ctrl_dev_open(struct inode *ip, struct file *fp)
struct gsi_ctrl_port *c_port = container_of(fp->private_data,
struct gsi_ctrl_port,
ctrl_device);
+ struct f_gsi *gsi;
+ struct gsi_inst_status *inst_cur;
if (!c_port) {
- log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
+ pr_err_ratelimited("%s: gsi ctrl port %pK", __func__, c_port);
return -ENODEV;
}
- log_event_dbg("%s: open ctrl dev %s", __func__, c_port->name);
+ pr_devel_ratelimited("%s: open ctrl dev %s", __func__, c_port->name);
+
+ gsi = container_of(c_port, struct f_gsi, c_port);
+ inst_cur = &inst_status[gsi->prot_id];
+
+ mutex_lock(&inst_cur->gsi_lock);
+
+ fp->private_data = &gsi->prot_id;
+
+ if (!inst_cur->inst_exist) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: [prot_id = %d], GSI instance freed already\n",
+ __func__, gsi->prot_id);
+ return -ENODEV;
+ }
if (c_port->is_open) {
- log_event_err("%s: Already opened", __func__);
+ mutex_unlock(&inst_cur->gsi_lock);
+ log_event_err("%s: Already opened\n", __func__);
return -EBUSY;
}
c_port->is_open = true;
+ mutex_unlock(&inst_cur->gsi_lock);
+
return 0;
}
static int gsi_ctrl_dev_release(struct inode *ip, struct file *fp)
{
- struct gsi_ctrl_port *c_port = container_of(fp->private_data,
- struct gsi_ctrl_port,
- ctrl_device);
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
- if (!c_port) {
- log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
+ mutex_lock(&inst_cur->gsi_lock);
+
+ if (unlikely(inst_cur->inst_exist == false)) {
+ if (inst_cur->opts) {
+ /* GSI instance clean up */
+ gsi_inst_clean(inst_cur->opts);
+ inst_cur->opts = NULL;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: [prot_id = %d], Delayed free instance memory\n",
+ __func__, prot_id);
return -ENODEV;
}
- log_event_dbg("close ctrl dev %s", c_port->name);
+ inst_cur->opts->gsi->c_port.is_open = false;
+
+ mutex_unlock(&inst_cur->gsi_lock);
- c_port->is_open = false;
+ log_event_dbg("close ctrl dev %s\n",
+ inst_cur->opts->gsi->c_port.name);
return 0;
}
@@ -985,16 +1027,27 @@ static int gsi_ctrl_dev_release(struct inode *ip, struct file *fp)
static ssize_t
gsi_ctrl_dev_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
{
- struct gsi_ctrl_port *c_port = container_of(fp->private_data,
- struct gsi_ctrl_port,
- ctrl_device);
-
+ struct gsi_ctrl_port *c_port;
struct gsi_ctrl_pkt *cpkt = NULL;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
unsigned long flags;
int ret = 0;
log_event_dbg("%s: Enter %zu", __func__, count);
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: free_inst is called, free memory until dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ c_port = &inst_cur->opts->gsi->c_port;
if (!c_port) {
log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
return -ENODEV;
@@ -1062,14 +1115,29 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
int ret = 0;
unsigned long flags;
struct gsi_ctrl_pkt *cpkt;
- struct gsi_ctrl_port *c_port = container_of(fp->private_data,
- struct gsi_ctrl_port,
- ctrl_device);
- struct f_gsi *gsi = c_port_to_gsi(c_port);
- struct usb_request *req = c_port->notify_req;
+ struct gsi_ctrl_port *c_port;
+ struct usb_request *req;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+ struct f_gsi *gsi;
log_event_dbg("Enter %zu", count);
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: free_inst is called, free memory until dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ gsi = inst_cur->opts->gsi;
+ c_port = &gsi->c_port;
+ req = c_port->notify_req;
+
if (!c_port || !req || !req->buf) {
log_event_err("%s: c_port %pK req %pK req->buf %pK",
__func__, c_port, req, req ? req->buf : req);
@@ -1126,15 +1194,29 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
static long gsi_ctrl_dev_ioctl(struct file *fp, unsigned cmd,
unsigned long arg)
{
- struct gsi_ctrl_port *c_port = container_of(fp->private_data,
- struct gsi_ctrl_port,
- ctrl_device);
- struct f_gsi *gsi = c_port_to_gsi(c_port);
+ struct gsi_ctrl_port *c_port;
+ struct f_gsi *gsi;
struct gsi_ctrl_pkt *cpkt;
struct ep_info info;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
int val, ret = 0;
unsigned long flags;
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: free_inst is called, free memory until dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ gsi = inst_cur->opts->gsi;
+ c_port = &gsi->c_port;
+
if (!c_port) {
log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
return -ENODEV;
@@ -1255,12 +1337,24 @@ exit_ioctl:
static unsigned int gsi_ctrl_dev_poll(struct file *fp, poll_table *wait)
{
- struct gsi_ctrl_port *c_port = container_of(fp->private_data,
- struct gsi_ctrl_port,
- ctrl_device);
+ struct gsi_ctrl_port *c_port;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
unsigned long flags;
unsigned int mask = 0;
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: free_inst is called, free memory until dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ c_port = &inst_cur->opts->gsi->c_port;
if (!c_port) {
log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
return -ENODEV;
@@ -1578,6 +1672,7 @@ static void gsi_rndis_command_complete(struct usb_ep *ep,
struct usb_request *req)
{
struct f_gsi *rndis = req->context;
+ rndis_init_msg_type *buf;
int status;
if (req->status != 0) {
@@ -1590,6 +1685,16 @@ static void gsi_rndis_command_complete(struct usb_ep *ep,
if (status < 0)
log_event_err("RNDIS command error %d, %d/%d",
status, req->actual, req->length);
+
+ buf = (rndis_init_msg_type *)req->buf;
+ if (buf->MessageType == RNDIS_MSG_INIT) {
+ rndis->d_port.in_aggr_size = min_t(u32,
+ rndis->d_port.in_aggr_size,
+ rndis->params->dl_max_xfer_size);
+ log_event_dbg("RNDIS host dl_aggr_size:%d in_aggr_size:%d\n",
+ rndis->params->dl_max_xfer_size,
+ rndis->d_port.in_aggr_size);
+ }
}
static void
@@ -2749,7 +2854,7 @@ static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
static void gsi_free_func(struct usb_function *f)
{
- pr_debug("%s\n", __func__);
+ log_event_dbg("%s\n", __func__);
}
int gsi_bind_config(struct f_gsi *gsi)
@@ -2845,6 +2950,7 @@ static void gsi_opts_release(struct config_item *item)
{
struct gsi_opts *opts = to_gsi_opts(item);
+ log_event_dbg("Release GSI: %s\n", __func__);
usb_put_function_instance(&opts->func_inst);
}
@@ -3044,48 +3150,89 @@ static struct config_item_type gsi_func_type = {
.ct_owner = THIS_MODULE,
};
+static void gsi_inst_clean(struct gsi_opts *opts)
+{
+ if (opts->gsi->c_port.ctrl_device.fops)
+ misc_deregister(&opts->gsi->c_port.ctrl_device);
+
+ kfree(opts->gsi);
+ kfree(opts);
+}
+
static int gsi_set_inst_name(struct usb_function_instance *fi,
const char *name)
{
- int ret, name_len;
+ int prot_id, name_len;
struct f_gsi *gsi;
- struct gsi_opts *opts = container_of(fi, struct gsi_opts, func_inst);
+ struct gsi_opts *opts, *opts_prev;
+
+ opts = container_of(fi, struct gsi_opts, func_inst);
name_len = strlen(name) + 1;
if (name_len > MAX_INST_NAME_LEN)
return -ENAMETOOLONG;
- ret = name_to_prot_id(name);
- if (ret < 0) {
- pr_err("%s: failed to find prot id for %s instance\n",
+ prot_id = name_to_prot_id(name);
+ if (prot_id < 0) {
+ log_event_err("%s: failed to find prot id for %s instance\n",
__func__, name);
return -EINVAL;
}
- if (ret == IPA_USB_RNDIS)
+ mutex_lock(&inst_status[prot_id].gsi_lock);
+ opts_prev = inst_status[prot_id].opts;
+ if (opts_prev) {
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+ log_event_err("%s: prot_id = %d, prev inst do not freed yet\n",
+ __func__, prot_id);
+ return -EBUSY;
+ }
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+
+ if (prot_id == IPA_USB_RNDIS)
config_group_init_type_name(&opts->func_inst.group, "",
&gsi_func_rndis_type);
- gsi = gsi_function_init(ret);
+ gsi = gsi_function_init(prot_id);
if (IS_ERR(gsi))
return PTR_ERR(gsi);
opts->gsi = gsi;
+ /* Set instance status */
+ mutex_lock(&inst_status[prot_id].gsi_lock);
+ inst_status[prot_id].inst_exist = true;
+ inst_status[prot_id].opts = opts;
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+
return 0;
}
static void gsi_free_inst(struct usb_function_instance *f)
{
struct gsi_opts *opts = container_of(f, struct gsi_opts, func_inst);
+ enum ipa_usb_teth_prot prot_id;
if (!opts->gsi)
return;
- if (opts->gsi->c_port.ctrl_device.fops)
- misc_deregister(&opts->gsi->c_port.ctrl_device);
+ prot_id = opts->gsi->prot_id;
- kfree(opts->gsi);
- kfree(opts);
+ mutex_lock(&inst_status[prot_id].gsi_lock);
+ if (opts->gsi->c_port.is_open) {
+ /* Mark instance exist as false */
+ inst_status[prot_id].inst_exist = false;
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+ log_event_err(
+ "%s: [prot_id = %d] Dev is open, free mem when dev close\n",
+ __func__, prot_id);
+ return;
+ }
+
+ /* Clear instance status */
+ gsi_inst_clean(opts);
+ inst_status[prot_id].inst_exist = false;
+ inst_status[prot_id].opts = NULL;
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
}
static struct usb_function_instance *gsi_alloc_inst(void)
@@ -3099,7 +3246,7 @@ static struct usb_function_instance *gsi_alloc_inst(void)
opts->func_inst.set_inst_name = gsi_set_inst_name;
opts->func_inst.free_func_inst = gsi_free_inst;
config_group_init_type_name(&opts->func_inst.group, "",
- &gsi_func_type);
+ &gsi_func_type);
return &opts->func_inst;
}
@@ -3124,6 +3271,8 @@ MODULE_DESCRIPTION("GSI function driver");
static int fgsi_init(void)
{
+ int i;
+
ipa_usb_wq = alloc_workqueue("k_ipa_usb",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE, 1);
if (!ipa_usb_wq) {
@@ -3131,6 +3280,9 @@ static int fgsi_init(void)
return -ENOMEM;
}
+ for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+ mutex_init(&inst_status[i].gsi_lock);
+
return usb_function_register(&gsiusb_func);
}
module_init(fgsi_init);
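
[Editor's note] The f_gsi rework introduces per-protocol state (gsi_lock, inst_exist, opts) so that freeing a function instance while userspace still holds the ctrl misc device open is deferred to the final close. A single-threaded miniature of that ownership handoff; the real driver takes gsi_lock around every step:

#include <stdio.h>
#include <stdlib.h>

struct inst { int is_open; };

struct inst_status {
	struct inst *inst;  /* NULL once freed */
	int exists;
};

static struct inst_status status;

static void inst_clean(void)
{
	free(status.inst);
	status.inst = NULL;
}

static void free_inst(void)
{
	if (status.inst && status.inst->is_open) {
		status.exists = 0;  /* mark dead, free on last close */
		return;
	}
	inst_clean();
	status.exists = 0;
}

static void dev_release(void)
{
	if (!status.exists && status.inst) {
		inst_clean();       /* the delayed free on last close */
		return;
	}
	if (status.inst)
		status.inst->is_open = 0;
}

int main(void)
{
	status.inst = calloc(1, sizeof(*status.inst));
	status.exists = 1;
	status.inst->is_open = 1;  /* userspace holds the ctrl device open */
	free_inst();               /* deferred: device still open */
	dev_release();             /* close triggers the delayed free */
	printf("inst=%p exists=%d\n", (void *)status.inst, status.exists);
	return 0;
}
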
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 8536f10a2e35..5189f6d0f09d 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -374,7 +374,7 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_gadget *gadget = c->cdev->gadget;
struct f_qdss *qdss = func_to_qdss(f);
struct usb_ep *ep;
- int iface;
+ int iface, id;
pr_debug("qdss_bind\n");
@@ -392,6 +392,12 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f)
qdss_data_intf_desc.bInterfaceNumber = iface;
qdss->data_iface_id = iface;
+ id = usb_string_id(c->cdev);
+ if (id < 0)
+ return id;
+ qdss_string_defs[QDSS_DATA_IDX].id = id;
+ qdss_data_intf_desc.iInterface = id;
+
if (qdss->debug_inface_enabled) {
/* Allocate ctrl I/F */
iface = usb_interface_id(c, f);
@@ -401,6 +407,11 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f)
}
qdss_ctrl_intf_desc.bInterfaceNumber = iface;
qdss->ctrl_iface_id = iface;
+ id = usb_string_id(c->cdev);
+ if (id < 0)
+ return id;
+ qdss_string_defs[QDSS_CTRL_IDX].id = id;
+ qdss_ctrl_intf_desc.iInterface = id;
}
ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_data_desc,
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 03b9a372636f..1fc6f478a02c 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -133,29 +133,30 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
pinfo->sb_type.gen = AMD_CHIPSET_SB700;
else if (rev >= 0x40 && rev <= 0x4f)
pinfo->sb_type.gen = AMD_CHIPSET_SB800;
- }
- pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
- 0x145c, NULL);
- if (pinfo->smbus_dev) {
- pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
- if (!pinfo->smbus_dev) {
- pinfo->sb_type.gen = NOT_AMD_CHIPSET;
- return 0;
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ if (rev >= 0x11 && rev <= 0x14)
+ pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+ else if (rev >= 0x15 && rev <= 0x18)
+ pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+ else if (rev >= 0x39 && rev <= 0x3a)
+ pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+ } else {
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x145c, NULL);
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+ } else {
+ pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+ return 0;
+ }
}
-
- rev = pinfo->smbus_dev->revision;
- if (rev >= 0x11 && rev <= 0x14)
- pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
- else if (rev >= 0x15 && rev <= 0x18)
- pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
- else if (rev >= 0x39 && rev <= 0x3a)
- pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
}
-
pinfo->sb_type.rev = rev;
return 1;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index fe123153b1a5..2a9944326210 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2023,6 +2023,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index bc325a91a9bf..71584dff75cb 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -422,6 +422,22 @@ static int mdss_dp_clk_init(struct mdss_dp_drv_pdata *dp_drv,
__func__);
dp_drv->pixel_parent = NULL;
}
+
+ dp_drv->pixel_clk_two_div = devm_clk_get(dev,
+ "pixel_clk_two_div");
+ if (IS_ERR(dp_drv->pixel_clk_two_div)) {
+ pr_debug("%s: Unable to get DP pixel two div clk\n",
+ __func__);
+ dp_drv->pixel_clk_two_div = NULL;
+ }
+
+ dp_drv->pixel_clk_four_div = devm_clk_get(dev,
+ "pixel_clk_four_div");
+ if (IS_ERR(dp_drv->pixel_clk_four_div)) {
+ pr_debug("%s: Unable to get DP pixel four div clk\n",
+ __func__);
+ dp_drv->pixel_clk_four_div = NULL;
+ }
} else {
if (dp_drv->pixel_parent)
devm_clk_put(dev, dp_drv->pixel_parent);
@@ -1418,6 +1434,16 @@ static int mdss_dp_enable_mainlink_clocks(struct mdss_dp_drv_pdata *dp)
return ret;
}
+ if (dp->pixel_parent && dp->pixel_clk_two_div &&
+ dp->pixel_clk_four_div) {
+ if (dp->link_rate == DP_LINK_RATE_540)
+ clk_set_parent(dp->pixel_parent,
+ dp->pixel_clk_four_div);
+ else
+ clk_set_parent(dp->pixel_parent,
+ dp->pixel_clk_two_div);
+ }
+
mdss_dp_set_clock_rate(dp, "ctrl_link_clk",
(dp->link_rate * DP_LINK_RATE_MULTIPLIER) / DP_KHZ_TO_HZ);
@@ -1542,6 +1568,19 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
{
int ret = 0;
char ln_map[4];
+ bool connected;
+
+ mutex_lock(&dp_drv->attention_lock);
+ connected = dp_drv->cable_connected;
+ mutex_unlock(&dp_drv->attention_lock);
+
+ /*
+ * If the DP cable is disconnected, avoid link training or turning on the DP path
+ */
+ if (!connected) {
+ pr_err("DP sink not connected\n");
+ return -EINVAL;
+ }
/* wait until link training is completed */
pr_debug("enter, lt_needed=%s\n", lt_needed ? "true" : "false");
@@ -1583,6 +1622,13 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
dp_drv->power_on = true;
ret = mdss_dp_setup_main_link(dp_drv, lt_needed);
+ if (ret) {
+ if (ret == -ENODEV || ret == -EINVAL) {
+ pr_err("main link setup failed\n");
+ mutex_unlock(&dp_drv->train_mutex);
+ return ret;
+ }
+ }
exit_loop:
mutex_unlock(&dp_drv->train_mutex);
@@ -2183,7 +2229,7 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
ret = mdss_dp_dpcd_cap_read(dp);
if (ret || !mdss_dp_aux_is_link_rate_valid(dp->dpcd.max_link_rate) ||
!mdss_dp_aux_is_lane_count_valid(dp->dpcd.max_lane_count)) {
- if (ret == EDP_AUX_ERR_TOUT) {
+ if ((ret == -ENODEV) || (ret == EDP_AUX_ERR_TOUT)) {
pr_err("DPCD read timedout, skip connect notification\n");
goto end;
}
@@ -2215,6 +2261,9 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
read_edid:
ret = mdss_dp_edid_read(dp);
if (ret) {
+ if (ret == -ENODEV)
+ goto end;
+
pr_err("edid read error, setting default resolution\n");
goto notify;
}
@@ -3532,9 +3581,33 @@ static void mdss_dp_reset_event_list(struct mdss_dp_drv_pdata *dp)
static void mdss_dp_reset_sw_state(struct mdss_dp_drv_pdata *dp)
{
+ int ret = 0;
+
pr_debug("enter\n");
mdss_dp_reset_event_list(dp);
+
+ /*
+ * The IRQ_HPD attention event handler first turns on the DP path, then
+ * notifies CONNECT_IRQ_HPD and waits for userspace to trigger UNBLANK.
+ * If the cable is disconnected before that UNBLANK call and DISCONNECT
+ * is notified immediately, userspace might not sense any change in
+ * connection status, leaving the DP controller on.
+ *
+ * To avoid this, wait for the pending connection event to complete
+ * before sending the disconnection event.
+ */
+ if (atomic_read(&dp->notification_pending)) {
+ pr_debug("waiting for the pending notitfication\n");
+ ret = wait_for_completion_timeout(&dp->notification_comp, HZ);
+ if (ret <= 0) {
+ pr_err("%s timed out\n",
+ mdss_dp_notification_status_to_string(
+ dp->hpd_notification_status));
+ }
+ }
+
atomic_set(&dp->notification_pending, 0);
+ /* complete any waiting completions */
complete_all(&dp->notification_comp);
}
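The hunk above leans on the standard completion-with-timeout idiom. A minimal sketch of that pattern, with every name other than the completion/atomic APIs hypothetical:

	static atomic_t pending = ATOMIC_INIT(0);
	static DECLARE_COMPLETION(done);

	/* producer side: clear the pending flag, then wake all waiters */
	static void deliver_event(void)
	{
		atomic_set(&pending, 0);
		complete_all(&done);
	}

	/* consumer side: wait at most one second for a pending event */
	static int drain_pending_event(void)
	{
		if (atomic_read(&pending) &&
		    wait_for_completion_timeout(&done, HZ) == 0)
			return -ETIMEDOUT;	/* caller logs and carries on */
		return 0;
	}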
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index 983f5e34a515..f3f9ca277762 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -614,6 +614,8 @@ struct mdss_dp_drv_pdata {
/* DP Pixel clock RCG and PLL parent */
struct clk *pixel_clk_rcg;
struct clk *pixel_parent;
+ struct clk *pixel_clk_two_div;
+ struct clk *pixel_clk_four_div;
/* regulators */
struct dss_module_power power_data[DP_MAX_PM];
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 86946adfeeb0..407f230ca71e 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -411,7 +411,8 @@ retry:
if (!connected) {
pr_err("dp cable disconnected\n");
- break;
+ ret = -ENODEV;
+ goto end;
}
dp->aux_error_num = EDP_AUX_ERR_NONE;
@@ -877,7 +878,7 @@ void dp_extract_edid_detailed_timing_description(struct edp_edid *edid,
static int dp_aux_chan_ready(struct mdss_dp_drv_pdata *ep)
{
- int cnt, ret;
+ int cnt, ret = 0;
char data = 0;
for (cnt = 5; cnt; cnt--) {
@@ -886,6 +887,10 @@ static int dp_aux_chan_ready(struct mdss_dp_drv_pdata *ep)
ret, mdss_dp_get_aux_error(ep->aux_error_num));
if (ret >= 0)
break;
+
+ if (ret == -ENODEV)
+ return ret;
+
msleep(100);
}
@@ -973,6 +978,7 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
u32 checksum = 0;
bool phy_aux_update_requested = false;
bool ext_block_parsing_done = false;
+ bool connected = false;
ret = dp_aux_chan_ready(dp);
if (ret) {
@@ -992,6 +998,15 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
u8 segment;
u8 edid_buf[EDID_BLOCK_SIZE] = {0};
+ mutex_lock(&dp->attention_lock);
+ connected = dp->cable_connected;
+ mutex_unlock(&dp->attention_lock);
+
+ if (!connected) {
+ pr_err("DP sink not connected\n");
+ return -ENODEV;
+ }
+
/*
* Write the segment first.
* Segment = 0, for blocks 0 and 1
@@ -1243,7 +1258,7 @@ int mdss_dp_aux_link_status_read(struct mdss_dp_drv_pdata *ep, int len)
rlen = dp_aux_read_buf(ep, 0x202, len, 0);
if (rlen < len) {
pr_err("edp aux read failed\n");
- return 0;
+ return rlen;
}
rp = &ep->rxp;
bp = rp->data;
@@ -2459,21 +2474,24 @@ static int dp_start_link_train_1(struct mdss_dp_drv_pdata *ep)
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
- mdss_dp_aux_link_status_read(ep, 6);
+ ret = mdss_dp_aux_link_status_read(ep, 6);
+ if (ret == -ENODEV)
+ break;
+
if (mdss_dp_aux_clock_recovery_done(ep)) {
ret = 0;
break;
}
if (ep->v_level == DPCD_LINK_VOLTAGE_MAX) {
- ret = -1;
+ ret = -EAGAIN;
break; /* quit */
}
if (old_v_level == ep->v_level) {
tries++;
if (tries >= maximum_retries) {
- ret = -1;
+ ret = -EAGAIN;
break; /* quit */
}
} else {
@@ -2511,7 +2529,9 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
- mdss_dp_aux_link_status_read(ep, 6);
+ ret = mdss_dp_aux_link_status_read(ep, 6);
+ if (ret == -ENODEV)
+ break;
if (mdss_dp_aux_channel_eq_done(ep)) {
ret = 0;
@@ -2519,7 +2539,7 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
}
if (tries > maximum_retries) {
- ret = -1;
+ ret = -EAGAIN;
break;
}
tries++;
@@ -2584,7 +2604,7 @@ int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
ret = dp_start_link_train_1(dp);
if (ret < 0) {
- if (!dp_link_rate_down_shift(dp)) {
+ if ((ret == -EAGAIN) && !dp_link_rate_down_shift(dp)) {
pr_debug("retry with lower rate\n");
dp_clear_training_pattern(dp);
return -EAGAIN;
@@ -2603,7 +2623,7 @@ int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
ret = dp_start_link_train_2(dp);
if (ret < 0) {
- if (!dp_link_rate_down_shift(dp)) {
+ if ((ret == -EAGAIN) && !dp_link_rate_down_shift(dp)) {
pr_debug("retry with lower rate\n");
dp_clear_training_pattern(dp);
return -EAGAIN;
@@ -2640,7 +2660,7 @@ int mdss_dp_dpcd_status_read(struct mdss_dp_drv_pdata *ep)
ret = mdss_dp_aux_link_status_read(ep, 6);
- if (ret) {
+ if (ret > 0) {
sp = &ep->link_status;
ret = sp->port_0_in_sync; /* 1 == sync */
}
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index f7b3d4664e86..037741df4382 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -835,6 +835,8 @@ void mdss_dp_sw_config_msa(struct mdss_dp_drv_pdata *dp)
pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
mvid = (pixel_m & 0xFFFF) * 5;
nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+ if (dp->link_rate == DP_LINK_RATE_540)
+ nvid *= 2;
}
pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index c766ff983045..fca1d37b40bb 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -1512,6 +1512,34 @@ static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
return ret;
}
+static void mdss_dsi_wait4active_region(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int in_blanking = 0;
+ int retry_count = 0;
+
+ if (ctrl->panel_mode != DSI_VIDEO_MODE)
+ return;
+
+ while (retry_count != MAX_BTA_WAIT_RETRY) {
+ mdss_dsi_wait4video_eng_busy(ctrl);
+ in_blanking = ctrl->mdp_callback->fxn(
+ ctrl->mdp_callback->data,
+ MDP_INTF_CALLBACK_CHECK_LINE_COUNT);
+
+ if (in_blanking) {
+ pr_debug("%s: not in active region\n", __func__);
+ retry_count++;
+ } else
+ break;
+ }
+
+ if (retry_count == MAX_BTA_WAIT_RETRY)
+ MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
+ "dsi0_phy", "dsi1_ctrl", "dsi1_phy",
+ "vbif", "vbif_nrt", "dbg_bus",
+ "vbif_dbg_bus", "dsi_dbg_bus", "panic");
+}
+
/**
* mdss_dsi_bta_status_check() - Check dsi panel status through bta check
* @ctrl_pdata: pointer to the dsi controller structure
@@ -1527,8 +1555,6 @@ int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
int ret = 0;
unsigned long flag;
int ignore_underflow = 0;
- int retry_count = 0;
- int in_blanking = 0;
if (ctrl_pdata == NULL) {
pr_err("%s: Invalid input data\n", __func__);
@@ -1554,24 +1580,8 @@ int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
reinit_completion(&ctrl_pdata->bta_comp);
mdss_dsi_enable_irq(ctrl_pdata, DSI_BTA_TERM);
spin_unlock_irqrestore(&ctrl_pdata->mdp_lock, flag);
-wait:
- mdss_dsi_wait4video_eng_busy(ctrl_pdata);
- if (ctrl_pdata->panel_mode == DSI_VIDEO_MODE) {
- in_blanking = ctrl_pdata->mdp_callback->fxn(
- ctrl_pdata->mdp_callback->data,
- MDP_INTF_CALLBACK_CHECK_LINE_COUNT);
- /* Try for maximum of 5 attempts */
- if (in_blanking && (retry_count < MAX_BTA_WAIT_RETRY)) {
- pr_debug("%s: not in active region\n", __func__);
- retry_count++;
- goto wait;
- }
- }
- if (retry_count == MAX_BTA_WAIT_RETRY)
- MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
- "dsi0_phy", "dsi1_ctrl", "dsi1_phy",
- "vbif", "vbif_nrt", "dbg_bus",
- "vbif_dbg_bus", "dsi_dbg_bus", "panic");
+
+ mdss_dsi_wait4active_region(ctrl_pdata);
/* mask out overflow errors */
if (ignore_underflow)
@@ -1991,7 +2001,7 @@ do_send:
goto end;
}
- mdss_dsi_wait4video_eng_busy(ctrl);
+ mdss_dsi_wait4active_region(ctrl);
mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
if (use_dma_tpg)
@@ -2029,7 +2039,7 @@ skip_max_pkt_size:
wmb(); /* make sure the RDBK registers are cleared */
}
- mdss_dsi_wait4video_eng_busy(ctrl); /* video mode only */
+ mdss_dsi_wait4active_region(ctrl);
mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
/* transmit read command to client */
if (use_dma_tpg)
diff --git a/fs/Kconfig b/fs/Kconfig
index 4adb93ec85ea..89ddd182f568 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -73,6 +73,8 @@ config FILE_LOCKING
for filesystems like NFS and for the flock() system
call. Disabling this option saves about 11k.
+source "fs/crypto/Kconfig"
+
source "fs/notify/Kconfig"
source "fs/quota/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index dee237540bc0..4644db462ba9 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_AIO) += aio.o
obj-$(CONFIG_FS_DAX) += dax.o
+obj-$(CONFIG_FS_ENCRYPTION) += crypto/
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 5d34a062ca4f..3bd2233737ac 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1727,6 +1727,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
goto restore;
}
+ btrfs_qgroup_rescan_resume(fs_info);
+
if (!fs_info->uuid_root) {
btrfs_info(fs_info, "creating UUID tree");
ret = btrfs_create_uuid_tree(fs_info);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9c62a6f9757a..600c67ef8a03 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -108,7 +108,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
},
};
-const u64 const btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
+const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
[BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1,
[BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c6a1ec110c01..22bae2b434e2 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g)
/*
* read a single page, without unlocking it.
*/
-static int readpage_nounlock(struct file *filp, struct page *page)
+static int ceph_do_readpage(struct file *filp, struct page *page)
{
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
err = ceph_readpage_from_fscache(inode, page);
if (err == 0)
- goto out;
+ return -EINPROGRESS;
dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
@@ -249,8 +249,11 @@ out:
static int ceph_readpage(struct file *filp, struct page *page)
{
- int r = readpage_nounlock(filp, page);
- unlock_page(page);
+ int r = ceph_do_readpage(filp, page);
+ if (r != -EINPROGRESS)
+ unlock_page(page);
+ else
+ r = 0;
return r;
}
@@ -1094,7 +1097,7 @@ retry_locked:
goto retry_locked;
r = writepage_nounlock(page, NULL);
if (r < 0)
- goto fail_nosnap;
+ goto fail_unlock;
goto retry_locked;
}
@@ -1122,11 +1125,14 @@ retry_locked:
}
/* we need to read it. */
- r = readpage_nounlock(file, page);
- if (r < 0)
- goto fail_nosnap;
+ r = ceph_do_readpage(file, page);
+ if (r < 0) {
+ if (r == -EINPROGRESS)
+ return -EAGAIN;
+ goto fail_unlock;
+ }
goto retry_locked;
-fail_nosnap:
+fail_unlock:
unlock_page(page);
return r;
}
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index a4766ded1ba7..ff1cfd7b1083 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -224,13 +224,7 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
fscache_relinquish_cookie(cookie, 0);
}
-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
-{
- if (!error)
- SetPageUptodate(page);
-}
-
-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error)
+static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
if (!error)
SetPageUptodate(page);
@@ -259,7 +253,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
return -ENOBUFS;
ret = fscache_read_or_alloc_page(ci->fscache, page,
- ceph_vfs_readpage_complete, NULL,
+ ceph_readpage_from_fscache_complete, NULL,
GFP_KERNEL);
switch (ret) {
@@ -288,7 +282,7 @@ int ceph_readpages_from_fscache(struct inode *inode,
return -ENOBUFS;
ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
- ceph_vfs_readpage_complete_unlock,
+ ceph_readpage_from_fscache_complete,
NULL, mapping_gfp_mask(mapping));
switch (ret) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index fa8df3fef6fc..297e05c9e2b0 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -194,7 +194,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
int i;
if (unlikely(direntry->d_name.len >
- tcon->fsAttrInfo.MaxPathNameComponentLength))
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
return -ENAMETOOLONG;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index b8f553b32dda..aacb15bd56fe 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -82,8 +82,8 @@
#define NUMBER_OF_SMB2_COMMANDS 0x0013
-/* BB FIXME - analyze following length BB */
-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
+/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
+#define MAX_SMB2_HDR_SIZE 0x00b0
#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
new file mode 100644
index 000000000000..92348faf9865
--- /dev/null
+++ b/fs/crypto/Kconfig
@@ -0,0 +1,18 @@
+config FS_ENCRYPTION
+ tristate "FS Encryption (Per-file encryption)"
+ depends on BLOCK
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_XTS
+ select CRYPTO_CTS
+ select CRYPTO_CTR
+ select CRYPTO_SHA256
+ select KEYS
+ select ENCRYPTED_KEYS
+ help
+ Enable encryption of files and directories. This
+ feature is similar to ecryptfs, but it is more memory
+ efficient since it avoids caching the encrypted and
+ decrypted pages in the page cache.
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
new file mode 100644
index 000000000000..f17684c48739
--- /dev/null
+++ b/fs/crypto/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
+
+fscrypto-y := crypto.o fname.o policy.o keyinfo.o
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
new file mode 100644
index 000000000000..2d40ab9edc9f
--- /dev/null
+++ b/fs/crypto/crypto.c
@@ -0,0 +1,568 @@
+/*
+ * This contains encryption functions for per-file encryption.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ *
+ * Written by Michael Halcrow, 2014.
+ *
+ * Filename encryption additions
+ * Uday Savagaonkar, 2014
+ * Encryption policy handling additions
+ * Ildar Muslukhov, 2014
+ * Add fscrypt_pullback_bio_page()
+ * Jaegeuk Kim, 2015.
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ * The usage of AES-XTS should conform to recommendations in NIST
+ * Special Publication 800-38E and IEEE P1619/D16.
+ */
+
+#include <linux/pagemap.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/ratelimit.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <linux/fscrypto.h>
+
+static unsigned int num_prealloc_crypto_pages = 32;
+static unsigned int num_prealloc_crypto_ctxs = 128;
+
+module_param(num_prealloc_crypto_pages, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_pages,
+ "Number of crypto pages to preallocate");
+module_param(num_prealloc_crypto_ctxs, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
+ "Number of crypto contexts to preallocate");
+
+static mempool_t *fscrypt_bounce_page_pool = NULL;
+
+static LIST_HEAD(fscrypt_free_ctxs);
+static DEFINE_SPINLOCK(fscrypt_ctx_lock);
+
+static struct workqueue_struct *fscrypt_read_workqueue;
+static DEFINE_MUTEX(fscrypt_init_mutex);
+
+static struct kmem_cache *fscrypt_ctx_cachep;
+struct kmem_cache *fscrypt_info_cachep;
+
+/**
+ * fscrypt_release_ctx() - Releases an encryption context
+ * @ctx: The encryption context to release.
+ *
+ * If the encryption context was allocated from the pre-allocated pool, returns
+ * it to that pool. Else, frees it.
+ *
+ * If the context holds a bounce page, that page is freed as well.
+ */
+void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
+{
+ unsigned long flags;
+
+ if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
+ mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
+ ctx->w.bounce_page = NULL;
+ }
+ ctx->w.control_page = NULL;
+ if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
+ kmem_cache_free(fscrypt_ctx_cachep, ctx);
+ } else {
+ spin_lock_irqsave(&fscrypt_ctx_lock, flags);
+ list_add(&ctx->free_list, &fscrypt_free_ctxs);
+ spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
+ }
+}
+EXPORT_SYMBOL(fscrypt_release_ctx);
+
+/**
+ * fscrypt_get_ctx() - Gets an encryption context
+ * @inode: The inode for which we are doing the crypto
+ * @gfp_flags: The gfp flag for memory allocation
+ *
+ * Allocates and initializes an encryption context.
+ *
+ * Return: An allocated and initialized encryption context on success; error
+ * value or NULL otherwise.
+ */
+struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
+{
+ struct fscrypt_ctx *ctx = NULL;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ unsigned long flags;
+
+ if (ci == NULL)
+ return ERR_PTR(-ENOKEY);
+
+ /*
+ * We first try getting the ctx from a free list because in
+ * the common case the ctx will have an allocated and
+ * initialized crypto tfm, so it's probably a worthwhile
+ * optimization. For the bounce page, we first try getting it
+ * from the kernel allocator because that's just about as fast
+ * as getting it from a list and because a cache of free pages
+ * should generally be a "last resort" option for a filesystem
+ * to be able to do its job.
+ */
+ spin_lock_irqsave(&fscrypt_ctx_lock, flags);
+ ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
+ struct fscrypt_ctx, free_list);
+ if (ctx)
+ list_del(&ctx->free_list);
+ spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
+ if (!ctx) {
+ ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+ ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
+ } else {
+ ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
+ }
+ ctx->flags &= ~FS_WRITE_PATH_FL;
+ return ctx;
+}
+EXPORT_SYMBOL(fscrypt_get_ctx);
+
+/**
+ * page_crypt_complete() - completion callback for page crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
+ */
+static void page_crypt_complete(struct crypto_async_request *req, int res)
+{
+ struct fscrypt_completion_result *ecr = req->data;
+
+ if (res == -EINPROGRESS)
+ return;
+ ecr->res = res;
+ complete(&ecr->completion);
+}
+
+typedef enum {
+ FS_DECRYPT = 0,
+ FS_ENCRYPT,
+} fscrypt_direction_t;
+
+static int do_page_crypto(struct inode *inode,
+ fscrypt_direction_t rw, pgoff_t index,
+ struct page *src_page, struct page *dest_page,
+ gfp_t gfp_flags)
+{
+ struct {
+ __le64 index;
+ u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
+ } xts_tweak;
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct scatterlist dst, src;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ int res = 0;
+
+ req = skcipher_request_alloc(tfm, gfp_flags);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_request_alloc() failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ skcipher_request_set_callback(
+ req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ page_crypt_complete, &ecr);
+
+ BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
+ xts_tweak.index = cpu_to_le64(index);
+ memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
+
+ sg_init_table(&dst, 1);
+ sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
+ sg_init_table(&src, 1);
+ sg_set_page(&src, src_page, PAGE_SIZE, 0);
+ skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
+ if (rw == FS_DECRYPT)
+ res = crypto_skcipher_decrypt(req);
+ else
+ res = crypto_skcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ skcipher_request_free(req);
+ if (res) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_skcipher_encrypt() returned %d\n",
+ __func__, res);
+ return res;
+ }
+ return 0;
+}
+
+static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
+{
+ ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
+ if (ctx->w.bounce_page == NULL)
+ return ERR_PTR(-ENOMEM);
+ ctx->flags |= FS_WRITE_PATH_FL;
+ return ctx->w.bounce_page;
+}
+
+/**
+ * fscrypt_encrypt_page() - Encrypts a page
+ * @inode: The inode for which the encryption should take place
+ * @plaintext_page: The page to encrypt. Must be locked.
+ * @gfp_flags: The gfp flag for memory allocation
+ *
+ * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
+ * encryption context.
+ *
+ * Called on the page write path. The caller must call
+ * fscrypt_restore_control_page() on the returned ciphertext page to
+ * release the bounce buffer and the encryption context.
+ *
+ * Return: An allocated page with the encrypted content on success. Else, an
+ * error value or NULL.
+ */
+struct page *fscrypt_encrypt_page(struct inode *inode,
+ struct page *plaintext_page, gfp_t gfp_flags)
+{
+ struct fscrypt_ctx *ctx;
+ struct page *ciphertext_page = NULL;
+ int err;
+
+ BUG_ON(!PageLocked(plaintext_page));
+
+ ctx = fscrypt_get_ctx(inode, gfp_flags);
+ if (IS_ERR(ctx))
+ return (struct page *)ctx;
+
+ /* The encryption operation will require a bounce page. */
+ ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
+ if (IS_ERR(ciphertext_page))
+ goto errout;
+
+ ctx->w.control_page = plaintext_page;
+ err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
+ plaintext_page, ciphertext_page,
+ gfp_flags);
+ if (err) {
+ ciphertext_page = ERR_PTR(err);
+ goto errout;
+ }
+ SetPagePrivate(ciphertext_page);
+ set_page_private(ciphertext_page, (unsigned long)ctx);
+ lock_page(ciphertext_page);
+ return ciphertext_page;
+
+errout:
+ fscrypt_release_ctx(ctx);
+ return ciphertext_page;
+}
+EXPORT_SYMBOL(fscrypt_encrypt_page);
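A hedged sketch of the calling convention the comment above describes, as a filesystem write path might use it; submit_page_for_io() is a hypothetical stand-in for the real I/O submission:

	static int write_encrypted_page(struct inode *inode, struct page *page)
	{
		struct page *bounce;
		int err;

		bounce = fscrypt_encrypt_page(inode, page, GFP_NOFS);
		if (IS_ERR(bounce))
			return PTR_ERR(bounce);

		err = submit_page_for_io(bounce);	/* hypothetical helper */

		/* releases both the bounce page and the encryption context */
		fscrypt_restore_control_page(bounce);
		return err;
	}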
+
+/**
+ * fscrypt_decrypt_page() - Decrypts a page in-place
+ * @page: The page to decrypt. Must be locked.
+ *
+ * Decrypts page in-place using the ctx encryption context.
+ *
+ * Called from the read completion callback.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int fscrypt_decrypt_page(struct page *page)
+{
+ BUG_ON(!PageLocked(page));
+
+ return do_page_crypto(page->mapping->host,
+ FS_DECRYPT, page->index, page, page, GFP_NOFS);
+}
+EXPORT_SYMBOL(fscrypt_decrypt_page);
+
+int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len)
+{
+ struct fscrypt_ctx *ctx;
+ struct page *ciphertext_page = NULL;
+ struct bio *bio;
+ int ret, err = 0;
+
+ BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
+
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
+ if (IS_ERR(ciphertext_page)) {
+ err = PTR_ERR(ciphertext_page);
+ goto errout;
+ }
+
+ while (len--) {
+ err = do_page_crypto(inode, FS_ENCRYPT, lblk,
+ ZERO_PAGE(0), ciphertext_page,
+ GFP_NOFS);
+ if (err)
+ goto errout;
+
+ bio = bio_alloc(GFP_NOWAIT, 1);
+ if (!bio) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ bio->bi_bdev = inode->i_sb->s_bdev;
+ bio->bi_iter.bi_sector =
+ pblk << (inode->i_sb->s_blocksize_bits - 9);
+ ret = bio_add_page(bio, ciphertext_page,
+ inode->i_sb->s_blocksize, 0);
+ if (ret != inode->i_sb->s_blocksize) {
+ /* should never happen! */
+ WARN_ON(1);
+ bio_put(bio);
+ err = -EIO;
+ goto errout;
+ }
+ err = submit_bio_wait(WRITE, bio);
+ if ((err == 0) && bio->bi_error)
+ err = -EIO;
+ bio_put(bio);
+ if (err)
+ goto errout;
+ lblk++;
+ pblk++;
+ }
+ err = 0;
+errout:
+ fscrypt_release_ctx(ctx);
+ return err;
+}
+EXPORT_SYMBOL(fscrypt_zeroout_range);
+
+/*
+ * Validate dentries for encrypted directories to make sure we aren't
+ * potentially caching stale data after a key has been added or
+ * removed.
+ */
+static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ struct dentry *dir;
+ struct fscrypt_info *ci;
+ int dir_has_key, cached_with_key;
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ dir = dget_parent(dentry);
+ if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
+ dput(dir);
+ return 0;
+ }
+
+ ci = d_inode(dir)->i_crypt_info;
+ if (ci && ci->ci_keyring_key &&
+ (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED) |
+ (1 << KEY_FLAG_DEAD))))
+ ci = NULL;
+
+ /* this should eventually be a flag in d_flags */
+ spin_lock(&dentry->d_lock);
+ cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+ dir_has_key = (ci != NULL);
+ dput(dir);
+
+ /*
+ * If the dentry was cached without the key, and it is a
+ * negative dentry, it might be a valid name. We can't check
+ * if the key has since been made available due to locking
+ * reasons, so we fail the validation so ext4_lookup() can do
+ * this check.
+ *
+ * We also fail the validation if the dentry was created with
+ * the key present, but we no longer have the key, or vice versa.
+ */
+ if ((!cached_with_key && d_is_negative(dentry)) ||
+ (!cached_with_key && dir_has_key) ||
+ (cached_with_key && !dir_has_key))
+ return 0;
+ return 1;
+}
+
+const struct dentry_operations fscrypt_d_ops = {
+ .d_revalidate = fscrypt_d_revalidate,
+};
+EXPORT_SYMBOL(fscrypt_d_ops);
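fscrypt_d_ops is meant to be attached by the filesystem when it creates dentries under an encrypted directory; a sketch of that wiring in a lookup path (everything except d_set_d_op() and the s_cop hook is hypothetical):

	static struct dentry *encfs_lookup(struct inode *dir, struct dentry *dentry,
					   unsigned int flags)
	{
		if (dir->i_sb->s_cop->is_encrypted(dir))
			d_set_d_op(dentry, &fscrypt_d_ops);
		/* ... the normal directory lookup continues here ... */
		return NULL;
	}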
+
+/*
+ * Call fscrypt_decrypt_page on every single page, reusing the encryption
+ * context.
+ */
+static void completion_pages(struct work_struct *work)
+{
+ struct fscrypt_ctx *ctx =
+ container_of(work, struct fscrypt_ctx, r.work);
+ struct bio *bio = ctx->r.bio;
+ struct bio_vec *bv;
+ int i;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+ int ret = fscrypt_decrypt_page(page);
+
+ if (ret) {
+ WARN_ON_ONCE(1);
+ SetPageError(page);
+ } else {
+ SetPageUptodate(page);
+ }
+ unlock_page(page);
+ }
+ fscrypt_release_ctx(ctx);
+ bio_put(bio);
+}
+
+void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
+{
+ INIT_WORK(&ctx->r.work, completion_pages);
+ ctx->r.bio = bio;
+ queue_work(fscrypt_read_workqueue, &ctx->r.work);
+}
+EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
+
+void fscrypt_pullback_bio_page(struct page **page, bool restore)
+{
+ struct fscrypt_ctx *ctx;
+ struct page *bounce_page;
+
+ /* Bounce pages have no mapping; a page that is mapped needs no pullback. */
+ if ((*page)->mapping)
+ return;
+
+ /* The page is an unmapped bounce page; its context sits in page_private. */
+ bounce_page = *page;
+ ctx = (struct fscrypt_ctx *)page_private(bounce_page);
+
+ /* restore control page */
+ *page = ctx->w.control_page;
+
+ if (restore)
+ fscrypt_restore_control_page(bounce_page);
+}
+EXPORT_SYMBOL(fscrypt_pullback_bio_page);
+
+void fscrypt_restore_control_page(struct page *page)
+{
+ struct fscrypt_ctx *ctx;
+
+ ctx = (struct fscrypt_ctx *)page_private(page);
+ set_page_private(page, (unsigned long)NULL);
+ ClearPagePrivate(page);
+ unlock_page(page);
+ fscrypt_release_ctx(ctx);
+}
+EXPORT_SYMBOL(fscrypt_restore_control_page);
+
+static void fscrypt_destroy(void)
+{
+ struct fscrypt_ctx *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
+ kmem_cache_free(fscrypt_ctx_cachep, pos);
+ INIT_LIST_HEAD(&fscrypt_free_ctxs);
+ mempool_destroy(fscrypt_bounce_page_pool);
+ fscrypt_bounce_page_pool = NULL;
+}
+
+/**
+ * fscrypt_initialize() - allocate major buffers for fs encryption.
+ *
+ * We only call this when we start accessing encrypted files, since it
+ * results in memory getting allocated that wouldn't otherwise be used.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int fscrypt_initialize(void)
+{
+ int i, res = -ENOMEM;
+
+ if (fscrypt_bounce_page_pool)
+ return 0;
+
+ mutex_lock(&fscrypt_init_mutex);
+ if (fscrypt_bounce_page_pool)
+ goto already_initialized;
+
+ for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
+ struct fscrypt_ctx *ctx;
+
+ ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
+ if (!ctx)
+ goto fail;
+ list_add(&ctx->free_list, &fscrypt_free_ctxs);
+ }
+
+ fscrypt_bounce_page_pool =
+ mempool_create_page_pool(num_prealloc_crypto_pages, 0);
+ if (!fscrypt_bounce_page_pool)
+ goto fail;
+
+already_initialized:
+ mutex_unlock(&fscrypt_init_mutex);
+ return 0;
+fail:
+ fscrypt_destroy();
+ mutex_unlock(&fscrypt_init_mutex);
+ return res;
+}
+EXPORT_SYMBOL(fscrypt_initialize);
+
+/**
+ * fscrypt_init() - Set up for fs encryption.
+ */
+static int __init fscrypt_init(void)
+{
+ fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
+ WQ_HIGHPRI, 0);
+ if (!fscrypt_read_workqueue)
+ goto fail;
+
+ fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
+ if (!fscrypt_ctx_cachep)
+ goto fail_free_queue;
+
+ fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
+ if (!fscrypt_info_cachep)
+ goto fail_free_ctx;
+
+ return 0;
+
+fail_free_ctx:
+ kmem_cache_destroy(fscrypt_ctx_cachep);
+fail_free_queue:
+ destroy_workqueue(fscrypt_read_workqueue);
+fail:
+ return -ENOMEM;
+}
+module_init(fscrypt_init)
+
+/**
+ * fscrypt_exit() - Shutdown the fs encryption system
+ */
+static void __exit fscrypt_exit(void)
+{
+ fscrypt_destroy();
+
+ if (fscrypt_read_workqueue)
+ destroy_workqueue(fscrypt_read_workqueue);
+ kmem_cache_destroy(fscrypt_ctx_cachep);
+ kmem_cache_destroy(fscrypt_info_cachep);
+}
+module_exit(fscrypt_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
new file mode 100644
index 000000000000..9b774f4b50c8
--- /dev/null
+++ b/fs/crypto/fname.c
@@ -0,0 +1,414 @@
+/*
+ * This contains functions for filename crypto management
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ *
+ * Written by Uday Savagaonkar, 2014.
+ * Modified by Jaegeuk Kim, 2015.
+ *
+ * This has not yet undergone a rigorous security audit.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/ratelimit.h>
+#include <linux/fscrypto.h>
+
+/**
+ * fname_crypt_complete() - completion callback for filename crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
+ */
+static void fname_crypt_complete(struct crypto_async_request *req, int res)
+{
+ struct fscrypt_completion_result *ecr = req->data;
+
+ if (res == -EINPROGRESS)
+ return;
+ ecr->res = res;
+ complete(&ecr->completion);
+}
+
+/**
+ * fname_encrypt() - encrypt a filename
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int fname_encrypt(struct inode *inode,
+ const struct qstr *iname, struct fscrypt_str *oname)
+{
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ int res = 0;
+ char iv[FS_CRYPTO_BLOCK_SIZE];
+ struct scatterlist sg;
+ int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
+ unsigned int lim;
+ unsigned int cryptlen;
+
+ lim = inode->i_sb->s_cop->max_namelen(inode);
+ if (iname->len <= 0 || iname->len > lim)
+ return -EIO;
+
+ /*
+ * Copy the filename to the output buffer for encrypting in-place and
+ * pad it with the needed number of NUL bytes.
+ */
+ cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
+ cryptlen = round_up(cryptlen, padding);
+ cryptlen = min(cryptlen, lim);
+ memcpy(oname->name, iname->name, iname->len);
+ memset(oname->name + iname->len, 0, cryptlen - iname->len);
+
+ /* Initialize the IV */
+ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
+
+ /* Set up the encryption request */
+ req = skcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: skcipher_request_alloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ fname_crypt_complete, &ecr);
+ sg_init_one(&sg, oname->name, cryptlen);
+ skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);
+
+ /* Do the encryption */
+ res = crypto_skcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ /* Request is being completed asynchronously; wait for it */
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ skcipher_request_free(req);
+ if (res < 0) {
+ printk_ratelimited(KERN_ERR
+ "%s: Error (error code %d)\n", __func__, res);
+ return res;
+ }
+
+ oname->len = cryptlen;
+ return 0;
+}
+
+/**
+ * fname_decrypt() - decrypt a filename
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int fname_decrypt(struct inode *inode,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct scatterlist src_sg, dst_sg;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ int res = 0;
+ char iv[FS_CRYPTO_BLOCK_SIZE];
+ unsigned lim;
+
+ lim = inode->i_sb->s_cop->max_namelen(inode);
+ if (iname->len <= 0 || iname->len > lim)
+ return -EIO;
+
+ /* Allocate request */
+ req = skcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_request_alloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ fname_crypt_complete, &ecr);
+
+ /* Initialize IV */
+ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
+
+ /* Create decryption request */
+ sg_init_one(&src_sg, iname->name, iname->len);
+ sg_init_one(&dst_sg, oname->name, oname->len);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
+ res = crypto_skcipher_decrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ skcipher_request_free(req);
+ if (res < 0) {
+ printk_ratelimited(KERN_ERR
+ "%s: Error (error code %d)\n", __func__, res);
+ return res;
+ }
+
+ oname->len = strnlen(oname->name, iname->len);
+ return 0;
+}
+
+static const char *lookup_table =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
+
+/**
+ * digest_encode() - encode binary data with a base64-like alphabet
+ *
+ * Encodes the input digest using characters from the set [A-Za-z0-9+,].
+ * The encoded string is roughly 4/3 times the size of the input string.
+ */
+static int digest_encode(const char *src, int len, char *dst)
+{
+ int i = 0, bits = 0, ac = 0;
+ char *cp = dst;
+
+ while (i < len) {
+ ac += (((unsigned char) src[i]) << bits);
+ bits += 8;
+ do {
+ *cp++ = lookup_table[ac & 0x3f];
+ ac >>= 6;
+ bits -= 6;
+ } while (bits >= 6);
+ i++;
+ }
+ if (bits)
+ *cp++ = lookup_table[ac & 0x3f];
+ return cp - dst;
+}
+
+static int digest_decode(const char *src, int len, char *dst)
+{
+ int i = 0, bits = 0, ac = 0;
+ const char *p;
+ char *cp = dst;
+
+ while (i < len) {
+ p = strchr(lookup_table, src[i]);
+ if (p == NULL || src[i] == 0)
+ return -2;
+ ac += (p - lookup_table) << bits;
+ bits += 6;
+ if (bits >= 8) {
+ *cp++ = ac & 0xff;
+ ac >>= 8;
+ bits -= 8;
+ }
+ i++;
+ }
+ if (ac)
+ return -1;
+ return cp - dst;
+}
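A round-trip sketch of the pair above: the encoder emits one character per 6 input bits, plus one for any remainder, so a 16-byte digest becomes 22 characters and decoding recovers the original 16 bytes:

	char digest[16];	/* filled in by the caller */
	char encoded[24], decoded[16];
	int elen, dlen;

	elen = digest_encode(digest, sizeof(digest), encoded);
	/* elen == 22: 128 bits emit 21 full 6-bit chars plus a remainder */
	dlen = digest_decode(encoded, elen, decoded);
	/* dlen == 16 and decoded matches digest */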
+
+u32 fscrypt_fname_encrypted_size(struct inode *inode, u32 ilen)
+{
+ int padding = 32;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ if (ci)
+ padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
+ ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
+ return round_up(ilen, padding);
+}
+EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
+
+/**
+ * fscrypt_fname_alloc_buffer() - allocate an output buffer
+ *
+ * Allocates an output buffer that is sufficient for the crypto operation
+ * specified by the context and the direction.
+ */
+int fscrypt_fname_alloc_buffer(struct inode *inode,
+ u32 ilen, struct fscrypt_str *crypto_str)
+{
+ unsigned int olen = fscrypt_fname_encrypted_size(inode, ilen);
+
+ crypto_str->len = olen;
+ if (olen < FS_FNAME_CRYPTO_DIGEST_SIZE * 2)
+ olen = FS_FNAME_CRYPTO_DIGEST_SIZE * 2;
+ /*
+ * Allocate one extra character so the string can be NUL-terminated.
+ */
+ crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
+ if (!(crypto_str->name))
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_fname_alloc_buffer);
+
+/**
+ * fscrypt_fname_free_buffer() - free an output buffer
+ *
+ * Frees the buffer allocated for crypto operation.
+ */
+void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+ if (!crypto_str)
+ return;
+ kfree(crypto_str->name);
+ crypto_str->name = NULL;
+}
+EXPORT_SYMBOL(fscrypt_fname_free_buffer);
+
+/**
+ * fscrypt_fname_disk_to_usr() - converts a filename from disk space to user
+ * space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_fname_disk_to_usr(struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ const struct qstr qname = FSTR_TO_QSTR(iname);
+ char buf[24];
+
+ if (fscrypt_is_dot_dotdot(&qname)) {
+ oname->name[0] = '.';
+ oname->name[iname->len - 1] = '.';
+ oname->len = iname->len;
+ return 0;
+ }
+
+ if (iname->len < FS_CRYPTO_BLOCK_SIZE)
+ return -EUCLEAN;
+
+ if (inode->i_crypt_info)
+ return fname_decrypt(inode, iname, oname);
+
+ if (iname->len <= FS_FNAME_CRYPTO_DIGEST_SIZE) {
+ oname->len = digest_encode(iname->name, iname->len,
+ oname->name);
+ return 0;
+ }
+ if (hash) {
+ memcpy(buf, &hash, 4);
+ memcpy(buf + 4, &minor_hash, 4);
+ } else {
+ memset(buf, 0, 8);
+ }
+ memcpy(buf + 8, iname->name + iname->len - 16, 16);
+ oname->name[0] = '_';
+ oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
+
+/**
+ * fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
+ * space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_fname_usr_to_disk(struct inode *inode,
+ const struct qstr *iname,
+ struct fscrypt_str *oname)
+{
+ if (fscrypt_is_dot_dotdot(iname)) {
+ oname->name[0] = '.';
+ oname->name[iname->len - 1] = '.';
+ oname->len = iname->len;
+ return 0;
+ }
+ if (inode->i_crypt_info)
+ return fname_encrypt(inode, iname, oname);
+ /*
+ * Without a proper key, a user is not allowed to modify the filenames
+ * in a directory. Consequently, a user space name cannot be mapped to
+ * a disk-space name.
+ */
+ return -EACCES;
+}
+EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
+
+int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ int ret = 0, bigname = 0;
+
+ memset(fname, 0, sizeof(struct fscrypt_name));
+ fname->usr_fname = iname;
+
+ if (!dir->i_sb->s_cop->is_encrypted(dir) ||
+ fscrypt_is_dot_dotdot(iname)) {
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+ }
+ ret = get_crypt_info(dir);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ if (dir->i_crypt_info) {
+ ret = fscrypt_fname_alloc_buffer(dir, iname->len,
+ &fname->crypto_buf);
+ if (ret)
+ return ret;
+ ret = fname_encrypt(dir, iname, &fname->crypto_buf);
+ if (ret)
+ goto errout;
+ fname->disk_name.name = fname->crypto_buf.name;
+ fname->disk_name.len = fname->crypto_buf.len;
+ return 0;
+ }
+ if (!lookup)
+ return -EACCES;
+
+ /*
+ * We don't have the key and we are doing a lookup; decode the
+ * user-supplied name
+ */
+ if (iname->name[0] == '_')
+ bigname = 1;
+ if ((bigname && (iname->len != 33)) || (!bigname && (iname->len > 43)))
+ return -ENOENT;
+
+ fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
+ if (fname->crypto_buf.name == NULL)
+ return -ENOMEM;
+
+ ret = digest_decode(iname->name + bigname, iname->len - bigname,
+ fname->crypto_buf.name);
+ if (ret < 0) {
+ ret = -ENOENT;
+ goto errout;
+ }
+ fname->crypto_buf.len = ret;
+ if (bigname) {
+ memcpy(&fname->hash, fname->crypto_buf.name, 4);
+ memcpy(&fname->minor_hash, fname->crypto_buf.name + 4, 4);
+ } else {
+ fname->disk_name.name = fname->crypto_buf.name;
+ fname->disk_name.len = fname->crypto_buf.len;
+ }
+ return 0;
+
+errout:
+ fscrypt_fname_free_buffer(&fname->crypto_buf);
+ return ret;
+}
+EXPORT_SYMBOL(fscrypt_setup_filename);
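The magic lengths in the lookup path above fall out of digest_encode(): one output character per 6 input bits, so the 24-byte hash-plus-tail buffer used for big names encodes to DIV_ROUND_UP(24 * 8, 6) = 32 characters, or 33 with the leading '_', while a short no-key name carrying at most 32 ciphertext bytes encodes to at most DIV_ROUND_UP(32 * 8, 6) = 43 characters.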
+
+void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ kfree(fname->crypto_buf.name);
+ fname->crypto_buf.name = NULL;
+ fname->usr_fname = NULL;
+ fname->disk_name.name = NULL;
+}
+EXPORT_SYMBOL(fscrypt_free_filename);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
new file mode 100644
index 000000000000..67fb6d8876d0
--- /dev/null
+++ b/fs/crypto/keyinfo.c
@@ -0,0 +1,333 @@
+/*
+ * key management facility for FS encryption support.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption key functions.
+ *
+ * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ */
+
+#include <keys/user-type.h>
+#include <linux/scatterlist.h>
+#include <linux/fscrypto.h>
+
+static void derive_crypt_complete(struct crypto_async_request *req, int rc)
+{
+ struct fscrypt_completion_result *ecr = req->data;
+
+ if (rc == -EINPROGRESS)
+ return;
+
+ ecr->res = rc;
+ complete(&ecr->completion);
+}
+
+/**
+ * derive_key_aes() - Derive a key using AES-128-ECB
+ * @deriving_key: Encryption key used for derivation.
+ * @source_key: Source key to which to apply derivation.
+ * @derived_key: Derived key.
+ *
+ * Return: Zero on success; non-zero otherwise.
+ */
+static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
+ u8 source_key[FS_AES_256_XTS_KEY_SIZE],
+ u8 derived_key[FS_AES_256_XTS_KEY_SIZE])
+{
+ int res = 0;
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct scatterlist src_sg, dst_sg;
+ struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+
+ if (IS_ERR(tfm)) {
+ res = PTR_ERR(tfm);
+ tfm = NULL;
+ goto out;
+ }
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ req = skcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ res = -ENOMEM;
+ goto out;
+ }
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ derive_crypt_complete, &ecr);
+ res = crypto_skcipher_setkey(tfm, deriving_key,
+ FS_AES_128_ECB_KEY_SIZE);
+ if (res < 0)
+ goto out;
+
+ sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE);
+ sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg,
+ FS_AES_256_XTS_KEY_SIZE, NULL);
+ res = crypto_skcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+out:
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ return res;
+}
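Worth noting about the derivation above: the argument order means the 16-byte per-file nonce is used as the AES-128-ECB *key*, while the 64-byte master key is the data being encrypted. The call site later in this file reads:

	/* per-file key = AES-128-ECB(key = nonce, data = master key) */
	res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);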
+
+static int validate_user_key(struct fscrypt_info *crypt_info,
+ struct fscrypt_context *ctx, u8 *raw_key,
+ u8 *prefix, int prefix_size)
+{
+ u8 *full_key_descriptor;
+ struct key *keyring_key;
+ struct fscrypt_key *master_key;
+ const struct user_key_payload *ukp;
+ int full_key_len = prefix_size + (FS_KEY_DESCRIPTOR_SIZE * 2) + 1;
+ int res;
+
+ full_key_descriptor = kmalloc(full_key_len, GFP_NOFS);
+ if (!full_key_descriptor)
+ return -ENOMEM;
+
+ memcpy(full_key_descriptor, prefix, prefix_size);
+ sprintf(full_key_descriptor + prefix_size,
+ "%*phN", FS_KEY_DESCRIPTOR_SIZE,
+ ctx->master_key_descriptor);
+ full_key_descriptor[full_key_len - 1] = '\0';
+ keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
+ kfree(full_key_descriptor);
+ if (IS_ERR(keyring_key))
+ return PTR_ERR(keyring_key);
+
+ if (keyring_key->type != &key_type_logon) {
+ printk_once(KERN_WARNING
+ "%s: key type must be logon\n", __func__);
+ res = -ENOKEY;
+ goto out;
+ }
+ down_read(&keyring_key->sem);
+ ukp = user_key_payload(keyring_key);
+ if (ukp->datalen != sizeof(struct fscrypt_key)) {
+ res = -EINVAL;
+ up_read(&keyring_key->sem);
+ goto out;
+ }
+ master_key = (struct fscrypt_key *)ukp->data;
+ BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);
+
+ if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
+ printk_once(KERN_WARNING
+ "%s: key size incorrect: %d\n",
+ __func__, master_key->size);
+ res = -ENOKEY;
+ up_read(&keyring_key->sem);
+ goto out;
+ }
+ res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
+ up_read(&keyring_key->sem);
+ if (res)
+ goto out;
+
+ crypt_info->ci_keyring_key = keyring_key;
+ return 0;
+out:
+ key_put(keyring_key);
+ return res;
+}
+
+static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
+ const char **cipher_str_ret, int *keysize_ret)
+{
+ if (S_ISREG(inode->i_mode)) {
+ if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
+ *cipher_str_ret = "xts(aes)";
+ *keysize_ret = FS_AES_256_XTS_KEY_SIZE;
+ return 0;
+ }
+ pr_warn_once("fscrypto: unsupported contents encryption mode "
+ "%d for inode %lu\n",
+ ci->ci_data_mode, inode->i_ino);
+ return -ENOKEY;
+ }
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+ if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
+ *cipher_str_ret = "cts(cbc(aes))";
+ *keysize_ret = FS_AES_256_CTS_KEY_SIZE;
+ return 0;
+ }
+ pr_warn_once("fscrypto: unsupported filenames encryption mode "
+ "%d for inode %lu\n",
+ ci->ci_filename_mode, inode->i_ino);
+ return -ENOKEY;
+ }
+
+ pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n",
+ (inode->i_mode & S_IFMT), inode->i_ino);
+ return -ENOKEY;
+}
+
+static void put_crypt_info(struct fscrypt_info *ci)
+{
+ if (!ci)
+ return;
+
+ key_put(ci->ci_keyring_key);
+ crypto_free_skcipher(ci->ci_ctfm);
+ kmem_cache_free(fscrypt_info_cachep, ci);
+}
+
+int get_crypt_info(struct inode *inode)
+{
+ struct fscrypt_info *crypt_info;
+ struct fscrypt_context ctx;
+ struct crypto_skcipher *ctfm;
+ const char *cipher_str;
+ int keysize;
+ u8 *raw_key = NULL;
+ int res;
+
+ res = fscrypt_initialize();
+ if (res)
+ return res;
+
+ if (!inode->i_sb->s_cop->get_context)
+ return -EOPNOTSUPP;
+retry:
+ crypt_info = ACCESS_ONCE(inode->i_crypt_info);
+ if (crypt_info) {
+ if (!crypt_info->ci_keyring_key ||
+ key_validate(crypt_info->ci_keyring_key) == 0)
+ return 0;
+ fscrypt_put_encryption_info(inode, crypt_info);
+ goto retry;
+ }
+
+ res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (res < 0) {
+ if (!fscrypt_dummy_context_enabled(inode))
+ return res;
+ ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
+ ctx.flags = 0;
+ } else if (res != sizeof(ctx)) {
+ return -EINVAL;
+ }
+
+ if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+ return -EINVAL;
+
+ if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
+ return -EINVAL;
+
+ crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
+ if (!crypt_info)
+ return -ENOMEM;
+
+ crypt_info->ci_flags = ctx.flags;
+ crypt_info->ci_data_mode = ctx.contents_encryption_mode;
+ crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
+ crypt_info->ci_ctfm = NULL;
+ crypt_info->ci_keyring_key = NULL;
+ memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
+ sizeof(crypt_info->ci_master_key));
+
+ res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+ if (res)
+ goto out;
+
+ /*
+ * This cannot be a stack buffer because it is passed to the scatterlist
+ * crypto API as part of key derivation.
+ */
+ res = -ENOMEM;
+ raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
+ if (!raw_key)
+ goto out;
+
+ if (fscrypt_dummy_context_enabled(inode)) {
+ memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
+ goto got_key;
+ }
+
+ res = validate_user_key(crypt_info, &ctx, raw_key,
+ FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
+ if (res && inode->i_sb->s_cop->key_prefix) {
+ u8 *prefix = NULL;
+ int prefix_size, res2;
+
+ prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
+ res2 = validate_user_key(crypt_info, &ctx, raw_key,
+ prefix, prefix_size);
+ if (res2) {
+ if (res2 == -ENOKEY)
+ res = -ENOKEY;
+ goto out;
+ }
+ } else if (res) {
+ goto out;
+ }
+got_key:
+ ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
+ if (!ctfm || IS_ERR(ctfm)) {
+ res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
+ printk(KERN_DEBUG
+ "%s: error %d (inode %u) allocating crypto tfm\n",
+ __func__, res, (unsigned) inode->i_ino);
+ goto out;
+ }
+ crypt_info->ci_ctfm = ctfm;
+ crypto_skcipher_clear_flags(ctfm, ~0);
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
+ if (res)
+ goto out;
+
+ kzfree(raw_key);
+ raw_key = NULL;
+ if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
+ put_crypt_info(crypt_info);
+ goto retry;
+ }
+ return 0;
+
+out:
+ if (res == -ENOKEY)
+ res = 0;
+ put_crypt_info(crypt_info);
+ kzfree(raw_key);
+ return res;
+}
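get_crypt_info() publishes the finished fscrypt_info with cmpxchg(), the usual once-only-install idiom for racing initializers; reduced to its shape, with holder/object names hypothetical:

	struct obj *o = ACCESS_ONCE(h->obj);

	if (o)
		return o;			/* already installed */
	o = build_obj();			/* hypothetical constructor */
	if (cmpxchg(&h->obj, NULL, o) != NULL) {
		free_obj(o);			/* another racer won; drop ours */
		o = ACCESS_ONCE(h->obj);
	}
	return o;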
+
+void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
+{
+ struct fscrypt_info *prev;
+
+ if (ci == NULL)
+ ci = ACCESS_ONCE(inode->i_crypt_info);
+ if (ci == NULL)
+ return;
+
+ prev = cmpxchg(&inode->i_crypt_info, ci, NULL);
+ if (prev != ci)
+ return;
+
+ put_crypt_info(ci);
+}
+EXPORT_SYMBOL(fscrypt_put_encryption_info);
+
+int fscrypt_get_encryption_info(struct inode *inode)
+{
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ if (!ci ||
+ (ci->ci_keyring_key &&
+ (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED) |
+ (1 << KEY_FLAG_DEAD)))))
+ return get_crypt_info(inode);
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
new file mode 100644
index 000000000000..6865663aac69
--- /dev/null
+++ b/fs/crypto/policy.c
@@ -0,0 +1,250 @@
+/*
+ * Encryption policy functions for per-file encryption support.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/fscrypto.h>
+#include <linux/mount.h>
+
+static int inode_has_encryption_context(struct inode *inode)
+{
+ if (!inode->i_sb->s_cop->get_context)
+ return 0;
+ return (inode->i_sb->s_cop->get_context(inode, NULL, 0L) > 0);
+}
+
+/*
+ * check whether the policy is consistent with the encryption context
+ * for the inode
+ */
+static int is_encryption_context_consistent_with_policy(struct inode *inode,
+ const struct fscrypt_policy *policy)
+{
+ struct fscrypt_context ctx;
+ int res;
+
+ if (!inode->i_sb->s_cop->get_context)
+ return 0;
+
+ res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (res != sizeof(ctx))
+ return 0;
+
+ return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (ctx.flags == policy->flags) &&
+ (ctx.contents_encryption_mode ==
+ policy->contents_encryption_mode) &&
+ (ctx.filenames_encryption_mode ==
+ policy->filenames_encryption_mode));
+}
+
+static int create_encryption_context_from_policy(struct inode *inode,
+ const struct fscrypt_policy *policy)
+{
+ struct fscrypt_context ctx;
+ int res;
+
+ if (!inode->i_sb->s_cop->set_context)
+ return -EOPNOTSUPP;
+
+ if (inode->i_sb->s_cop->prepare_context) {
+ res = inode->i_sb->s_cop->prepare_context(inode);
+ if (res)
+ return res;
+ }
+
+ ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE);
+
+ if (!fscrypt_valid_contents_enc_mode(
+ policy->contents_encryption_mode)) {
+ printk(KERN_WARNING
+ "%s: Invalid contents encryption mode %d\n", __func__,
+ policy->contents_encryption_mode);
+ return -EINVAL;
+ }
+
+ if (!fscrypt_valid_filenames_enc_mode(
+ policy->filenames_encryption_mode)) {
+ printk(KERN_WARNING
+ "%s: Invalid filenames encryption mode %d\n", __func__,
+ policy->filenames_encryption_mode);
+ return -EINVAL;
+ }
+
+ if (policy->flags & ~FS_POLICY_FLAGS_VALID)
+ return -EINVAL;
+
+ ctx.contents_encryption_mode = policy->contents_encryption_mode;
+ ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
+ ctx.flags = policy->flags;
+ BUILD_BUG_ON(sizeof(ctx.nonce) != FS_KEY_DERIVATION_NONCE_SIZE);
+ get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+
+ return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
+}
+
+int fscrypt_process_policy(struct file *filp,
+ const struct fscrypt_policy *policy)
+{
+ struct inode *inode = file_inode(filp);
+ int ret;
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (policy->version != 0)
+ return -EINVAL;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
+ if (!inode_has_encryption_context(inode)) {
+ if (!S_ISDIR(inode->i_mode))
+ ret = -EINVAL;
+ else if (!inode->i_sb->s_cop->empty_dir)
+ ret = -EOPNOTSUPP;
+ else if (!inode->i_sb->s_cop->empty_dir(inode))
+ ret = -ENOTEMPTY;
+ else
+ ret = create_encryption_context_from_policy(inode,
+ policy);
+ } else if (!is_encryption_context_consistent_with_policy(inode,
+ policy)) {
+ printk(KERN_WARNING
+ "%s: Policy inconsistent with encryption context\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ inode_unlock(inode);
+
+ mnt_drop_write_file(filp);
+ return ret;
+}
+EXPORT_SYMBOL(fscrypt_process_policy);
+
+int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
+{
+ struct fscrypt_context ctx;
+ int res;
+
+ if (!inode->i_sb->s_cop->get_context ||
+ !inode->i_sb->s_cop->is_encrypted(inode))
+ return -ENODATA;
+
+ res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (res != sizeof(ctx))
+ return -ENODATA;
+ if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+ return -EINVAL;
+
+ policy->version = 0;
+ policy->contents_encryption_mode = ctx.contents_encryption_mode;
+ policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
+ policy->flags = ctx.flags;
+ memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_get_policy);
+
+int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
+{
+ struct fscrypt_info *parent_ci, *child_ci;
+ int res;
+
+ if ((parent == NULL) || (child == NULL)) {
+ printk(KERN_ERR "parent %p child %p\n", parent, child);
+ BUG_ON(1);
+ }
+
+ /* no restrictions if the parent directory is not encrypted */
+ if (!parent->i_sb->s_cop->is_encrypted(parent))
+ return 1;
+ /* if the child directory is not encrypted, this is always a problem */
+ if (!parent->i_sb->s_cop->is_encrypted(child))
+ return 0;
+ res = fscrypt_get_encryption_info(parent);
+ if (res)
+ return 0;
+ res = fscrypt_get_encryption_info(child);
+ if (res)
+ return 0;
+ parent_ci = parent->i_crypt_info;
+ child_ci = child->i_crypt_info;
+ if (!parent_ci && !child_ci)
+ return 1;
+ if (!parent_ci || !child_ci)
+ return 0;
+
+ return (memcmp(parent_ci->ci_master_key,
+ child_ci->ci_master_key,
+ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
+ (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
+ (parent_ci->ci_flags == child_ci->ci_flags));
+}
+EXPORT_SYMBOL(fscrypt_has_permitted_context);
+
+/**
+ * fscrypt_inherit_context() - Sets a child context from its parent
+ * @parent: Parent inode from which the context is inherited.
+ * @child: Child inode that inherits the context from @parent.
+ * @fs_data: private data given by FS.
+ * @preload: preload child i_crypt_info
+ *
+ * Return: Zero on success, non-zero otherwise
+ */
+int fscrypt_inherit_context(struct inode *parent, struct inode *child,
+ void *fs_data, bool preload)
+{
+ struct fscrypt_context ctx;
+ struct fscrypt_info *ci;
+ int res;
+
+ if (!parent->i_sb->s_cop->set_context)
+ return -EOPNOTSUPP;
+
+ res = fscrypt_get_encryption_info(parent);
+ if (res < 0)
+ return res;
+
+ ci = parent->i_crypt_info;
+ if (ci == NULL)
+ return -ENOKEY;
+
+ ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ if (fscrypt_dummy_context_enabled(parent)) {
+ ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
+ ctx.flags = 0;
+ memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
+ res = 0;
+ } else {
+ ctx.contents_encryption_mode = ci->ci_data_mode;
+ ctx.filenames_encryption_mode = ci->ci_filename_mode;
+ ctx.flags = ci->ci_flags;
+ memcpy(ctx.master_key_descriptor, ci->ci_master_key,
+ FS_KEY_DESCRIPTOR_SIZE);
+ }
+ get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+ res = parent->i_sb->s_cop->set_context(child, &ctx,
+ sizeof(ctx), fs_data);
+ if (res)
+ return res;
+ return preload ? fscrypt_get_encryption_info(child) : 0;
+}
+EXPORT_SYMBOL(fscrypt_inherit_context);
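The policy entry points above are meant to be driven from a filesystem's ioctl handler. A minimal sketch of such a call site, assuming the standard FS_IOC_SET/GET_ENCRYPTION_POLICY plumbing (the handler names here are hypothetical):

static long example_ioc_set_policy(struct file *filp, void __user *arg)
{
	struct fscrypt_policy policy;

	if (copy_from_user(&policy, arg, sizeof(policy)))
		return -EFAULT;
	/* checks ownership, version, and directory emptiness for new contexts */
	return fscrypt_process_policy(filp, &policy);
}

static long example_ioc_get_policy(struct file *filp, void __user *arg)
{
	struct fscrypt_policy policy;
	int err = fscrypt_get_policy(file_inode(filp), &policy);

	if (err)
		return err;
	return copy_to_user(arg, &policy, sizeof(policy)) ? -EFAULT : 0;
}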
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 173b3873a4f4..e40c440a4555 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -355,6 +355,10 @@ static int dlm_device_register(struct dlm_ls *ls, char *name)
error = misc_register(&ls->ls_device);
if (error) {
kfree(ls->ls_device.name);
+ /* This has to be set to NULL
+ * to avoid a double-free in dlm_device_deregister()
+ */
+ ls->ls_device.name = NULL;
}
fail:
return error;
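For context, the deregistration path that motivates the fix looks roughly like the simplified, hypothetical sketch below (not part of this hunk); since kfree(NULL) is a no-op, clearing the pointer after the failed misc_register() makes a later free of the same field harmless.

static void example_device_deregister(struct dlm_ls *ls)
{
	misc_deregister(&ls->ls_device);
	kfree(ls->ls_device.name);	/* safe: kfree(NULL) does nothing */
}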
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 3ab9c68b8bce..066df649a6b0 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -519,8 +519,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
wait_queue_head_t *whead;
rcu_read_lock();
- /* If it is cleared by POLLFREE, it should be rcu-safe */
- whead = rcu_dereference(pwq->whead);
+ /*
+ * If it is cleared by POLLFREE, it should be rcu-safe.
+ * If we read NULL we need a barrier paired with
+ * smp_store_release() in ep_poll_callback(), otherwise
+ * we rely on whead->lock.
+ */
+ whead = smp_load_acquire(&pwq->whead);
if (whead)
remove_wait_queue(whead, &pwq->wait);
rcu_read_unlock();
@@ -1004,17 +1009,6 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
struct epitem *epi = ep_item_from_wait(wait);
struct eventpoll *ep = epi->ep;
- if ((unsigned long)key & POLLFREE) {
- ep_pwq_from_wait(wait)->whead = NULL;
- /*
- * whead = NULL above can race with ep_remove_wait_queue()
- * which can do another remove_wait_queue() after us, so we
- * can't use __remove_wait_queue(). whead->lock is held by
- * the caller.
- */
- list_del_init(&wait->task_list);
- }
-
spin_lock_irqsave(&ep->lock, flags);
/*
@@ -1079,6 +1073,23 @@ out_unlock:
if (pwake)
ep_poll_safewake(&ep->poll_wait);
+
+ if ((unsigned long)key & POLLFREE) {
+ /*
+ * If we race with ep_remove_wait_queue() it can miss
+ * ->whead = NULL and do another remove_wait_queue() after
+ * us, so we can't use __remove_wait_queue().
+ */
+ list_del_init(&wait->task_list);
+ /*
+ * ->whead != NULL protects us from the race with ep_free()
+ * or ep_remove(), ep_remove_wait_queue() takes whead->lock
+ * held by the caller. Once we nullify it, nothing protects
+ * ep/epi or even wait.
+ */
+ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
+ }
+
return 1;
}
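The ordering contract introduced above can be read on its own: the callback unlinks the wait entry first and only then publishes NULL with release semantics, so a reader whose paired acquire load observes NULL is guaranteed to also observe the unlink. Distilled into a sketch using the same primitives as the hunks:

/* writer (ep_poll_callback, POLLFREE case) */
list_del_init(&wait->task_list);	/* step 1: unlink */
smp_store_release(&pwq->whead, NULL);	/* step 2: publish */

/* reader (ep_remove_wait_queue) */
whead = smp_load_acquire(&pwq->whead);	/* pairs with step 2 */
if (whead)				/* non-NULL: whead->lock protects us */
	remove_wait_queue(whead, &pwq->wait);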
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index b9f838af5a72..f5099a3386ec 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -389,14 +389,12 @@ int ext4_decrypt(struct page *page)
page->index, page, page, GFP_NOFS);
}
-int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
+int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, ext4_lblk_t len)
{
struct ext4_crypto_ctx *ctx;
struct page *ciphertext_page = NULL;
struct bio *bio;
- ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
- ext4_fsblk_t pblk = ext4_ext_pblock(ex);
- unsigned int len = ext4_ext_get_actual_len(ex);
int ret, err = 0;
#if 0
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 6c910f127f1f..abc9e169cb44 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2271,7 +2271,8 @@ struct page *ext4_encrypt(struct inode *inode,
struct page *plaintext_page,
gfp_t gfp_flags);
int ext4_decrypt(struct page *page);
-int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
+int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, ext4_lblk_t len);
extern const struct dentry_operations ext4_encrypted_d_ops;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
@@ -2539,6 +2540,8 @@ extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
+extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, ext4_lblk_t len);
/* indirect.c */
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 61d5bfc7318c..fc496c646d12 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3127,19 +3127,11 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
ext4_fsblk_t ee_pblock;
unsigned int ee_len;
- int ret;
ee_len = ext4_ext_get_actual_len(ex);
ee_pblock = ext4_ext_pblock(ex);
-
- if (ext4_encrypted_inode(inode))
- return ext4_encrypted_zeroout(inode, ex);
-
- ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
- if (ret > 0)
- ret = 0;
-
- return ret;
+ return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
+ ee_len);
}
/*
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 46d30bc18c0d..c1d7b3ab8b43 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -390,6 +390,21 @@ static int __check_block_validity(struct inode *inode, const char *func,
return 0;
}
+int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
+ ext4_lblk_t len)
+{
+ int ret;
+
+ if (ext4_encrypted_inode(inode))
+ return ext4_encrypted_zeroout(inode, lblk, pblk, len);
+
+ ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
#define check_block_validity(inode, map) \
__check_block_validity((inode), __func__, __LINE__, (map))
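With the helper in place, the encrypted/unencrypted split lives in one spot, and the extent-path caller (reworked earlier in this patch) reduces to roughly:

static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block),
				  ext4_ext_pblock(ex),
				  ext4_ext_get_actual_len(ex));
}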
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index bd8831bfbafe..83a72da67df7 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2204,6 +2204,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
unsigned int s_flags = sb->s_flags;
int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
+ int quota_update = 0;
int i;
#endif
if (!es->s_last_orphan) {
@@ -2242,14 +2243,32 @@ static void ext4_orphan_cleanup(struct super_block *sb,
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
sb->s_flags |= MS_ACTIVE;
- /* Turn on quotas so that they are updated correctly */
+
+ /*
+ * Turn on quotas that were not enabled for read-only mounts if the
+ * filesystem has the quota feature, so that they are updated correctly.
+ */
+ if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
+ int ret = ext4_enable_quotas(sb);
+
+ if (!ret)
+ quota_update = 1;
+ else
+ ext4_msg(sb, KERN_ERR,
+ "Cannot turn on quotas: error %d", ret);
+ }
+
+ /* Turn on journaled quotas used for old-style quota setups */
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
if (EXT4_SB(sb)->s_qf_names[i]) {
int ret = ext4_quota_on_mount(sb, i);
- if (ret < 0)
+
+ if (!ret)
+ quota_update = 1;
+ else
ext4_msg(sb, KERN_ERR,
"Cannot turn on journaled "
- "quota: error %d", ret);
+ "quota: type %d: error %d", i, ret);
}
}
#endif
@@ -2308,10 +2327,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
- /* Turn quotas off */
- for (i = 0; i < EXT4_MAXQUOTAS; i++) {
- if (sb_dqopt(sb)->files[i])
- dquot_quota_off(sb, i);
+ /* Turn off quotas if they were enabled for orphan cleanup */
+ if (quota_update) {
+ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+ if (sb_dqopt(sb)->files[i])
+ dquot_quota_off(sb, i);
+ }
}
#endif
sb->s_flags = s_flags; /* Restore MS_RDONLY status */
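The quota_update flag implements a simple "disable only what we enabled" protocol for the read-only-mount case; distilled into a sketch, where need_quotas stands in for the feature and mount-option checks above:

int quota_update = 0;

if (need_quotas && (s_flags & MS_RDONLY) && !ext4_enable_quotas(sb))
	quota_update = 1;	/* we turned them on, so we turn them off */

/* ... orphan/truncate processing ... */

if (quota_update)
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sb_dqopt(sb)->files[i])
			dquot_quota_off(sb, i);

The hunk below adds the complementary unwind inside ext4_enable_quotas() itself, so a partial enable failure does not leave earlier quota types running.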
@@ -5123,6 +5144,9 @@ static int ext4_enable_quotas(struct super_block *sb)
err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
DQUOT_USAGE_ENABLED);
if (err) {
+ for (type--; type >= 0; type--)
+ dquot_quota_off(sb, type);
+
ext4_warning(sb,
"Failed to enable quota tracking "
"(type=%d, err=%d). Please run "
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index b0a9dc929f88..1852d99df97b 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -1,6 +1,9 @@
config F2FS_FS
tristate "F2FS filesystem support"
depends on BLOCK
+ select CRYPTO
+ select KEYS
+ select CRYPTO_CRC32
help
F2FS is based on Log-structured File System (LFS), which supports
versatile "flash-friendly" features. The design has been focused on
@@ -76,15 +79,7 @@ config F2FS_FS_ENCRYPTION
bool "F2FS Encryption"
depends on F2FS_FS
depends on F2FS_FS_XATTR
- select CRYPTO_AES
- select CRYPTO_CBC
- select CRYPTO_ECB
- select CRYPTO_XTS
- select CRYPTO_CTS
- select CRYPTO_CTR
- select CRYPTO_SHA256
- select KEYS
- select ENCRYPTED_KEYS
+ select FS_ENCRYPTION
help
Enable encryption of f2fs files and directories. This
feature is similar to ecryptfs, but it is more memory
@@ -100,3 +95,11 @@ config F2FS_IO_TRACE
information and block IO patterns in the filesystem level.
If unsure, say N.
+
+config F2FS_FAULT_INJECTION
+ bool "F2FS fault injection facility"
+ depends on F2FS_FS
+ help
+ Enable fault injection in F2FS to test error paths such as ENOMEM, ENOSPC, and so on.
+
+ If unsure, say N.
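Later hunks show how the new option is consumed: a guarded early bail-out at the point where the fault would naturally surface. The acquire_orphan_inode() change further down is a representative instance:

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		return -ENOSPC;		/* simulate orphan-slot exhaustion */
	}
#endif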
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
index 08e101ed914c..ca949ea7c02f 100644
--- a/fs/f2fs/Makefile
+++ b/fs/f2fs/Makefile
@@ -7,5 +7,3 @@ f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
-f2fs-$(CONFIG_F2FS_FS_ENCRYPTION) += crypto_policy.o crypto.o \
- crypto_key.o crypto_fname.o
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 83dcf7bfd7b8..a45d1f4b7b0f 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -109,14 +109,16 @@ fail:
return ERR_PTR(-EINVAL);
}
-static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size)
+static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
+ const struct posix_acl *acl, size_t *size)
{
struct f2fs_acl_header *f2fs_acl;
struct f2fs_acl_entry *entry;
int i;
- f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
- sizeof(struct f2fs_acl_entry), GFP_NOFS);
+ f2fs_acl = f2fs_kmalloc(sbi, sizeof(struct f2fs_acl_header) +
+ acl->a_count * sizeof(struct f2fs_acl_entry),
+ GFP_NOFS);
if (!f2fs_acl)
return ERR_PTR(-ENOMEM);
@@ -175,7 +177,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage);
if (retval > 0) {
- value = kmalloc(retval, GFP_F2FS_ZERO);
+ value = f2fs_kmalloc(F2FS_I_SB(inode), retval, GFP_F2FS_ZERO);
if (!value)
return ERR_PTR(-ENOMEM);
retval = f2fs_getxattr(inode, name_index, "", value,
@@ -204,7 +206,6 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
static int __f2fs_set_acl(struct inode *inode, int type,
struct posix_acl *acl, struct page *ipage)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
int name_index;
void *value = NULL;
size_t size = 0;
@@ -213,11 +214,11 @@ static int __f2fs_set_acl(struct inode *inode, int type,
switch (type) {
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl && !ipage) {
+ if (acl) {
error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
if (error)
return error;
- set_acl_inode(fi, inode->i_mode);
+ set_acl_inode(inode, inode->i_mode);
}
break;
@@ -232,9 +233,9 @@ static int __f2fs_set_acl(struct inode *inode, int type,
}
if (acl) {
- value = f2fs_acl_to_disk(acl, &size);
+ value = f2fs_acl_to_disk(F2FS_I_SB(inode), acl, &size);
if (IS_ERR(value)) {
- clear_inode_flag(fi, FI_ACL_MODE);
+ clear_inode_flag(inode, FI_ACL_MODE);
return (int)PTR_ERR(value);
}
}
@@ -245,7 +246,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
if (!error)
set_cached_acl(inode, type, acl);
- clear_inode_flag(fi, FI_ACL_MODE);
+ clear_inode_flag(inode, FI_ACL_MODE);
return error;
}
@@ -386,6 +387,8 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
if (error)
return error;
+ f2fs_mark_inode_dirty_sync(inode, true);
+
if (default_acl) {
error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
ipage);
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index 997ca8edb6cb..2c685185c24d 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -37,11 +37,10 @@ struct f2fs_acl_header {
#ifdef CONFIG_F2FS_FS_POSIX_ACL
extern struct posix_acl *f2fs_get_acl(struct inode *, int);
-extern int f2fs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int f2fs_set_acl(struct inode *, struct posix_acl *, int);
extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
struct page *);
#else
-#define f2fs_check_acl NULL
#define f2fs_get_acl NULL
#define f2fs_set_acl NULL
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f661d80474be..640f28576e88 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -26,6 +26,14 @@
static struct kmem_cache *ino_entry_slab;
struct kmem_cache *inode_entry_slab;
+void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
+{
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ sbi->sb->s_flags |= MS_RDONLY;
+ if (!end_io)
+ f2fs_flush_merged_bios(sbi);
+}
+
/*
* We guarantee no failure on the returned page.
*/
@@ -34,13 +42,14 @@ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
struct address_space *mapping = META_MAPPING(sbi);
struct page *page = NULL;
repeat:
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, false);
if (!page) {
cond_resched();
goto repeat;
}
- f2fs_wait_on_page_writeback(page, META);
- SetPageUptodate(page);
+ f2fs_wait_on_page_writeback(page, META, true);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
return page;
}
@@ -56,14 +65,15 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
.sbi = sbi,
.type = META,
.rw = READ_SYNC | REQ_META | REQ_PRIO,
- .blk_addr = index,
+ .old_blkaddr = index,
+ .new_blkaddr = index,
.encrypted_page = NULL,
};
if (unlikely(!is_meta))
fio.rw &= ~REQ_META;
repeat:
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, false);
if (!page) {
cond_resched();
goto repeat;
@@ -90,7 +100,7 @@ repeat:
* meta page.
*/
if (unlikely(!PageUptodate(page)))
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
out:
return page;
}
@@ -143,7 +153,6 @@ bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync)
{
- block_t prev_blk_addr = 0;
struct page *page;
block_t blkno = start;
struct f2fs_io_info fio = {
@@ -152,10 +161,12 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
.rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
.encrypted_page = NULL,
};
+ struct blk_plug plug;
if (unlikely(type == META_POR))
fio.rw &= ~REQ_META;
+ blk_start_plug(&plug);
for (; nrpages-- > 0; blkno++) {
if (!is_valid_blkaddr(sbi, blkno, type))
@@ -167,27 +178,25 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
blkno = 0;
/* get nat block addr */
- fio.blk_addr = current_nat_addr(sbi,
+ fio.new_blkaddr = current_nat_addr(sbi,
blkno * NAT_ENTRY_PER_BLOCK);
break;
case META_SIT:
/* get sit block addr */
- fio.blk_addr = current_sit_addr(sbi,
+ fio.new_blkaddr = current_sit_addr(sbi,
blkno * SIT_ENTRY_PER_BLOCK);
- if (blkno != start && prev_blk_addr + 1 != fio.blk_addr)
- goto out;
- prev_blk_addr = fio.blk_addr;
break;
case META_SSA:
case META_CP:
case META_POR:
- fio.blk_addr = blkno;
+ fio.new_blkaddr = blkno;
break;
default:
BUG();
}
- page = grab_cache_page(META_MAPPING(sbi), fio.blk_addr);
+ page = f2fs_grab_cache_page(META_MAPPING(sbi),
+ fio.new_blkaddr, false);
if (!page)
continue;
if (PageUptodate(page)) {
@@ -196,11 +205,13 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
}
fio.page = page;
+ fio.old_blkaddr = fio.new_blkaddr;
f2fs_submit_page_mbio(&fio);
f2fs_put_page(page, 0);
}
out:
f2fs_submit_merged_bio(sbi, META, READ);
+ blk_finish_plug(&plug);
return blkno - start;
}
@@ -210,12 +221,12 @@ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
bool readahead = false;
page = find_get_page(META_MAPPING(sbi), index);
- if (!page || (page && !PageUptodate(page)))
+ if (!page || !PageUptodate(page))
readahead = true;
f2fs_put_page(page, 0);
if (readahead)
- ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
+ ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
}
static int f2fs_write_meta_page(struct page *page,
@@ -232,13 +243,17 @@ static int f2fs_write_meta_page(struct page *page,
if (unlikely(f2fs_cp_error(sbi)))
goto redirty_out;
- f2fs_wait_on_page_writeback(page, META);
write_meta_page(sbi, page);
dec_page_count(sbi, F2FS_DIRTY_META);
- unlock_page(page);
if (wbc->for_reclaim)
+ f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE);
+
+ unlock_page(page);
+
+ if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_bio(sbi, META, WRITE);
+
return 0;
redirty_out:
@@ -252,13 +267,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
long diff, written;
- trace_f2fs_writepages(mapping->host, wbc, META);
-
/* collect a number of dirty meta pages and write together */
if (wbc->for_kupdate ||
get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
goto skip_write;
+ trace_f2fs_writepages(mapping->host, wbc, META);
+
/* if mounting is failed, skip writing node pages */
mutex_lock(&sbi->cp_mutex);
diff = nr_pages_to_write(sbi, META, wbc);
@@ -269,6 +284,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
skip_write:
wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
+ trace_f2fs_writepages(mapping->host, wbc, META);
return 0;
}
@@ -276,15 +292,18 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
long nr_to_write)
{
struct address_space *mapping = META_MAPPING(sbi);
- pgoff_t index = 0, end = LONG_MAX, prev = LONG_MAX;
+ pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
struct pagevec pvec;
long nwritten = 0;
struct writeback_control wbc = {
.for_reclaim = 0,
};
+ struct blk_plug plug;
pagevec_init(&pvec, 0);
+ blk_start_plug(&plug);
+
while (index <= end) {
int i, nr_pages;
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
@@ -296,7 +315,7 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- if (prev == LONG_MAX)
+ if (prev == ULONG_MAX)
prev = page->index - 1;
if (nr_to_write != LONG_MAX && page->index != prev + 1) {
pagevec_release(&pvec);
@@ -315,6 +334,9 @@ continue_unlock:
goto continue_unlock;
}
+ f2fs_wait_on_page_writeback(page, META, true);
+
+ BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
@@ -334,6 +356,8 @@ stop:
if (nwritten)
f2fs_submit_merged_bio(sbi, type, WRITE);
+ blk_finish_plug(&plug);
+
return nwritten;
}
@@ -341,9 +365,10 @@ static int f2fs_set_meta_page_dirty(struct page *page)
{
trace_f2fs_set_page_dirty(page, META);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
SetPagePrivate(page);
f2fs_trace_pid(page);
@@ -358,6 +383,9 @@ const struct address_space_operations f2fs_meta_aops = {
.set_page_dirty = f2fs_set_meta_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
@@ -410,13 +438,13 @@ static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
spin_unlock(&im->ino_lock);
}
-void add_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
+void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
/* add new dirty ino entry into list */
__add_ino_entry(sbi, ino, type);
}
-void remove_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
+void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
/* remove dirty ino entry from list */
__remove_ino_entry(sbi, ino, type);
@@ -434,12 +462,12 @@ bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
return e ? true : false;
}
-void release_dirty_inode(struct f2fs_sb_info *sbi)
+void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
struct ino_entry *e, *tmp;
int i;
- for (i = APPEND_INO; i <= UPDATE_INO; i++) {
+ for (i = all ? ORPHAN_INO : APPEND_INO; i <= UPDATE_INO; i++) {
struct inode_management *im = &sbi->im[i];
spin_lock(&im->ino_lock);
@@ -459,6 +487,13 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
int err = 0;
spin_lock(&im->ino_lock);
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_ORPHAN)) {
+ spin_unlock(&im->ino_lock);
+ return -ENOSPC;
+ }
+#endif
if (unlikely(im->ino_num >= sbi->max_orphans))
err = -ENOSPC;
else
@@ -478,10 +513,11 @@ void release_orphan_inode(struct f2fs_sb_info *sbi)
spin_unlock(&im->ino_lock);
}
-void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+void add_orphan_inode(struct inode *inode)
{
/* add new orphan ino entry into list */
- __add_ino_entry(sbi, ino, ORPHAN_INO);
+ __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
+ update_inode_page(inode);
}
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
@@ -493,8 +529,20 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode;
+ struct node_info ni;
+ int err = acquire_orphan_inode(sbi);
+
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: orphan failed (ino=%x), run fsck to fix.",
+ __func__, ino);
+ return err;
+ }
- inode = f2fs_iget(sbi->sb, ino);
+ __add_ino_entry(sbi, ino, ORPHAN_INO);
+
+ inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) {
/*
* there should be a bug that we can't find the entry
@@ -508,6 +556,18 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
/* truncate all the data during iput */
iput(inode);
+
+ get_node_info(sbi, ino, &ni);
+
+ /* ENOMEM was fully retried in f2fs_evict_inode. */
+ if (ni.blk_addr != NULL_ADDR) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: orphan failed (ino=%x), run fsck to fix.",
+ __func__, ino);
+ return -EIO;
+ }
+ __remove_ino_entry(sbi, ino, ORPHAN_INO);
return 0;
}
@@ -516,7 +576,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
block_t start_blk, orphan_blocks, i, j;
int err;
- if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
+ if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
return 0;
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
@@ -540,7 +600,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
f2fs_put_page(page, 1);
}
/* clear Orphan Flag */
- clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
+ clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
return 0;
}
@@ -601,45 +661,55 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
}
}
-static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
- block_t cp_addr, unsigned long long *version)
+static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
+ struct f2fs_checkpoint **cp_block, struct page **cp_page,
+ unsigned long long *version)
{
- struct page *cp_page_1, *cp_page_2 = NULL;
unsigned long blk_size = sbi->blocksize;
- struct f2fs_checkpoint *cp_block;
- unsigned long long cur_version = 0, pre_version = 0;
- size_t crc_offset;
+ size_t crc_offset = 0;
__u32 crc = 0;
- /* Read the 1st cp block in this CP pack */
- cp_page_1 = get_meta_page(sbi, cp_addr);
+ *cp_page = get_meta_page(sbi, cp_addr);
+ *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
- /* get the version number */
- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
- crc_offset = le32_to_cpu(cp_block->checksum_offset);
- if (crc_offset >= blk_size)
- goto invalid_cp1;
+ crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
+ if (crc_offset >= blk_size) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "invalid crc_offset: %zu", crc_offset);
+ return -EINVAL;
+ }
- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
- if (!f2fs_crc_valid(crc, cp_block, crc_offset))
- goto invalid_cp1;
+ crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
+ + crc_offset)));
+ if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+ f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
+ return -EINVAL;
+ }
- pre_version = cur_cp_version(cp_block);
+ *version = cur_cp_version(*cp_block);
+ return 0;
+}
- /* Read the 2nd cp block in this CP pack */
- cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
- cp_page_2 = get_meta_page(sbi, cp_addr);
+static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+ block_t cp_addr, unsigned long long *version)
+{
+ struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
+ struct f2fs_checkpoint *cp_block = NULL;
+ unsigned long long cur_version = 0, pre_version = 0;
+ int err;
- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
- crc_offset = le32_to_cpu(cp_block->checksum_offset);
- if (crc_offset >= blk_size)
- goto invalid_cp2;
+ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+ &cp_page_1, version);
+ if (err)
+ goto invalid_cp1;
+ pre_version = *version;
- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
- if (!f2fs_crc_valid(crc, cp_block, crc_offset))
+ cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
+ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+ &cp_page_2, version);
+ if (err)
goto invalid_cp2;
-
- cur_version = cur_cp_version(cp_block);
+ cur_version = *version;
if (cur_version == pre_version) {
*version = cur_version;
@@ -696,6 +766,15 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
memcpy(sbi->ckpt, cp_block, blk_size);
+ /* Sanity checking of checkpoint */
+ if (sanity_check_ckpt(sbi))
+ goto free_fail_no_cp;
+
+ if (cur_page == cp1)
+ sbi->cur_cp_pack = 1;
+ else
+ sbi->cur_cp_pack = 2;
+
if (cp_blks <= 1)
goto done;
@@ -717,123 +796,102 @@ done:
f2fs_put_page(cp2, 1);
return 0;
+free_fail_no_cp:
+ f2fs_put_page(cp1, 1);
+ f2fs_put_page(cp2, 1);
fail_no_cp:
kfree(sbi->ckpt);
return -EINVAL;
}
-static int __add_dirty_inode(struct inode *inode, struct inode_entry *new)
+static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
- if (is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR))
- return -EEXIST;
+ if (is_inode_flag_set(inode, flag))
+ return;
- set_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
- F2FS_I(inode)->dirty_dir = new;
- list_add_tail(&new->list, &sbi->dir_inode_list);
- stat_inc_dirty_dir(sbi);
- return 0;
+ set_inode_flag(inode, flag);
+ list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
+ stat_inc_dirty_inode(sbi, type);
+}
+
+static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
+{
+ int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
+
+ if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
+ return;
+
+ list_del_init(&F2FS_I(inode)->dirty_list);
+ clear_inode_flag(inode, flag);
+ stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}
void update_dirty_page(struct inode *inode, struct page *page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode_entry *new;
- int ret = 0;
+ enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
!S_ISLNK(inode->i_mode))
return;
- if (!S_ISDIR(inode->i_mode)) {
- inode_inc_dirty_pages(inode);
- goto out;
- }
-
- new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
- new->inode = inode;
- INIT_LIST_HEAD(&new->list);
-
- spin_lock(&sbi->dir_inode_lock);
- ret = __add_dirty_inode(inode, new);
+ spin_lock(&sbi->inode_lock[type]);
+ if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
+ __add_dirty_inode(inode, type);
inode_inc_dirty_pages(inode);
- spin_unlock(&sbi->dir_inode_lock);
+ spin_unlock(&sbi->inode_lock[type]);
- if (ret)
- kmem_cache_free(inode_entry_slab, new);
-out:
SetPagePrivate(page);
f2fs_trace_pid(page);
}
-void add_dirty_dir_inode(struct inode *inode)
+void remove_dirty_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode_entry *new =
- f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
- int ret = 0;
+ enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
- new->inode = inode;
- INIT_LIST_HEAD(&new->list);
-
- spin_lock(&sbi->dir_inode_lock);
- ret = __add_dirty_inode(inode, new);
- spin_unlock(&sbi->dir_inode_lock);
-
- if (ret)
- kmem_cache_free(inode_entry_slab, new);
-}
-
-void remove_dirty_dir_inode(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode_entry *entry;
-
- if (!S_ISDIR(inode->i_mode))
+ if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+ !S_ISLNK(inode->i_mode))
return;
- spin_lock(&sbi->dir_inode_lock);
- if (get_dirty_pages(inode) ||
- !is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR)) {
- spin_unlock(&sbi->dir_inode_lock);
+ if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
return;
- }
- entry = F2FS_I(inode)->dirty_dir;
- list_del(&entry->list);
- F2FS_I(inode)->dirty_dir = NULL;
- clear_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
- stat_dec_dirty_dir(sbi);
- spin_unlock(&sbi->dir_inode_lock);
- kmem_cache_free(inode_entry_slab, entry);
-
- /* Only from the recovery routine */
- if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
- clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
- iput(inode);
- }
+ spin_lock(&sbi->inode_lock[type]);
+ __remove_dirty_inode(inode, type);
+ spin_unlock(&sbi->inode_lock[type]);
}
-void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
+int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
struct list_head *head;
- struct inode_entry *entry;
struct inode *inode;
+ struct f2fs_inode_info *fi;
+ bool is_dir = (type == DIR_INODE);
+
+ trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
+ get_pages(sbi, is_dir ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
- spin_lock(&sbi->dir_inode_lock);
+ spin_lock(&sbi->inode_lock[type]);
- head = &sbi->dir_inode_list;
+ head = &sbi->inode_list[type];
if (list_empty(head)) {
- spin_unlock(&sbi->dir_inode_lock);
- return;
+ spin_unlock(&sbi->inode_lock[type]);
+ trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
+ get_pages(sbi, is_dir ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
+ return 0;
}
- entry = list_entry(head->next, struct inode_entry, list);
- inode = igrab(entry->inode);
- spin_unlock(&sbi->dir_inode_lock);
+ fi = list_entry(head->next, struct f2fs_inode_info, dirty_list);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[type]);
if (inode) {
filemap_fdatawrite(inode->i_mapping);
iput(inode);
@@ -848,6 +906,38 @@ retry:
goto retry;
}
+int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
+{
+ struct list_head *head = &sbi->inode_list[DIRTY_META];
+ struct inode *inode;
+ struct f2fs_inode_info *fi;
+ s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);
+
+ while (total--) {
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (list_empty(head)) {
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return 0;
+ }
+ fi = list_entry(head->next, struct f2fs_inode_info,
+ gdirty_list);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ if (inode) {
+ sync_inode_metadata(inode, 0);
+
+ /* it's on eviction */
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE))
+ update_inode_page(inode);
+ iput(inode);
+ }
+ }
+ return 0;
+}
+
/*
* Freeze all the FS-operations for checkpoint.
*/
@@ -868,11 +958,17 @@ retry_flush_dents:
/* write all the dirty dentry pages */
if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
f2fs_unlock_all(sbi);
- sync_dirty_dir_inodes(sbi);
- if (unlikely(f2fs_cp_error(sbi))) {
- err = -EIO;
+ err = sync_dirty_inodes(sbi, DIR_INODE);
+ if (err)
+ goto out;
+ goto retry_flush_dents;
+ }
+
+ if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
+ f2fs_unlock_all(sbi);
+ err = f2fs_sync_inode_meta(sbi);
+ if (err)
goto out;
- }
goto retry_flush_dents;
}
@@ -885,10 +981,9 @@ retry_flush_nodes:
if (get_pages(sbi, F2FS_DIRTY_NODES)) {
up_write(&sbi->node_write);
- sync_node_pages(sbi, 0, &wbc);
- if (unlikely(f2fs_cp_error(sbi))) {
+ err = sync_node_pages(sbi, &wbc);
+ if (err) {
f2fs_unlock_all(sbi);
- err = -EIO;
goto out;
}
goto retry_flush_nodes;
@@ -901,6 +996,8 @@ out:
static void unblock_operations(struct f2fs_sb_info *sbi)
{
up_write(&sbi->node_write);
+
+ build_free_nids(sbi, false);
f2fs_unlock_all(sbi);
}
@@ -911,18 +1008,48 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
for (;;) {
prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
- if (!get_pages(sbi, F2FS_WRITEBACK))
+ if (!get_pages(sbi, F2FS_WB_CP_DATA))
break;
- io_schedule();
+ io_schedule_timeout(5 * HZ);
}
finish_wait(&sbi->cp_wait, &wait);
}
-static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+{
+ unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+
+ spin_lock(&sbi->cp_lock);
+
+ if (cpc->reason == CP_UMOUNT)
+ __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+
+ if (cpc->reason == CP_FASTBOOT)
+ __set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+
+ if (orphan_num)
+ __set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+ __set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+
+ /* set this flag to activate crc|cp_ver for recovery */
+ __set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
+
+ spin_unlock(&sbi->cp_lock);
+}
+
+static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
nid_t last_nid = nm_i->next_scan_nid;
@@ -931,21 +1058,15 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
__u32 crc32 = 0;
int i;
int cp_payload_blks = __cp_payload(sbi);
- block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
- bool invalidate = false;
-
- /*
- * This avoids to conduct wrong roll-forward operations and uses
- * metapages, so should be called prior to sync_meta_pages below.
- */
- if (discard_next_dnode(sbi, discard_blk))
- invalidate = true;
+ struct super_block *sb = sbi->sb;
+ struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+ u64 kbytes_written;
/* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META)) {
sync_meta_pages(sbi, META, LONG_MAX);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
}
next_free_nid(sbi, &last_nid);
@@ -980,10 +1101,12 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* 2 cp + n data seg summary + orphan inode blocks */
data_sum_blocks = npages_for_summary_flush(sbi, false);
+ spin_lock(&sbi->cp_lock);
if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
- set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
else
- clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ spin_unlock(&sbi->cp_lock);
orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
@@ -998,39 +1121,24 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
cp_payload_blks + data_sum_blocks +
orphan_blocks);
- if (cpc->reason == CP_UMOUNT)
- set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-
- if (cpc->reason == CP_FASTBOOT)
- set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
-
- if (orphan_num)
- set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
-
- if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
- set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+ /* update ckpt flag for checkpoint */
+ update_ckpt_flags(sbi, cpc);
/* update SIT/NAT bitmap */
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
- crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
+ crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
*((__le32 *)((unsigned char *)ckpt +
le32_to_cpu(ckpt->checksum_offset)))
= cpu_to_le32(crc32);
- start_blk = __start_cp_addr(sbi);
+ start_blk = __start_cp_next_addr(sbi);
/* need to wait for end_io results */
wait_on_all_pages_writeback(sbi);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
/* write out checkpoint buffer at block 0 */
update_meta_page(sbi, ckpt, start_blk++);
@@ -1046,6 +1154,14 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
write_data_summaries(sbi, start_blk);
start_blk += data_sum_blocks;
+
+ /* Record write statistics in the hot node summary */
+ kbytes_written = sbi->kbytes_written;
+ if (sb->s_bdev->bd_part)
+ kbytes_written += BD_PART_WRITTEN(sbi);
+
+ seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);
+
if (__remain_node_summaries(cpc->reason)) {
write_node_summaries(sbi, start_blk);
start_blk += NR_CURSEG_NODE_TYPE;
@@ -1058,14 +1174,14 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
wait_on_all_pages_writeback(sbi);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
- filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
- filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);
+ filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
+ filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);
/* update user_block_counts */
sbi->last_valid_block_count = sbi->total_valid_block_count;
- sbi->alloc_valid_block_count = 0;
+ percpu_counter_set(&sbi->alloc_valid_block_count, 0);
/* Here, we only have one bio having CP pack */
sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
@@ -1073,30 +1189,36 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* wait for previous submitted meta pages writeback */
wait_on_all_pages_writeback(sbi);
- /*
- * invalidate meta page which is used temporarily for zeroing out
- * block at the end of warm node chain.
- */
- if (invalidate)
- invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
- discard_blk);
-
- release_dirty_inode(sbi);
+ release_ino_entry(sbi, false);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
- clear_prefree_segments(sbi, cpc);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
+ clear_sbi_flag(sbi, SBI_NEED_CP);
+ __set_cp_next_pack(sbi);
+
+ /*
+ * redirty superblock if metadata like node page or inode cache is
+ * updated during writing checkpoint.
+ */
+ if (get_pages(sbi, F2FS_DIRTY_NODES) ||
+ get_pages(sbi, F2FS_DIRTY_IMETA))
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+
+ f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
+
+ return 0;
}
/*
* We guarantee that this checkpoint procedure will not fail.
*/
-void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned long long ckpt_ver;
+ int err = 0;
mutex_lock(&sbi->cp_mutex);
@@ -1104,21 +1226,35 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
(cpc->reason == CP_DISCARD && !sbi->discard_blks)))
goto out;
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
goto out;
- if (f2fs_readonly(sbi->sb))
+ }
+ if (f2fs_readonly(sbi->sb)) {
+ err = -EROFS;
goto out;
+ }
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
- if (block_operations(sbi))
+ err = block_operations(sbi);
+ if (err)
goto out;
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
- f2fs_submit_merged_bio(sbi, NODE, WRITE);
- f2fs_submit_merged_bio(sbi, META, WRITE);
+ f2fs_flush_merged_bios(sbi);
+
+ /* this is the case of multiple fstrims without any changes */
+ if (cpc->reason == CP_DISCARD && !is_sbi_flag_set(sbi, SBI_IS_DIRTY)) {
+ f2fs_bug_on(sbi, NM_I(sbi)->dirty_nat_cnt);
+ f2fs_bug_on(sbi, SIT_I(sbi)->dirty_sentries);
+ f2fs_bug_on(sbi, prefree_segments(sbi));
+ flush_sit_entries(sbi, cpc);
+ clear_prefree_segments(sbi, cpc);
+ unblock_operations(sbi);
+ goto out;
+ }
/*
* update checkpoint pack index
@@ -1133,7 +1269,11 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
flush_sit_entries(sbi, cpc);
/* unlock all the fs_lock[] in do_checkpoint() */
- do_checkpoint(sbi, cpc);
+ err = do_checkpoint(sbi, cpc);
+ if (err)
+ release_discard_addrs(sbi);
+ else
+ clear_prefree_segments(sbi, cpc);
unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info);
@@ -1143,10 +1283,11 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
"checkpoint: version = %llx", ckpt_ver);
/* do checkpoint periodically */
- sbi->cp_expires = round_jiffies_up(jiffies + HZ * sbi->cp_interval);
+ f2fs_update_time(sbi, CP_TIME);
+ trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
mutex_unlock(&sbi->cp_mutex);
- trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
+ return err;
}
void init_ino_entry_info(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/crypto.c b/fs/f2fs/crypto.c
deleted file mode 100644
index 4a62ef14e932..000000000000
--- a/fs/f2fs/crypto.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/*
- * linux/fs/f2fs/crypto.c
- *
- * Copied from linux/fs/ext4/crypto.c
- *
- * Copyright (C) 2015, Google, Inc.
- * Copyright (C) 2015, Motorola Mobility
- *
- * This contains encryption functions for f2fs
- *
- * Written by Michael Halcrow, 2014.
- *
- * Filename encryption additions
- * Uday Savagaonkar, 2014
- * Encryption policy handling additions
- * Ildar Muslukhov, 2014
- * Remove ext4_encrypted_zeroout(),
- * add f2fs_restore_and_release_control_page()
- * Jaegeuk Kim, 2015.
- *
- * This has not yet undergone a rigorous security audit.
- *
- * The usage of AES-XTS should conform to recommendations in NIST
- * Special Publication 800-38E and IEEE P1619/D16.
- */
-#include <crypto/hash.h>
-#include <crypto/sha.h>
-#include <keys/user-type.h>
-#include <keys/encrypted-type.h>
-#include <linux/crypto.h>
-#include <linux/ecryptfs.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/key.h>
-#include <linux/list.h>
-#include <linux/mempool.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock_types.h>
-#include <linux/f2fs_fs.h>
-#include <linux/ratelimit.h>
-#include <linux/bio.h>
-
-#include "f2fs.h"
-#include "xattr.h"
-
-/* Encryption added and removed here! (L: */
-
-static unsigned int num_prealloc_crypto_pages = 32;
-static unsigned int num_prealloc_crypto_ctxs = 128;
-
-module_param(num_prealloc_crypto_pages, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_pages,
- "Number of crypto pages to preallocate");
-module_param(num_prealloc_crypto_ctxs, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
- "Number of crypto contexts to preallocate");
-
-static mempool_t *f2fs_bounce_page_pool;
-
-static LIST_HEAD(f2fs_free_crypto_ctxs);
-static DEFINE_SPINLOCK(f2fs_crypto_ctx_lock);
-
-static struct workqueue_struct *f2fs_read_workqueue;
-static DEFINE_MUTEX(crypto_init);
-
-static struct kmem_cache *f2fs_crypto_ctx_cachep;
-struct kmem_cache *f2fs_crypt_info_cachep;
-
-/**
- * f2fs_release_crypto_ctx() - Releases an encryption context
- * @ctx: The encryption context to release.
- *
- * If the encryption context was allocated from the pre-allocated pool, returns
- * it to that pool. Else, frees it.
- *
- * If there's a bounce page in the context, this frees that.
- */
-void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
-{
- unsigned long flags;
-
- if (ctx->flags & F2FS_WRITE_PATH_FL && ctx->w.bounce_page) {
- mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool);
- ctx->w.bounce_page = NULL;
- }
- ctx->w.control_page = NULL;
- if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
- kmem_cache_free(f2fs_crypto_ctx_cachep, ctx);
- } else {
- spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
- list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
- spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
- }
-}
-
-/**
- * f2fs_get_crypto_ctx() - Gets an encryption context
- * @inode: The inode for which we are doing the crypto
- *
- * Allocates and initializes an encryption context.
- *
- * Return: An allocated and initialized encryption context on success; error
- * value or NULL otherwise.
- */
-struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode)
-{
- struct f2fs_crypto_ctx *ctx = NULL;
- unsigned long flags;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-
- if (ci == NULL)
- return ERR_PTR(-ENOKEY);
-
- /*
- * We first try getting the ctx from a free list because in
- * the common case the ctx will have an allocated and
- * initialized crypto tfm, so it's probably a worthwhile
- * optimization. For the bounce page, we first try getting it
- * from the kernel allocator because that's just about as fast
- * as getting it from a list and because a cache of free pages
- * should generally be a "last resort" option for a filesystem
- * to be able to do its job.
- */
- spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
- ctx = list_first_entry_or_null(&f2fs_free_crypto_ctxs,
- struct f2fs_crypto_ctx, free_list);
- if (ctx)
- list_del(&ctx->free_list);
- spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
- if (!ctx) {
- ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_NOFS);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags &= ~F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- }
- ctx->flags &= ~F2FS_WRITE_PATH_FL;
- return ctx;
-}
-
-/*
- * Call f2fs_decrypt on every single page, reusing the encryption
- * context.
- */
-static void completion_pages(struct work_struct *work)
-{
- struct f2fs_crypto_ctx *ctx =
- container_of(work, struct f2fs_crypto_ctx, r.work);
- struct bio *bio = ctx->r.bio;
- struct bio_vec *bv;
- int i;
-
- bio_for_each_segment_all(bv, bio, i) {
- struct page *page = bv->bv_page;
- int ret = f2fs_decrypt(ctx, page);
-
- if (ret) {
- WARN_ON_ONCE(1);
- SetPageError(page);
- } else
- SetPageUptodate(page);
- unlock_page(page);
- }
- f2fs_release_crypto_ctx(ctx);
- bio_put(bio);
-}
-
-void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio)
-{
- INIT_WORK(&ctx->r.work, completion_pages);
- ctx->r.bio = bio;
- queue_work(f2fs_read_workqueue, &ctx->r.work);
-}
-
-static void f2fs_crypto_destroy(void)
-{
- struct f2fs_crypto_ctx *pos, *n;
-
- list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list)
- kmem_cache_free(f2fs_crypto_ctx_cachep, pos);
- INIT_LIST_HEAD(&f2fs_free_crypto_ctxs);
- if (f2fs_bounce_page_pool)
- mempool_destroy(f2fs_bounce_page_pool);
- f2fs_bounce_page_pool = NULL;
-}
-
-/**
- * f2fs_crypto_initialize() - Set up for f2fs encryption.
- *
- * We only call this when we start accessing encrypted files, since it
- * results in memory getting allocated that wouldn't otherwise be used.
- *
- * Return: Zero on success, non-zero otherwise.
- */
-int f2fs_crypto_initialize(void)
-{
- int i, res = -ENOMEM;
-
- if (f2fs_bounce_page_pool)
- return 0;
-
- mutex_lock(&crypto_init);
- if (f2fs_bounce_page_pool)
- goto already_initialized;
-
- for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
- struct f2fs_crypto_ctx *ctx;
-
- ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_KERNEL);
- if (!ctx)
- goto fail;
- list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
- }
-
- /* must be allocated at the last step to avoid race condition above */
- f2fs_bounce_page_pool =
- mempool_create_page_pool(num_prealloc_crypto_pages, 0);
- if (!f2fs_bounce_page_pool)
- goto fail;
-
-already_initialized:
- mutex_unlock(&crypto_init);
- return 0;
-fail:
- f2fs_crypto_destroy();
- mutex_unlock(&crypto_init);
- return res;
-}
-
-/**
- * f2fs_exit_crypto() - Shutdown the f2fs encryption system
- */
-void f2fs_exit_crypto(void)
-{
- f2fs_crypto_destroy();
-
- if (f2fs_read_workqueue)
- destroy_workqueue(f2fs_read_workqueue);
- if (f2fs_crypto_ctx_cachep)
- kmem_cache_destroy(f2fs_crypto_ctx_cachep);
- if (f2fs_crypt_info_cachep)
- kmem_cache_destroy(f2fs_crypt_info_cachep);
-}
-
-int __init f2fs_init_crypto(void)
-{
- int res = -ENOMEM;
-
- f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
- if (!f2fs_read_workqueue)
- goto fail;
-
- f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx,
- SLAB_RECLAIM_ACCOUNT);
- if (!f2fs_crypto_ctx_cachep)
- goto fail;
-
- f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info,
- SLAB_RECLAIM_ACCOUNT);
- if (!f2fs_crypt_info_cachep)
- goto fail;
-
- return 0;
-fail:
- f2fs_exit_crypto();
- return res;
-}
-
-void f2fs_restore_and_release_control_page(struct page **page)
-{
- struct f2fs_crypto_ctx *ctx;
- struct page *bounce_page;
-
- /* The bounce data pages are unmapped. */
- if ((*page)->mapping)
- return;
-
- /* The bounce data page is unmapped. */
- bounce_page = *page;
- ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page);
-
- /* restore control page */
- *page = ctx->w.control_page;
-
- f2fs_restore_control_page(bounce_page);
-}
-
-void f2fs_restore_control_page(struct page *data_page)
-{
- struct f2fs_crypto_ctx *ctx =
- (struct f2fs_crypto_ctx *)page_private(data_page);
-
- set_page_private(data_page, (unsigned long)NULL);
- ClearPagePrivate(data_page);
- unlock_page(data_page);
- f2fs_release_crypto_ctx(ctx);
-}
-
-/**
- * f2fs_crypt_complete() - The completion callback for page encryption
- * @req: The asynchronous encryption request context
- * @res: The result of the encryption operation
- */
-static void f2fs_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct f2fs_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
-typedef enum {
- F2FS_DECRYPT = 0,
- F2FS_ENCRYPT,
-} f2fs_direction_t;
-
-static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
- struct inode *inode,
- f2fs_direction_t rw,
- pgoff_t index,
- struct page *src_page,
- struct page *dest_page)
-{
- u8 xts_tweak[F2FS_XTS_TWEAK_SIZE];
- struct ablkcipher_request *req = NULL;
- DECLARE_F2FS_COMPLETION_RESULT(ecr);
- struct scatterlist dst, src;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
- struct crypto_ablkcipher *tfm = ci->ci_ctfm;
- int res = 0;
-
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n",
- __func__);
- return -ENOMEM;
- }
- ablkcipher_request_set_callback(
- req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- f2fs_crypt_complete, &ecr);
-
- BUILD_BUG_ON(F2FS_XTS_TWEAK_SIZE < sizeof(index));
- memcpy(xts_tweak, &index, sizeof(index));
- memset(&xts_tweak[sizeof(index)], 0,
- F2FS_XTS_TWEAK_SIZE - sizeof(index));
-
- sg_init_table(&dst, 1);
- sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
- sg_init_table(&src, 1);
- sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
- ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
- xts_tweak);
- if (rw == F2FS_DECRYPT)
- res = crypto_ablkcipher_decrypt(req);
- else
- res = crypto_ablkcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- ablkcipher_request_free(req);
- if (res) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_ablkcipher_encrypt() returned %d\n",
- __func__, res);
- return res;
- }
- return 0;
-}
-
-static struct page *alloc_bounce_page(struct f2fs_crypto_ctx *ctx)
-{
- ctx->w.bounce_page = mempool_alloc(f2fs_bounce_page_pool, GFP_NOWAIT);
- if (ctx->w.bounce_page == NULL)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= F2FS_WRITE_PATH_FL;
- return ctx->w.bounce_page;
-}
-
-/**
- * f2fs_encrypt() - Encrypts a page
- * @inode: The inode for which the encryption should take place
- * @plaintext_page: The page to encrypt. Must be locked.
- *
- * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
- * encryption context.
- *
- * Called on the page write path. The caller must call
- * f2fs_restore_control_page() on the returned ciphertext page to
- * release the bounce buffer and the encryption context.
- *
- * Return: An allocated page with the encrypted content on success. Else, an
- * error value or NULL.
- */
-struct page *f2fs_encrypt(struct inode *inode,
- struct page *plaintext_page)
-{
- struct f2fs_crypto_ctx *ctx;
- struct page *ciphertext_page = NULL;
- int err;
-
- BUG_ON(!PageLocked(plaintext_page));
-
- ctx = f2fs_get_crypto_ctx(inode);
- if (IS_ERR(ctx))
- return (struct page *)ctx;
-
- /* The encryption operation will require a bounce page. */
- ciphertext_page = alloc_bounce_page(ctx);
- if (IS_ERR(ciphertext_page))
- goto err_out;
-
- ctx->w.control_page = plaintext_page;
- err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index,
- plaintext_page, ciphertext_page);
- if (err) {
- ciphertext_page = ERR_PTR(err);
- goto err_out;
- }
-
- SetPagePrivate(ciphertext_page);
- set_page_private(ciphertext_page, (unsigned long)ctx);
- lock_page(ciphertext_page);
- return ciphertext_page;
-
-err_out:
- f2fs_release_crypto_ctx(ctx);
- return ciphertext_page;
-}
-
-/**
- * f2fs_decrypt() - Decrypts a page in-place
- * @ctx: The encryption context.
- * @page: The page to decrypt. Must be locked.
- *
- * Decrypts page in-place using the ctx encryption context.
- *
- * Called from the read completion callback.
- *
- * Return: Zero on success, non-zero otherwise.
- */
-int f2fs_decrypt(struct f2fs_crypto_ctx *ctx, struct page *page)
-{
- BUG_ON(!PageLocked(page));
-
- return f2fs_page_crypto(ctx, page->mapping->host,
- F2FS_DECRYPT, page->index, page, page);
-}
-
-/*
- * Convenience function which takes care of allocating and
- * deallocating the encryption context
- */
-int f2fs_decrypt_one(struct inode *inode, struct page *page)
-{
- struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode);
- int ret;
-
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
- ret = f2fs_decrypt(ctx, page);
- f2fs_release_crypto_ctx(ctx);
- return ret;
-}
-
-bool f2fs_valid_contents_enc_mode(uint32_t mode)
-{
- return (mode == F2FS_ENCRYPTION_MODE_AES_256_XTS);
-}
-
-/**
- * f2fs_validate_encryption_key_size() - Validate the encryption key size
- * @mode: The key mode.
- * @size: The key size to validate.
- *
- * Return: The validated key size for @mode. Zero if invalid.
- */
-uint32_t f2fs_validate_encryption_key_size(uint32_t mode, uint32_t size)
-{
- if (size == f2fs_encryption_key_size(mode))
- return size;
- return 0;
-}
diff --git a/fs/f2fs/crypto_fname.c b/fs/f2fs/crypto_fname.c
deleted file mode 100644
index 38349ed5ea51..000000000000
--- a/fs/f2fs/crypto_fname.c
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * linux/fs/f2fs/crypto_fname.c
- *
- * Copied from linux/fs/ext4/crypto.c
- *
- * Copyright (C) 2015, Google, Inc.
- * Copyright (C) 2015, Motorola Mobility
- *
- * This contains functions for filename crypto management in f2fs
- *
- * Written by Uday Savagaonkar, 2014.
- *
- * Adjust f2fs dentry structure
- * Jaegeuk Kim, 2015.
- *
- * This has not yet undergone a rigorous security audit.
- */
-#include <crypto/hash.h>
-#include <crypto/sha.h>
-#include <keys/encrypted-type.h>
-#include <keys/user-type.h>
-#include <linux/crypto.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/key.h>
-#include <linux/list.h>
-#include <linux/mempool.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock_types.h>
-#include <linux/f2fs_fs.h>
-#include <linux/ratelimit.h>
-
-#include "f2fs.h"
-#include "f2fs_crypto.h"
-#include "xattr.h"
-
-/**
- * f2fs_dir_crypt_complete() -
- */
-static void f2fs_dir_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct f2fs_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
-bool f2fs_valid_filenames_enc_mode(uint32_t mode)
-{
- return (mode == F2FS_ENCRYPTION_MODE_AES_256_CTS);
-}
-
-static unsigned max_name_len(struct inode *inode)
-{
- return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
- F2FS_NAME_LEN;
-}
-
-/**
- * f2fs_fname_encrypt() - encrypt a filename
- *
- * This function encrypts the input filename and returns the length of the
- * ciphertext. Errors are returned as negative numbers. The caller is trusted
- * to allocate sufficient memory for the oname string.
- */
-static int f2fs_fname_encrypt(struct inode *inode,
- const struct qstr *iname, struct f2fs_str *oname)
-{
- u32 ciphertext_len;
- struct ablkcipher_request *req = NULL;
- DECLARE_F2FS_COMPLETION_RESULT(ecr);
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
- struct crypto_ablkcipher *tfm = ci->ci_ctfm;
- int res = 0;
- char iv[F2FS_CRYPTO_BLOCK_SIZE];
- struct scatterlist src_sg, dst_sg;
- int padding = 4 << (ci->ci_flags & F2FS_POLICY_FLAGS_PAD_MASK);
- char *workbuf, buf[32], *alloc_buf = NULL;
- unsigned lim = max_name_len(inode);
-
- if (iname->len <= 0 || iname->len > lim)
- return -EIO;
-
- ciphertext_len = (iname->len < F2FS_CRYPTO_BLOCK_SIZE) ?
- F2FS_CRYPTO_BLOCK_SIZE : iname->len;
- ciphertext_len = f2fs_fname_crypto_round_up(ciphertext_len, padding);
- ciphertext_len = (ciphertext_len > lim) ? lim : ciphertext_len;
-
- if (ciphertext_len <= sizeof(buf)) {
- workbuf = buf;
- } else {
- alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
- if (!alloc_buf)
- return -ENOMEM;
- workbuf = alloc_buf;
- }
-
- /* Allocate request */
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n", __func__);
- kfree(alloc_buf);
- return -ENOMEM;
- }
- ablkcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- f2fs_dir_crypt_complete, &ecr);
-
- /* Copy the input */
- memcpy(workbuf, iname->name, iname->len);
- if (iname->len < ciphertext_len)
- memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
-
- /* Initialize IV */
- memset(iv, 0, F2FS_CRYPTO_BLOCK_SIZE);
-
- /* Create encryption request */
- sg_init_one(&src_sg, workbuf, ciphertext_len);
- sg_init_one(&dst_sg, oname->name, ciphertext_len);
- ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
- res = crypto_ablkcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- kfree(alloc_buf);
- ablkcipher_request_free(req);
- if (res < 0) {
- printk_ratelimited(KERN_ERR
- "%s: Error (error code %d)\n", __func__, res);
- }
- oname->len = ciphertext_len;
- return res;
-}
-
-/*
- * f2fs_fname_decrypt() - decrypt a filename
- *
- * This function decrypts the input filename and returns the length of the
- * plaintext. Errors are returned as negative numbers. The caller is trusted
- * to allocate sufficient memory for the oname string.
- */
-static int f2fs_fname_decrypt(struct inode *inode,
- const struct f2fs_str *iname, struct f2fs_str *oname)
-{
- struct ablkcipher_request *req = NULL;
- DECLARE_F2FS_COMPLETION_RESULT(ecr);
- struct scatterlist src_sg, dst_sg;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
- struct crypto_ablkcipher *tfm = ci->ci_ctfm;
- int res = 0;
- char iv[F2FS_CRYPTO_BLOCK_SIZE];
- unsigned lim = max_name_len(inode);
-
- if (iname->len <= 0 || iname->len > lim)
- return -EIO;
-
- /* Allocate request */
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n", __func__);
- return -ENOMEM;
- }
- ablkcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- f2fs_dir_crypt_complete, &ecr);
-
- /* Initialize IV */
- memset(iv, 0, F2FS_CRYPTO_BLOCK_SIZE);
-
- /* Create decryption request */
- sg_init_one(&src_sg, iname->name, iname->len);
- sg_init_one(&dst_sg, oname->name, oname->len);
- ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
- res = crypto_ablkcipher_decrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- ablkcipher_request_free(req);
- if (res < 0) {
- printk_ratelimited(KERN_ERR
- "%s: Error in f2fs_fname_decrypt (error code %d)\n",
- __func__, res);
- return res;
- }
-
- oname->len = strnlen(oname->name, iname->len);
- return oname->len;
-}
-
-static const char *lookup_table =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
-
-/**
- * digest_encode() -
- *
- * Encodes the input digest using characters from the set [A-Za-z0-9+,].
- * The encoded string is roughly 4/3 times the size of the input string.
- */
-static int digest_encode(const char *src, int len, char *dst)
-{
- int i = 0, bits = 0, ac = 0;
- char *cp = dst;
-
- while (i < len) {
- ac += (((unsigned char) src[i]) << bits);
- bits += 8;
- do {
- *cp++ = lookup_table[ac & 0x3f];
- ac >>= 6;
- bits -= 6;
- } while (bits >= 6);
- i++;
- }
- if (bits)
- *cp++ = lookup_table[ac & 0x3f];
- return cp - dst;
-}
-
-static int digest_decode(const char *src, int len, char *dst)
-{
- int i = 0, bits = 0, ac = 0;
- const char *p;
- char *cp = dst;
-
- while (i < len) {
- p = strchr(lookup_table, src[i]);
- if (p == NULL || src[i] == 0)
- return -2;
- ac += (p - lookup_table) << bits;
- bits += 6;
- if (bits >= 8) {
- *cp++ = ac & 0xff;
- ac >>= 8;
- bits -= 8;
- }
- i++;
- }
- if (ac)
- return -1;
- return cp - dst;
-}
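
digest_encode() and digest_decode() above form a base64-style codec over the
64-character lookup_table (note the final two symbols are '+' and ','). As a
standalone illustration, here is a userspace C sketch of the encoder's 6-bit
packing; it is not kernel code:

	#include <assert.h>
	#include <stdio.h>

	/* Same 64-character alphabet as lookup_table above. */
	static const char *tbl =
		"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

	/* Pack bytes little-endian into an accumulator, emit 6 bits at a time. */
	static int enc(const unsigned char *src, int len, char *dst)
	{
		int i = 0, bits = 0, ac = 0;
		char *cp = dst;

		while (i < len) {
			ac += src[i] << bits;
			bits += 8;
			do {
				*cp++ = tbl[ac & 0x3f];
				ac >>= 6;
				bits -= 6;
			} while (bits >= 6);
			i++;
		}
		if (bits)
			*cp++ = tbl[ac & 0x3f];	/* leftover bits get one more symbol */
		return cp - dst;
	}

	int main(void)
	{
		char out[16];
		int n = enc((const unsigned char *)"abc", 3, out);

		/* 3 input bytes -> 24 bits -> exactly 4 symbols (4/3 expansion) */
		assert(n == 4);
		printf("%.*s\n", n, out);
		return 0;
	}
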
-
-/**
- * f2fs_fname_crypto_round_up() -
- *
- * Return: @size rounded up to the next multiple of @blksize.
- */
-u32 f2fs_fname_crypto_round_up(u32 size, u32 blksize)
-{
- return ((size + blksize - 1) / blksize) * blksize;
-}
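
For example, with blksize = 16: ((13 + 15) / 16) * 16 = 16, while
((32 + 15) / 16) * 16 = 32, so sizes already on a block boundary are
returned unchanged.
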
-
-/**
- * f2fs_fname_crypto_alloc_buffer() -
- *
- * Allocates an output buffer that is sufficient for the crypto operation
- * specified by the context and the direction.
- */
-int f2fs_fname_crypto_alloc_buffer(struct inode *inode,
- u32 ilen, struct f2fs_str *crypto_str)
-{
- unsigned int olen;
- int padding = 16;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-
- if (ci)
- padding = 4 << (ci->ci_flags & F2FS_POLICY_FLAGS_PAD_MASK);
- if (padding < F2FS_CRYPTO_BLOCK_SIZE)
- padding = F2FS_CRYPTO_BLOCK_SIZE;
- olen = f2fs_fname_crypto_round_up(ilen, padding);
- crypto_str->len = olen;
- if (olen < F2FS_FNAME_CRYPTO_DIGEST_SIZE * 2)
- olen = F2FS_FNAME_CRYPTO_DIGEST_SIZE * 2;
- /* The allocated buffer can hold one extra character, used to
- * null-terminate the string. */
- crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
- if (!(crypto_str->name))
- return -ENOMEM;
- return 0;
-}
-
-/**
- * f2fs_fname_crypto_free_buffer() -
- *
- * Frees the buffer allocated for crypto operation.
- */
-void f2fs_fname_crypto_free_buffer(struct f2fs_str *crypto_str)
-{
- if (!crypto_str)
- return;
- kfree(crypto_str->name);
- crypto_str->name = NULL;
-}
-
-/**
- * f2fs_fname_disk_to_usr() - converts a filename from disk space to user space
- */
-int f2fs_fname_disk_to_usr(struct inode *inode,
- f2fs_hash_t *hash,
- const struct f2fs_str *iname,
- struct f2fs_str *oname)
-{
- const struct qstr qname = FSTR_TO_QSTR(iname);
- char buf[24];
- int ret;
-
- if (is_dot_dotdot(&qname)) {
- oname->name[0] = '.';
- oname->name[iname->len - 1] = '.';
- oname->len = iname->len;
- return oname->len;
- }
-
- if (F2FS_I(inode)->i_crypt_info)
- return f2fs_fname_decrypt(inode, iname, oname);
-
- if (iname->len <= F2FS_FNAME_CRYPTO_DIGEST_SIZE) {
- ret = digest_encode(iname->name, iname->len, oname->name);
- oname->len = ret;
- return ret;
- }
- if (hash) {
- memcpy(buf, hash, 4);
- memset(buf + 4, 0, 4);
- } else
- memset(buf, 0, 8);
- memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16);
- oname->name[0] = '_';
- ret = digest_encode(buf, 24, oname->name + 1);
- oname->len = ret + 1;
- return ret + 1;
-}
-
-/**
- * f2fs_fname_usr_to_disk() - converts a filename from user space to disk space
- */
-int f2fs_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct f2fs_str *oname)
-{
- int res;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-
- if (is_dot_dotdot(iname)) {
- oname->name[0] = '.';
- oname->name[iname->len - 1] = '.';
- oname->len = iname->len;
- return oname->len;
- }
-
- if (ci) {
- res = f2fs_fname_encrypt(inode, iname, oname);
- return res;
- }
- /* Without a proper key, a user is not allowed to modify the filenames
- * in a directory. Consequently, a user space name cannot be mapped to
- * a disk-space name */
- return -EACCES;
-}
-
-int f2fs_fname_setup_filename(struct inode *dir, const struct qstr *iname,
- int lookup, struct f2fs_filename *fname)
-{
- struct f2fs_crypt_info *ci;
- int ret = 0, bigname = 0;
-
- memset(fname, 0, sizeof(struct f2fs_filename));
- fname->usr_fname = iname;
-
- if (!f2fs_encrypted_inode(dir) || is_dot_dotdot(iname)) {
- fname->disk_name.name = (unsigned char *)iname->name;
- fname->disk_name.len = iname->len;
- return 0;
- }
- ret = f2fs_get_encryption_info(dir);
- if (ret)
- return ret;
- ci = F2FS_I(dir)->i_crypt_info;
- if (ci) {
- ret = f2fs_fname_crypto_alloc_buffer(dir, iname->len,
- &fname->crypto_buf);
- if (ret < 0)
- return ret;
- ret = f2fs_fname_encrypt(dir, iname, &fname->crypto_buf);
- if (ret < 0)
- goto errout;
- fname->disk_name.name = fname->crypto_buf.name;
- fname->disk_name.len = fname->crypto_buf.len;
- return 0;
- }
- if (!lookup)
- return -EACCES;
-
- /* We don't have the key and we are doing a lookup; decode the
- * user-supplied name
- */
- if (iname->name[0] == '_')
- bigname = 1;
- if ((bigname && (iname->len != 33)) ||
- (!bigname && (iname->len > 43)))
- return -ENOENT;
-
- fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
- if (fname->crypto_buf.name == NULL)
- return -ENOMEM;
- ret = digest_decode(iname->name + bigname, iname->len - bigname,
- fname->crypto_buf.name);
- if (ret < 0) {
- ret = -ENOENT;
- goto errout;
- }
- fname->crypto_buf.len = ret;
- if (bigname) {
- memcpy(&fname->hash, fname->crypto_buf.name, 4);
- } else {
- fname->disk_name.name = fname->crypto_buf.name;
- fname->disk_name.len = fname->crypto_buf.len;
- }
- return 0;
-errout:
- f2fs_fname_crypto_free_buffer(&fname->crypto_buf);
- return ret;
-}
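
The magic lengths 33 and 43 above fall out of digest_encode(): n input bytes
carry 8n bits, emitted 6 bits per symbol, so they encode to ceil(8n / 6)
characters. A keyless lookup name decodes into the 32-byte crypto_buf, and
32 * 8 = 256 bits rounds up to 43 symbols (42 * 6 = 252, plus one symbol for
the remaining 4 bits). A bigname is '_' plus the encoding of the 24-byte buf
built in f2fs_fname_disk_to_usr(): 24 * 8 = 192 bits is exactly 32 symbols,
giving 33 characters total.
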
-
-void f2fs_fname_free_filename(struct f2fs_filename *fname)
-{
- kfree(fname->crypto_buf.name);
- fname->crypto_buf.name = NULL;
- fname->usr_fname = NULL;
- fname->disk_name.name = NULL;
-}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 4fb5709256fd..283fc9de4762 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -19,6 +19,8 @@
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "f2fs.h"
@@ -28,16 +30,41 @@
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>
+static bool __is_cp_guaranteed(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ struct f2fs_sb_info *sbi;
+
+ if (!mapping)
+ return false;
+
+ inode = mapping->host;
+ sbi = F2FS_I_SB(inode);
+
+ if (inode->i_ino == F2FS_META_INO(sbi) ||
+ inode->i_ino == F2FS_NODE_INO(sbi) ||
+ S_ISDIR(inode->i_mode) ||
+ is_cold_data(page))
+ return true;
+ return false;
+}
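
__is_cp_guaranteed() feeds the WB_DATA_TYPE() accounting used in the
write-completion path below: pages whose writeback is guaranteed by a
checkpoint (meta and node pages, directory data, cold data) are counted as
F2FS_WB_CP_DATA, and the rest presumably under a plain writeback counter.
For instance, a dentry page of a directory inode is checkpoint-guaranteed,
while a regular file's warm data page is not.
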
+
static void f2fs_read_end_io(struct bio *bio)
{
struct bio_vec *bvec;
int i;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
+ bio->bi_error = -EIO;
+#endif
+
if (f2fs_bio_encrypted(bio)) {
if (bio->bi_error) {
- f2fs_release_crypto_ctx(bio->bi_private);
+ fscrypt_release_ctx(bio->bi_private);
} else {
- f2fs_end_io_crypto_work(bio->bi_private, bio);
+ fscrypt_decrypt_bio_pages(bio->bi_private, bio);
return;
}
}
@@ -46,7 +73,8 @@ static void f2fs_read_end_io(struct bio *bio)
struct page *page = bvec->bv_page;
if (!bio->bi_error) {
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
} else {
ClearPageUptodate(page);
SetPageError(page);
@@ -64,26 +92,66 @@ static void f2fs_write_end_io(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
+ enum count_type type = WB_DATA_TYPE(page);
- f2fs_restore_and_release_control_page(&page);
+ fscrypt_pullback_bio_page(&page, true);
if (unlikely(bio->bi_error)) {
- set_page_dirty(page);
set_bit(AS_EIO, &page->mapping->flags);
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, true);
}
+ dec_page_count(sbi, type);
+ clear_cold_data(page);
end_page_writeback(page);
- dec_page_count(sbi, F2FS_WRITEBACK);
}
-
- if (!get_pages(sbi, F2FS_WRITEBACK) &&
- !list_empty(&sbi->cp_wait.task_list))
+ if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
+ wq_has_sleeper(&sbi->cp_wait))
wake_up(&sbi->cp_wait);
bio_put(bio);
}
/*
+ * Return the block device holding @blk_addr; if @bio is given, also set
+ * its bdev and start sector accordingly.
+ */
+struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
+ block_t blk_addr, struct bio *bio)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ if (FDEV(i).start_blk <= blk_addr &&
+ FDEV(i).end_blk >= blk_addr) {
+ blk_addr -= FDEV(i).start_blk;
+ bdev = FDEV(i).bdev;
+ break;
+ }
+ }
+ if (bio) {
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+ }
+ return bdev;
+}
+
+int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
+ return i;
+ return 0;
+}
+
+static bool __same_bdev(struct f2fs_sb_info *sbi,
+ block_t blk_addr, struct bio *bio)
+{
+ return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
+}
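
f2fs_target_device() is the heart of the new multi-device support: a linear
scan over the FDEV ranges picks the backing bdev and rebases blk_addr into
that device. A minimal userspace sketch of the same range lookup, with
hypothetical stand-ins for the FDEV entries:

	#include <stdio.h>

	/* Hypothetical stand-in for an FDEV(i) entry; field names assumed. */
	struct dev_range {
		unsigned int start_blk;
		unsigned int end_blk;	/* inclusive, as in FDEV(i).end_blk */
	};

	/* Mirror of f2fs_target_device_index(): 0 is the fallback device. */
	static int target_index(const struct dev_range *devs, int ndevs,
				unsigned int blk_addr)
	{
		int i;

		for (i = 0; i < ndevs; i++)
			if (devs[i].start_blk <= blk_addr &&
			    blk_addr <= devs[i].end_blk)
				return i;
		return 0;
	}

	int main(void)
	{
		struct dev_range devs[] = { { 0, 999 }, { 1000, 1999 } };

		/* Block 1500 lives on device 1; its in-device offset would
		 * be 1500 - devs[1].start_blk = 500. */
		printf("%d\n", target_index(devs, 2, 1500));
		return 0;
	}

A linear scan is adequate here: the device array is small and set up once
at mount time.
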
+
+/*
* Low-level block read/write IO operations.
*/
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
@@ -93,14 +161,24 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
bio = f2fs_bio_alloc(npages);
- bio->bi_bdev = sbi->sb->s_bdev;
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+ f2fs_target_device(sbi, blk_addr, bio);
bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
bio->bi_private = is_read ? NULL : sbi;
return bio;
}
+static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
+ struct bio *bio, enum page_type type)
+{
+ if (!is_read_io(rw)) {
+ if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+ current->plug && (type == DATA || type == NODE))
+ blk_finish_plug(current->plug);
+ }
+ submit_bio(rw, bio);
+}
+
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
@@ -113,12 +191,58 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
else
trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
- submit_bio(fio->rw, io->bio);
+ __submit_bio(io->sbi, fio->rw, io->bio, fio->type);
io->bio = NULL;
}
-void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
- enum page_type type, int rw)
+static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
+ struct page *page, nid_t ino)
+{
+ struct bio_vec *bvec;
+ struct page *target;
+ int i;
+
+ if (!io->bio)
+ return false;
+
+ if (!inode && !page && !ino)
+ return true;
+
+ bio_for_each_segment_all(bvec, io->bio, i) {
+
+ if (bvec->bv_page->mapping)
+ target = bvec->bv_page;
+ else
+ target = fscrypt_control_page(bvec->bv_page);
+
+ if (inode && inode == target->mapping->host)
+ return true;
+ if (page && page == target)
+ return true;
+ if (ino && ino == ino_of_node(target))
+ return true;
+ }
+
+ return false;
+}
+
+static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
+ struct page *page, nid_t ino,
+ enum page_type type)
+{
+ enum page_type btype = PAGE_TYPE_OF_BIO(type);
+ struct f2fs_bio_info *io = &sbi->write_io[btype];
+ bool ret;
+
+ down_read(&io->io_rwsem);
+ ret = __has_merged_page(io, inode, page, ino);
+ up_read(&io->io_rwsem);
+ return ret;
+}
+
+static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
+ struct inode *inode, struct page *page,
+ nid_t ino, enum page_type type, int rw)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io;
@@ -127,6 +251,9 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
down_write(&io->io_rwsem);
+ if (!__has_merged_page(io, inode, page, ino))
+ goto out;
+
/* change META to META_FLUSH in the checkpoint procedure */
if (type >= META_FLUSH) {
io->fio.type = META_FLUSH;
@@ -136,9 +263,31 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
}
__submit_merged_bio(io);
+out:
up_write(&io->io_rwsem);
}
+void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
+ int rw)
+{
+ __f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
+}
+
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
+ struct inode *inode, struct page *page,
+ nid_t ino, enum page_type type, int rw)
+{
+ if (has_merged_page(sbi, inode, page, ino, type))
+ __f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
+}
+
+void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
+{
+ f2fs_submit_merged_bio(sbi, DATA, WRITE);
+ f2fs_submit_merged_bio(sbi, NODE, WRITE);
+ f2fs_submit_merged_bio(sbi, META, WRITE);
+}
+
/*
* Fill the locked page with data located in the block address.
* Return unlocked page.
@@ -146,20 +295,21 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
- struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ struct page *page = fio->encrypted_page ?
+ fio->encrypted_page : fio->page;
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
- bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));
+ bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
- submit_bio(fio->rw, bio);
+ __submit_bio(fio->sbi, fio->rw, bio, fio->type);
return 0;
}
@@ -173,39 +323,51 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
io = is_read ? &sbi->read_io : &sbi->write_io[btype];
- verify_block_addr(sbi, fio->blk_addr);
+ if (fio->old_blkaddr != NEW_ADDR)
+ verify_block_addr(sbi, fio->old_blkaddr);
+ verify_block_addr(sbi, fio->new_blkaddr);
- down_write(&io->io_rwsem);
+ bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
if (!is_read)
- inc_page_count(sbi, F2FS_WRITEBACK);
+ inc_page_count(sbi, WB_DATA_TYPE(bio_page));
- if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
- io->fio.rw != fio->rw))
+ down_write(&io->io_rwsem);
+
+ if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
+ (io->fio.rw != fio->rw) ||
+ !__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
- int bio_blocks = MAX_BIO_BLOCKS(sbi);
-
- io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
+ io->bio = __bio_alloc(sbi, fio->new_blkaddr,
+ BIO_MAX_PAGES, is_read);
io->fio = *fio;
}
- bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
-
- if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
- PAGE_CACHE_SIZE) {
+ if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
+ PAGE_SIZE) {
__submit_merged_bio(io);
goto alloc_new;
}
- io->last_block_in_bio = fio->blk_addr;
+ io->last_block_in_bio = fio->new_blkaddr;
f2fs_trace_ios(fio, 0);
up_write(&io->io_rwsem);
trace_f2fs_submit_page_mbio(fio->page, fio);
}
+static void __set_data_blkaddr(struct dnode_of_data *dn)
+{
+ struct f2fs_node *rn = F2FS_NODE(dn->node_page);
+ __le32 *addr_array;
+
+ /* Get physical address of data block */
+ addr_array = blkaddr_in_node(rn);
+ addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+}
+
/*
* Lock ordering for the change of data block address:
* ->data_page
@@ -214,39 +376,63 @@ alloc_new:
*/
void set_data_blkaddr(struct dnode_of_data *dn)
{
- struct f2fs_node *rn;
- __le32 *addr_array;
- struct page *node_page = dn->node_page;
- unsigned int ofs_in_node = dn->ofs_in_node;
-
- f2fs_wait_on_page_writeback(node_page, NODE);
-
- rn = F2FS_NODE(node_page);
+ f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+ __set_data_blkaddr(dn);
+ if (set_page_dirty(dn->node_page))
+ dn->node_changed = true;
+}
- /* Get physical address of data block */
- addr_array = blkaddr_in_node(rn);
- addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
- set_page_dirty(node_page);
+void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+{
+ dn->data_blkaddr = blkaddr;
+ set_data_blkaddr(dn);
+ f2fs_update_extent_cache(dn);
}
-int reserve_new_block(struct dnode_of_data *dn)
+/* dn->ofs_in_node is left pointing just past the last reserved block */
+int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (!count)
+ return 0;
+
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
+ if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
return -ENOSPC;
- trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
+ trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
+ dn->ofs_in_node, count);
- dn->data_blkaddr = NEW_ADDR;
- set_data_blkaddr(dn);
- mark_inode_dirty(dn->inode);
- sync_inode_page(dn);
+ f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+
+ for (; count > 0; dn->ofs_in_node++) {
+ block_t blkaddr =
+ datablock_addr(dn->node_page, dn->ofs_in_node);
+ if (blkaddr == NULL_ADDR) {
+ dn->data_blkaddr = NEW_ADDR;
+ __set_data_blkaddr(dn);
+ count--;
+ }
+ }
+
+ if (set_page_dirty(dn->node_page))
+ dn->node_changed = true;
return 0;
}
+/* Should keep dn->ofs_in_node unchanged */
+int reserve_new_block(struct dnode_of_data *dn)
+{
+ unsigned int ofs_in_node = dn->ofs_in_node;
+ int ret;
+
+ ret = reserve_new_blocks(dn, 1);
+ dn->ofs_in_node = ofs_in_node;
+ return ret;
+}
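
reserve_new_blocks() batches what used to be a per-block operation: it scans
the dnode page's address slots starting at dn->ofs_in_node, claims only the
NULL_ADDR holes, and leaves the cursor past the last slot it examined;
reserve_new_block() then restores the cursor for single-block callers. A
userspace sketch of the scan, with NULL_ADDR and NEW_ADDR as assumed
placeholder values:

	#include <stdio.h>

	#define NULL_ADDR 0u		/* unallocated hole (assumed value) */
	#define NEW_ADDR  ~0u		/* reserved, not yet written (assumed) */

	/* Claim up to "count" holes; returns how many could not be claimed. */
	static int reserve(unsigned int *slots, int nslots, int count, int *ofs)
	{
		for (; count > 0 && *ofs < nslots; (*ofs)++) {
			if (slots[*ofs] == NULL_ADDR) {
				slots[*ofs] = NEW_ADDR;
				count--;
			}
		}
		return count;
	}

	int main(void)
	{
		unsigned int slots[] = { 5, NULL_ADDR, 7, NULL_ADDR };
		int ofs = 0;

		/* Claims slots 1 and 3; already-allocated slots are skipped. */
		int left = reserve(slots, 4, 2, &ofs);
		printf("left=%d ofs=%d\n", left, ofs);	/* left=0 ofs=4 */
		return 0;
	}
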
+
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
bool need_put = dn->inode_page ? false : true;
@@ -326,13 +512,14 @@ got_it:
* see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
*/
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
- SetPageUptodate(page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
unlock_page(page);
return page;
}
- fio.blk_addr = dn.data_blkaddr;
+ fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
fio.page = page;
err = f2fs_submit_page_bio(&fio);
if (err)
@@ -386,14 +573,14 @@ repeat:
/* wait for read completion */
lock_page(page);
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
- }
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
+ if (unlikely(!PageUptodate(page))) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
+ }
return page;
}
@@ -413,7 +600,7 @@ struct page *get_new_data_page(struct inode *inode,
struct page *page;
struct dnode_of_data dn;
int err;
-repeat:
+
page = f2fs_grab_cache_page(mapping, index, true);
if (!page) {
/*
@@ -437,125 +624,99 @@ repeat:
goto got_it;
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
- SetPageUptodate(page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
} else {
f2fs_put_page(page, 1);
- page = get_read_data_page(inode, index, READ_SYNC, true);
+ /* if ipage exists, blkaddr should be NEW_ADDR */
+ f2fs_bug_on(F2FS_I_SB(inode), ipage);
+ page = get_lock_data_page(inode, index, true);
if (IS_ERR(page))
- goto repeat;
-
- /* wait for read completion */
- lock_page(page);
+ return page;
}
got_it:
if (new_i_size && i_size_read(inode) <
- ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
- i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
- /* Only the directory inode sets new_i_size */
- set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
- }
+ ((loff_t)(index + 1) << PAGE_SHIFT))
+ f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
return page;
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- struct f2fs_inode_info *fi = F2FS_I(dn->inode);
struct f2fs_summary sum;
struct node_info ni;
- int seg = CURSEG_WARM_DATA;
pgoff_t fofs;
+ blkcnt_t count = 1;
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
if (dn->data_blkaddr == NEW_ADDR)
goto alloc;
- if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
+ if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
return -ENOSPC;
alloc:
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
- if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
- seg = CURSEG_DIRECT_IO;
-
allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
- &sum, seg);
+ &sum, CURSEG_WARM_DATA);
set_data_blkaddr(dn);
/* update i_size */
- fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
dn->ofs_in_node;
- if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
- i_size_write(dn->inode,
- ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
-
- /* direct IO doesn't use extent cache to maximize the performance */
- f2fs_drop_largest_extent(dn->inode, fofs);
-
+ if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
+ f2fs_i_size_write(dn->inode,
+ ((loff_t)(fofs + 1) << PAGE_SHIFT));
return 0;
}
-static void __allocate_data_blocks(struct inode *inode, loff_t offset,
- size_t count)
+static inline bool __force_buffered_io(struct inode *inode, int rw)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct dnode_of_data dn;
- u64 start = F2FS_BYTES_TO_BLK(offset);
- u64 len = F2FS_BYTES_TO_BLK(count);
- bool allocated;
- u64 end_offset;
-
- while (len) {
- f2fs_balance_fs(sbi);
- f2fs_lock_op(sbi);
-
- /* When reading holes, we need its node page */
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- if (get_dnode_of_data(&dn, start, ALLOC_NODE))
- goto out;
-
- allocated = false;
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
-
- while (dn.ofs_in_node < end_offset && len) {
- block_t blkaddr;
+ return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
+ (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
+ F2FS_I_SB(inode)->s_ndevs);
+}
- if (unlikely(f2fs_cp_error(sbi)))
- goto sync_out;
+int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct f2fs_map_blocks map;
+ int err = 0;
- blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
- if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
- if (__allocate_data_block(&dn))
- goto sync_out;
- allocated = true;
- }
- len--;
- start++;
- dn.ofs_in_node++;
- }
+ map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
+ map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
+ if (map.m_len > map.m_lblk)
+ map.m_len -= map.m_lblk;
+ else
+ map.m_len = 0;
- if (allocated)
- sync_inode_page(&dn);
+ map.m_next_pgofs = NULL;
- f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ return f2fs_map_blocks(inode, &map, 1,
+ __force_buffered_io(inode, WRITE) ?
+ F2FS_GET_BLOCK_PRE_AIO :
+ F2FS_GET_BLOCK_PRE_DIO);
}
- return;
-
-sync_out:
- if (allocated)
- sync_inode_page(&dn);
- f2fs_put_dnode(&dn);
-out:
- f2fs_unlock_op(sbi);
- return;
+ if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ }
+ if (!f2fs_has_inline_data(inode))
+ return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ return err;
}
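
The map window above is computed in block units. Assuming the usual macro
definitions (F2FS_BLK_ALIGN() rounds a byte offset up to a block count,
F2FS_BYTES_TO_BLK() rounds down) and 4 KiB blocks, a 10000-byte write at
pos 6000 yields m_lblk = ceil(6000 / 4096) = 2 and
m_len = floor(16000 / 4096) - 2 = 1: only the blocks fully covered by the
write get preallocated.
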
/*
@@ -567,188 +728,210 @@ out:
* b. do not use extent cache for better performance
* c. give the block addresses to blockdev
*/
-static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag)
{
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
- pgoff_t pgofs, end_offset;
+ int mode = create ? ALLOC_NODE : LOOKUP_NODE;
+ pgoff_t pgofs, end_offset, end;
int err = 0, ofs = 1;
+ unsigned int ofs_in_node, last_ofs_in_node;
+ blkcnt_t prealloc;
struct extent_info ei;
- bool allocated = false;
+ block_t blkaddr;
+
+ if (!maxblocks)
+ return 0;
map->m_len = 0;
map->m_flags = 0;
/* it only supports block size == page size */
pgofs = (pgoff_t)map->m_lblk;
+ end = pgofs + maxblocks;
- if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+ if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
map->m_pblk = ei.blk + pgofs - ei.fofs;
map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
map->m_flags = F2FS_MAP_MAPPED;
goto out;
}
+next_dnode:
if (create)
- f2fs_lock_op(F2FS_I_SB(inode));
+ f2fs_lock_op(sbi);
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, pgofs, mode);
if (err) {
- if (err == -ENOENT)
+ if (flag == F2FS_GET_BLOCK_BMAP)
+ map->m_pblk = 0;
+ if (err == -ENOENT) {
err = 0;
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs =
+ get_next_page_offset(&dn, pgofs);
+ }
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
+ prealloc = 0;
+ last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+
+next_block:
+ blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
- goto put_out;
+ goto sync_out;
+ }
+ if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+ if (blkaddr == NULL_ADDR) {
+ prealloc++;
+ last_ofs_in_node = dn.ofs_in_node;
+ }
+ } else {
+ err = __allocate_data_block(&dn);
+ if (!err)
+ set_inode_flag(inode, FI_APPEND_WRITE);
}
- err = __allocate_data_block(&dn);
if (err)
- goto put_out;
- allocated = true;
+ goto sync_out;
map->m_flags = F2FS_MAP_NEW;
+ blkaddr = dn.data_blkaddr;
} else {
- if (flag != F2FS_GET_BLOCK_FIEMAP ||
- dn.data_blkaddr != NEW_ADDR) {
- if (flag == F2FS_GET_BLOCK_BMAP)
- err = -ENOENT;
- goto put_out;
+ if (flag == F2FS_GET_BLOCK_BMAP) {
+ map->m_pblk = 0;
+ goto sync_out;
}
-
- /*
- * preallocated unwritten block should be mapped
- * for fiemap.
- */
- if (dn.data_blkaddr == NEW_ADDR)
- map->m_flags = F2FS_MAP_UNWRITTEN;
+ if (flag == F2FS_GET_BLOCK_FIEMAP &&
+ blkaddr == NULL_ADDR) {
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs = pgofs + 1;
+ }
+ if (flag != F2FS_GET_BLOCK_FIEMAP ||
+ blkaddr != NEW_ADDR)
+ goto sync_out;
}
}
- map->m_flags |= F2FS_MAP_MAPPED;
- map->m_pblk = dn.data_blkaddr;
- map->m_len = 1;
+ if (flag == F2FS_GET_BLOCK_PRE_AIO)
+ goto skip;
+
+ if (map->m_len == 0) {
+ /* preallocated unwritten block should be mapped for fiemap. */
+ if (blkaddr == NEW_ADDR)
+ map->m_flags |= F2FS_MAP_UNWRITTEN;
+ map->m_flags |= F2FS_MAP_MAPPED;
+
+ map->m_pblk = blkaddr;
+ map->m_len = 1;
+ } else if ((map->m_pblk != NEW_ADDR &&
+ blkaddr == (map->m_pblk + ofs)) ||
+ (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
+ flag == F2FS_GET_BLOCK_PRE_DIO) {
+ ofs++;
+ map->m_len++;
+ } else {
+ goto sync_out;
+ }
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+skip:
dn.ofs_in_node++;
pgofs++;
-get_next:
- if (dn.ofs_in_node >= end_offset) {
- if (allocated)
- sync_inode_page(&dn);
- allocated = false;
- f2fs_put_dnode(&dn);
+ /* preallocate blocks in batch for one dnode page */
+ if (flag == F2FS_GET_BLOCK_PRE_AIO &&
+ (pgofs == end || dn.ofs_in_node == end_offset)) {
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = get_dnode_of_data(&dn, pgofs, mode);
- if (err) {
- if (err == -ENOENT)
- err = 0;
- goto unlock_out;
- }
+ dn.ofs_in_node = ofs_in_node;
+ err = reserve_new_blocks(&dn, prealloc);
+ if (err)
+ goto sync_out;
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ map->m_len += dn.ofs_in_node - ofs_in_node;
+ if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
+ err = -ENOSPC;
+ goto sync_out;
+ }
+ dn.ofs_in_node = end_offset;
}
- if (maxblocks > map->m_len) {
- block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ if (pgofs >= end)
+ goto sync_out;
+ else if (dn.ofs_in_node < end_offset)
+ goto next_block;
- if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
- if (create) {
- if (unlikely(f2fs_cp_error(sbi))) {
- err = -EIO;
- goto sync_out;
- }
- err = __allocate_data_block(&dn);
- if (err)
- goto sync_out;
- allocated = true;
- map->m_flags |= F2FS_MAP_NEW;
- blkaddr = dn.data_blkaddr;
- } else {
- /*
- * we only merge preallocated unwritten blocks
- * for fiemap.
- */
- if (flag != F2FS_GET_BLOCK_FIEMAP ||
- blkaddr != NEW_ADDR)
- goto sync_out;
- }
- }
+ f2fs_put_dnode(&dn);
- /* Give more consecutive addresses for the readahead */
- if ((map->m_pblk != NEW_ADDR &&
- blkaddr == (map->m_pblk + ofs)) ||
- (map->m_pblk == NEW_ADDR &&
- blkaddr == NEW_ADDR)) {
- ofs++;
- dn.ofs_in_node++;
- pgofs++;
- map->m_len++;
- goto get_next;
- }
+ if (create) {
+ f2fs_unlock_op(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
}
+ goto next_dnode;
+
sync_out:
- if (allocated)
- sync_inode_page(&dn);
-put_out:
f2fs_put_dnode(&dn);
unlock_out:
- if (create)
- f2fs_unlock_op(F2FS_I_SB(inode));
+ if (create) {
+ f2fs_unlock_op(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
+ }
out:
trace_f2fs_map_blocks(inode, map, err);
return err;
}
static int __get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create, int flag)
+ struct buffer_head *bh, int create, int flag,
+ pgoff_t *next_pgofs)
{
struct f2fs_map_blocks map;
- int ret;
+ int err;
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
+ map.m_next_pgofs = next_pgofs;
- ret = f2fs_map_blocks(inode, &map, create, flag);
- if (!ret) {
+ err = f2fs_map_blocks(inode, &map, create, flag);
+ if (!err) {
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
bh->b_size = map.m_len << inode->i_blkbits;
}
- return ret;
+ return err;
}
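
The buffer_head acts as an in/out window here: on entry, b_size in bytes
bounds the request; on success, map_bh() and the updated b_size report the
extent that was actually mapped. With 4 KiB blocks (i_blkbits = 12), for
example, b_size = 32768 asks for 8 blocks, and if f2fs_map_blocks() finds
only 5 contiguous ones, b_size comes back as 20480.
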
static int get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create, int flag)
+ struct buffer_head *bh_result, int create, int flag,
+ pgoff_t *next_pgofs)
{
- return __get_data_block(inode, iblock, bh_result, create, flag);
+ return __get_data_block(inode, iblock, bh_result, create,
+ flag, next_pgofs);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DIO);
+ F2FS_GET_BLOCK_DIO, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
/* Block number less than F2FS MAX BLOCKS */
- if (unlikely(iblock >= max_file_size(0)))
+ if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
return -EFBIG;
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_BMAP);
+ F2FS_GET_BLOCK_BMAP, NULL);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -766,10 +949,9 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
{
struct buffer_head map_bh;
sector_t start_blk, last_blk;
- loff_t isize = i_size_read(inode);
+ pgoff_t next_pgofs;
u64 logical = 0, phys = 0, size = 0;
u32 flags = 0;
- bool past_eof = false, whole_file = false;
int ret = 0;
ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
@@ -782,82 +964,55 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return ret;
}
- mutex_lock(&inode->i_mutex);
-
- if (len >= isize) {
- whole_file = true;
- len = isize;
- }
+ inode_lock(inode);
if (logical_to_blk(inode, len) == 0)
len = blk_to_logical(inode, 1);
start_blk = logical_to_blk(inode, start);
last_blk = logical_to_blk(inode, start + len - 1);
+
next:
memset(&map_bh, 0, sizeof(struct buffer_head));
map_bh.b_size = len;
ret = get_data_block(inode, start_blk, &map_bh, 0,
- F2FS_GET_BLOCK_FIEMAP);
+ F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
if (ret)
goto out;
/* HOLE */
if (!buffer_mapped(&map_bh)) {
- start_blk++;
-
- if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
- past_eof = 1;
-
- if (past_eof && size) {
- flags |= FIEMAP_EXTENT_LAST;
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- } else if (size) {
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- size = 0;
- }
+ start_blk = next_pgofs;
- /* if we have holes up to/past EOF then we're done */
- if (start_blk > last_blk || past_eof || ret)
- goto out;
- } else {
- if (start_blk > last_blk && !whole_file) {
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- goto out;
- }
-
- /*
- * if size != 0 then we know we already have an extent
- * to add, so add it.
- */
- if (size) {
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- if (ret)
- goto out;
- }
+ if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
+ F2FS_I_SB(inode)->max_file_blocks))
+ goto prep_next;
- logical = blk_to_logical(inode, start_blk);
- phys = blk_to_logical(inode, map_bh.b_blocknr);
- size = map_bh.b_size;
- flags = 0;
- if (buffer_unwritten(&map_bh))
- flags = FIEMAP_EXTENT_UNWRITTEN;
+ flags |= FIEMAP_EXTENT_LAST;
+ }
- start_blk += logical_to_blk(inode, size);
+ if (size) {
+ if (f2fs_encrypted_inode(inode))
+ flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
- /*
- * If we are past the EOF, then we need to make sure as
- * soon as we find a hole that the last extent we found
- * is marked with FIEMAP_EXTENT_LAST
- */
- if (!past_eof && logical + size >= isize)
- past_eof = true;
+ ret = fiemap_fill_next_extent(fieinfo, logical,
+ phys, size, flags);
}
+
+ if (start_blk > last_blk || ret)
+ goto out;
+
+ logical = blk_to_logical(inode, start_blk);
+ phys = blk_to_logical(inode, map_bh.b_blocknr);
+ size = map_bh.b_size;
+ flags = 0;
+ if (buffer_unwritten(&map_bh))
+ flags = FIEMAP_EXTENT_UNWRITTEN;
+
+ start_blk += logical_to_blk(inode, size);
+
+prep_next:
cond_resched();
if (fatal_signal_pending(current))
ret = -EINTR;
@@ -867,10 +1022,39 @@ out:
if (ret == 1)
ret = 0;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return ret;
}
+static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
+ unsigned nr_pages)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct fscrypt_ctx *ctx = NULL;
+ struct bio *bio;
+
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ /* wait for the page to be moved by cleaning */
+ f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
+ }
+
+ bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+ if (!bio) {
+ if (ctx)
+ fscrypt_release_ctx(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ f2fs_target_device(sbi, blkaddr, bio);
+ bio->bi_end_io = f2fs_read_end_io;
+ bio->bi_private = ctx;
+
+ return bio;
+}
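
f2fs_grab_bio() centralizes read-bio setup: fscrypt context allocation,
waiting out GC writeback of the encrypted block, and multi-device targeting
via f2fs_target_device(). Note the convention at the two call sites below:
f2fs_mpage_readpages() resets bio to NULL before jumping to set_error_page,
while f2fs_write_begin() converts the ERR_PTR into err.
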
+
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
@@ -889,13 +1073,13 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
- struct block_device *bdev = inode->i_sb->s_bdev;
struct f2fs_map_blocks map;
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
map.m_flags = 0;
+ map.m_next_pgofs = NULL;
for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
@@ -934,7 +1118,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
map.m_len = last_block - block_in_file;
if (f2fs_map_blocks(inode, &map, 0,
- F2FS_GET_BLOCK_READ))
+ F2FS_GET_BLOCK_READ))
goto set_error_page;
}
got_it:
@@ -947,8 +1131,9 @@ got_it:
goto confused;
}
} else {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
- SetPageUptodate(page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
unlock_page(page);
goto next_page;
}
@@ -957,37 +1142,18 @@ got_it:
* This page will go to BIO. Do we need to send this
* BIO off first?
*/
- if (bio && (last_block_in_bio != block_nr - 1)) {
+ if (bio && (last_block_in_bio != block_nr - 1 ||
+ !__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
- submit_bio(READ, bio);
+ __submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
bio = NULL;
}
if (bio == NULL) {
- struct f2fs_crypto_ctx *ctx = NULL;
-
- if (f2fs_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode)) {
-
- ctx = f2fs_get_crypto_ctx(inode);
- if (IS_ERR(ctx))
- goto set_error_page;
-
- /* wait for the page to be moved by cleaning */
- f2fs_wait_on_encrypted_page_writeback(
- F2FS_I_SB(inode), block_nr);
- }
-
- bio = bio_alloc(GFP_KERNEL,
- min_t(int, nr_pages, BIO_MAX_PAGES));
- if (!bio) {
- if (ctx)
- f2fs_release_crypto_ctx(ctx);
+ bio = f2fs_grab_bio(inode, block_nr, nr_pages);
+ if (IS_ERR(bio)) {
+ bio = NULL;
goto set_error_page;
}
- bio->bi_bdev = bdev;
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
- bio->bi_end_io = f2fs_read_end_io;
- bio->bi_private = ctx;
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
@@ -997,22 +1163,22 @@ submit_and_realloc:
goto next_page;
set_error_page:
SetPageError(page);
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
unlock_page(page);
goto next_page;
confused:
if (bio) {
- submit_bio(READ, bio);
+ __submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
bio = NULL;
}
unlock_page(page);
next_page:
if (pages)
- page_cache_release(page);
+ put_page(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio)
- submit_bio(READ, bio);
+ __submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
return 0;
}
@@ -1059,23 +1225,33 @@ int do_write_data_page(struct f2fs_io_info *fio)
if (err)
return err;
- fio->blk_addr = dn.data_blkaddr;
+ fio->old_blkaddr = dn.data_blkaddr;
/* This page is already truncated */
- if (fio->blk_addr == NULL_ADDR) {
+ if (fio->old_blkaddr == NULL_ADDR) {
ClearPageUptodate(page);
goto out_writepage;
}
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ gfp_t gfp_flags = GFP_NOFS;
/* wait for GCed encrypted page writeback */
f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
- fio->blk_addr);
-
- fio->encrypted_page = f2fs_encrypt(inode, fio->page);
+ fio->old_blkaddr);
+retry_encrypt:
+ fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
+ gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
err = PTR_ERR(fio->encrypted_page);
+ if (err == -ENOMEM) {
+ /* flush pending ios and wait for a while */
+ f2fs_flush_merged_bios(F2FS_I_SB(inode));
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ gfp_flags |= __GFP_NOFAIL;
+ err = 0;
+ goto retry_encrypt;
+ }
goto out_writepage;
}
}
@@ -1086,20 +1262,19 @@ int do_write_data_page(struct f2fs_io_info *fio)
* If current allocation needs SSR,
* it had better in-place writes for updated data.
*/
- if (unlikely(fio->blk_addr != NEW_ADDR &&
+ if (unlikely(fio->old_blkaddr != NEW_ADDR &&
!is_cold_data(page) &&
+ !IS_ATOMIC_WRITTEN_PAGE(page) &&
need_inplace_update(inode))) {
rewrite_data_page(fio);
- set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
+ set_inode_flag(inode, FI_UPDATE_WRITE);
trace_f2fs_do_write_data_page(page, IPU);
} else {
write_data_page(&dn, fio);
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
trace_f2fs_do_write_data_page(page, OPU);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+ set_inode_flag(inode, FI_APPEND_WRITE);
if (page->index == 0)
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
}
out_writepage:
f2fs_put_dnode(&dn);
@@ -1113,7 +1288,8 @@ static int f2fs_write_data_page(struct page *page,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long) i_size)
- >> PAGE_CACHE_SHIFT;
+ >> PAGE_SHIFT;
+ loff_t psize = (page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
int err = 0;
@@ -1134,37 +1310,37 @@ static int f2fs_write_data_page(struct page *page,
* If the offset is out-of-range of file size,
* this page does not have to be written to disk.
*/
- offset = i_size & (PAGE_CACHE_SIZE - 1);
+ offset = i_size & (PAGE_SIZE - 1);
if ((page->index >= end_index + 1) || !offset)
goto out;
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ zero_user_segment(page, offset, PAGE_SIZE);
write:
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (f2fs_is_drop_cache(inode))
goto out;
- if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
- available_free_memory(sbi, BASE_CHECK))
+ /* we should not write the 0'th page, which holds the journal header */
+ if (f2fs_is_volatile_file(inode) && (!page->index ||
+ (!wbc->for_reclaim &&
+ available_free_memory(sbi, BASE_CHECK))))
goto redirty_out;
+ /* we should bypass data pages to let the kworker jobs proceed */
+ if (unlikely(f2fs_cp_error(sbi))) {
+ mapping_set_error(page->mapping, -EIO);
+ goto out;
+ }
+
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
- if (unlikely(f2fs_cp_error(sbi)))
- goto redirty_out;
err = do_write_data_page(&fio);
goto done;
}
- /* we should bypass data pages to let the kworker jobs proceed */
- if (unlikely(f2fs_cp_error(sbi))) {
- SetPageError(page);
- goto out;
- }
-
if (!wbc->for_reclaim)
need_balance_fs = true;
- else if (has_not_enough_free_secs(sbi, 0))
+ else if (has_not_enough_free_secs(sbi, 0, 0))
goto redirty_out;
err = -EAGAIN;
@@ -1173,35 +1349,37 @@ write:
err = f2fs_write_inline_data(inode, page);
if (err == -EAGAIN)
err = do_write_data_page(&fio);
+ if (F2FS_I(inode)->last_disk_size < psize)
+ F2FS_I(inode)->last_disk_size = psize;
f2fs_unlock_op(sbi);
done:
if (err && err != -ENOENT)
goto redirty_out;
- clear_cold_data(page);
out:
inode_dec_dirty_pages(inode);
if (err)
ClearPageUptodate(page);
+
+ if (wbc->for_reclaim) {
+ f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
+ remove_dirty_inode(inode);
+ }
+
unlock_page(page);
- if (need_balance_fs)
- f2fs_balance_fs(sbi);
- if (wbc->for_reclaim)
+ f2fs_balance_fs(sbi, need_balance_fs);
+
+ if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_bio(sbi, DATA, WRITE);
+
return 0;
redirty_out:
redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
-}
-
-static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
- void *data)
-{
- struct address_space *mapping = data;
- int ret = mapping->a_ops->writepage(page, wbc);
- mapping_set_error(mapping, ret);
- return ret;
+ if (!err)
+ return AOP_WRITEPAGE_ACTIVATE;
+ unlock_page(page);
+ return err;
}
/*
@@ -1210,8 +1388,7 @@ static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
* warm/hot data page.
*/
static int f2fs_write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc, writepage_t writepage,
- void *data)
+ struct writeback_control *wbc)
{
int ret = 0;
int done = 0;
@@ -1224,10 +1401,10 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int cycled;
int range_whole = 0;
int tag;
- int step = 0;
+ int nwritten = 0;
pagevec_init(&pvec, 0);
-next:
+
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
@@ -1237,8 +1414,8 @@ next:
cycled = 0;
end = -1;
} else {
- index = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ index = wbc->range_start >> PAGE_SHIFT;
+ end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
@@ -1282,12 +1459,10 @@ continue_unlock:
goto continue_unlock;
}
- if (step == is_cold_data(page))
- goto continue_unlock;
-
if (PageWriteback(page)) {
if (wbc->sync_mode != WB_SYNC_NONE)
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page,
+ DATA, true);
else
goto continue_unlock;
}
@@ -1296,16 +1471,22 @@ continue_unlock:
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- ret = (*writepage)(page, wbc, data);
+ ret = mapping->a_ops->writepage(page, wbc);
if (unlikely(ret)) {
+ /*
+ * keep nr_to_write, since the VFS uses it to
+ * compute the number of written pages.
+ */
if (ret == AOP_WRITEPAGE_ACTIVATE) {
unlock_page(page);
ret = 0;
- } else {
- done_index = page->index + 1;
- done = 1;
- break;
+ continue;
}
+ done_index = page->index + 1;
+ done = 1;
+ break;
+ } else {
+ nwritten++;
}
if (--wbc->nr_to_write <= 0 &&
@@ -1318,11 +1499,6 @@ continue_unlock:
cond_resched();
}
- if (step < 1) {
- step++;
- goto next;
- }
-
if (!cycled && !done) {
cycled = 1;
index = 0;
@@ -1332,6 +1508,10 @@ continue_unlock:
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
+ if (nwritten)
+ f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
+ NULL, 0, DATA, WRITE);
+
return ret;
}
@@ -1340,11 +1520,8 @@ static int f2fs_write_data_pages(struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- bool locked = false;
+ struct blk_plug plug;
int ret;
- long diff;
-
- trace_f2fs_writepages(mapping->host, wbc, DATA);
/* deal with chardevs and other special file */
if (!mapping->a_ops->writepage)
@@ -1359,39 +1536,117 @@ static int f2fs_write_data_pages(struct address_space *mapping,
available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
+ /* skip writing during file defragment */
+ if (is_inode_flag_set(inode, FI_DO_DEFRAG))
+ goto skip_write;
+
/* during POR, we don't need to trigger writepage at all. */
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto skip_write;
- diff = nr_pages_to_write(sbi, DATA, wbc);
-
- if (!S_ISDIR(inode->i_mode)) {
- mutex_lock(&sbi->writepages);
- locked = true;
- }
- ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
- if (locked)
- mutex_unlock(&sbi->writepages);
+ trace_f2fs_writepages(mapping->host, wbc, DATA);
- remove_dirty_dir_inode(inode);
+ blk_start_plug(&plug);
+ ret = f2fs_write_cache_pages(mapping, wbc);
+ blk_finish_plug(&plug);
+ /*
+ * if some pages were truncated, we cannot rely on their mapping->host
+ * to detect pending bios.
+ */
- wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
+ remove_dirty_inode(inode);
return ret;
skip_write:
wbc->pages_skipped += get_dirty_pages(inode);
+ trace_f2fs_writepages(mapping->host, wbc, DATA);
return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
+ loff_t i_size = i_size_read(inode);
+
+ if (to > i_size) {
+ truncate_pagecache(inode, i_size);
+ truncate_blocks(inode, i_size, true);
+ }
+}
+
+static int prepare_write_begin(struct f2fs_sb_info *sbi,
+ struct page *page, loff_t pos, unsigned len,
+ block_t *blk_addr, bool *node_changed)
+{
+ struct inode *inode = page->mapping->host;
+ pgoff_t index = page->index;
+ struct dnode_of_data dn;
+ struct page *ipage;
+ bool locked = false;
+ struct extent_info ei;
+ int err = 0;
+
+ /*
+ * we already allocated all the blocks, so we don't need to get
+ * the block addresses when there is no need to fill the page.
+ */
+ if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
+ return 0;
+
+ if (f2fs_has_inline_data(inode) ||
+ (pos & PAGE_MASK) >= i_size_read(inode)) {
+ f2fs_lock_op(sbi);
+ locked = true;
+ }
+restart:
+ /* check inline_data */
+ ipage = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(ipage)) {
+ err = PTR_ERR(ipage);
+ goto unlock_out;
+ }
+
+ set_new_dnode(&dn, inode, ipage, ipage, 0);
- if (to > inode->i_size) {
- truncate_pagecache(inode, inode->i_size);
- truncate_blocks(inode, inode->i_size, true);
+ if (f2fs_has_inline_data(inode)) {
+ if (pos + len <= MAX_INLINE_DATA) {
+ read_inline_data(page, ipage);
+ set_inode_flag(inode, FI_DATA_EXIST);
+ if (inode->i_nlink)
+ set_inline_node(ipage);
+ } else {
+ err = f2fs_convert_inline_page(&dn, page);
+ if (err)
+ goto out;
+ if (dn.data_blkaddr == NULL_ADDR)
+ err = f2fs_get_block(&dn, index);
+ }
+ } else if (locked) {
+ err = f2fs_get_block(&dn, index);
+ } else {
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ } else {
+ /* hole case */
+ err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (err || dn.data_blkaddr == NULL_ADDR) {
+ f2fs_put_dnode(&dn);
+ f2fs_lock_op(sbi);
+ locked = true;
+ goto restart;
+ }
+ }
}
+
+ /* convert_inline_page can make node_changed */
+ *blk_addr = dn.data_blkaddr;
+ *node_changed = dn.node_changed;
+out:
+ f2fs_put_dnode(&dn);
+unlock_out:
+ if (locked)
+ f2fs_unlock_op(sbi);
+ return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
@@ -1401,9 +1656,9 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *page = NULL;
- struct page *ipage;
- pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
- struct dnode_of_data dn;
+ pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
+ bool need_balance = false;
+ block_t blkaddr = NULL_ADDR;
int err = 0;
if (trace_android_fs_datawrite_start_enabled()) {
@@ -1418,8 +1673,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
}
trace_f2fs_write_begin(inode, pos, len, flags);
- f2fs_balance_fs(sbi);
-
/*
* We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be:
@@ -1439,98 +1692,63 @@ repeat:
*pagep = page;
- f2fs_lock_op(sbi);
-
- /* check inline_data */
- ipage = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
- goto unlock_fail;
- }
-
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ err = prepare_write_begin(sbi, page, pos, len,
+ &blkaddr, &need_balance);
+ if (err)
+ goto fail;
- if (f2fs_has_inline_data(inode)) {
- if (pos + len <= MAX_INLINE_DATA) {
- read_inline_data(page, ipage);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
- sync_inode_page(&dn);
- goto put_next;
+ if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
+ unlock_page(page);
+ f2fs_balance_fs(sbi, true);
+ lock_page(page);
+ if (page->mapping != mapping) {
+ /* The page got truncated from under us */
+ f2fs_put_page(page, 1);
+ goto repeat;
}
- err = f2fs_convert_inline_page(&dn, page);
- if (err)
- goto put_fail;
}
- err = f2fs_get_block(&dn, index);
- if (err)
- goto put_fail;
-put_next:
- f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
-
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
-
- if (len == PAGE_CACHE_SIZE)
- goto out_update;
- if (PageUptodate(page))
- goto out_clear;
-
- if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
- unsigned start = pos & (PAGE_CACHE_SIZE - 1);
- unsigned end = start + len;
+ f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
- /* Reading beyond i_size is simple: memset to zero */
- zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
- goto out_update;
- }
+ if (len == PAGE_SIZE || PageUptodate(page))
+ return 0;
- if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ if (blkaddr == NEW_ADDR) {
+ zero_user_segment(page, 0, PAGE_SIZE);
+ SetPageUptodate(page);
} else {
- struct f2fs_io_info fio = {
- .sbi = sbi,
- .type = DATA,
- .rw = READ_SYNC,
- .blk_addr = dn.data_blkaddr,
- .page = page,
- .encrypted_page = NULL,
- };
- err = f2fs_submit_page_bio(&fio);
- if (err)
+ struct bio *bio;
+
+ bio = f2fs_grab_bio(inode, blkaddr, 1);
+ if (IS_ERR(bio)) {
+ err = PTR_ERR(bio);
goto fail;
+ }
- lock_page(page);
- if (unlikely(!PageUptodate(page))) {
- err = -EIO;
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ bio_put(bio);
+ err = -EFAULT;
goto fail;
}
+
+ __submit_bio(sbi, READ_SYNC, bio, DATA);
+
+ lock_page(page);
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
-
- /* avoid symlink page */
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
- err = f2fs_decrypt_one(inode, page);
- if (err)
- goto fail;
+ if (unlikely(!PageUptodate(page))) {
+ err = -EIO;
+ goto fail;
}
}
-out_update:
- SetPageUptodate(page);
-out_clear:
- clear_cold_data(page);
return 0;
-put_fail:
- f2fs_put_dnode(&dn);
-unlock_fail:
- f2fs_unlock_op(sbi);
fail:
f2fs_put_page(page, 1);
f2fs_write_failed(mapping, pos + len);
@@ -1547,15 +1765,27 @@ static int f2fs_write_end(struct file *file,
trace_android_fs_datawrite_end(inode, pos, len);
trace_f2fs_write_end(inode, pos, len, copied);
- set_page_dirty(page);
-
- if (pos + copied > i_size_read(inode)) {
- i_size_write(inode, pos + copied);
- mark_inode_dirty(inode);
- update_inode_page(inode);
+ /*
+	 * This should come from len == PAGE_SIZE, and we expect copied to be
+	 * PAGE_SIZE as well. Otherwise, we treat it as a zero-length copy and
+	 * let generic_perform_write() retry the copy by returning copied = 0.
+ */
+ if (!PageUptodate(page)) {
+ if (unlikely(copied != PAGE_SIZE))
+ copied = 0;
+ else
+ SetPageUptodate(page);
}
+ if (!copied)
+ goto unlock_out;
+ set_page_dirty(page);
+
+ if (pos + copied > i_size_read(inode))
+ f2fs_i_size_write(inode, pos + copied);
+unlock_out:
f2fs_put_page(page, 1);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
}
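
For context on the copied = 0 convention above: the caller retries rather than failing. An abridged, non-authoritative sketch of that caller, modeled on generic_perform_write() in mm/filemap.c (fault-in, balancing, and the progress-guaranteeing shrink of 'bytes' are elided here; the real loop also shortens the copy window so a persistently faulting page cannot spin forever):

static ssize_t perform_write_sketch(struct file *file, struct iov_iter *i,
							loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	ssize_t written = 0;
	long status = 0;

	do {
		struct page *page;
		void *fsdata;
		unsigned long offset = pos & (PAGE_SIZE - 1);
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
						iov_iter_count(i));
		size_t copied;

		status = a_ops->write_begin(file, mapping, pos, bytes, 0,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		copied = iov_iter_copy_from_user_atomic(page, i, offset,
						bytes);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		/*
		 * copied == 0 (as f2fs may return above) simply loops again:
		 * the next iteration recomputes 'bytes' and retries the same
		 * range.
		 */
		iov_iter_advance(i, copied);
		pos += copied;
		written += copied;
	} while (iov_iter_count(i));

	return written ? written : status;
}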
@@ -1574,29 +1804,20 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
}
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
- loff_t offset)
+ loff_t offset)
{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
+ int rw = iov_iter_rw(iter);
int err;
- /* we don't need to use inline_data strictly */
- if (f2fs_has_inline_data(inode)) {
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
-
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- return 0;
-
err = check_direct_IO(inode, iter, offset);
if (err)
return err;
- trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
+ if (__force_buffered_io(inode, rw))
+ return 0;
if (trace_android_fs_dataread_start_enabled() &&
(iov_iter_rw(iter) == READ)) {
@@ -1620,18 +1841,18 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
current->pid, path,
current->comm);
}
- if (iov_iter_rw(iter) == WRITE) {
- __allocate_data_blocks(inode, offset, count);
- if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
- err = -EIO;
- goto out;
- }
- }
+ trace_f2fs_direct_IO_enter(inode, offset, count, rw);
+ down_read(&F2FS_I(inode)->dio_rwsem[rw]);
err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
-out:
- if (err < 0 && iov_iter_rw(iter) == WRITE)
- f2fs_write_failed(mapping, offset + count);
+ up_read(&F2FS_I(inode)->dio_rwsem[rw]);
+
+ if (rw == WRITE) {
+ if (err > 0)
+ set_inode_flag(inode, FI_UPDATE_WRITE);
+ else if (err < 0)
+ f2fs_write_failed(mapping, offset + count);
+ }
if (trace_android_fs_dataread_start_enabled() &&
(iov_iter_rw(iter) == READ))
@@ -1640,7 +1861,7 @@ out:
(iov_iter_rw(iter) == WRITE))
trace_android_fs_datawrite_end(inode, offset, count);
- trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
+ trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
return err;
}
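
__force_buffered_io(), used near the top of f2fs_direct_IO(), is defined elsewhere in the series. A minimal sketch, assuming it folds in the check this hunk removes (encrypted regular files fall back to buffered I/O) plus the LFS mount mode that this same patch adds to f2fs.h; the exact condition set is an assumption:

static inline bool __force_buffered_io(struct inode *inode, int rw)
{
	return (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS));
}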
@@ -1652,22 +1873,25 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
- (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
+ (offset % PAGE_SIZE || length != PAGE_SIZE))
return;
if (PageDirty(page)) {
- if (inode->i_ino == F2FS_META_INO(sbi))
+ if (inode->i_ino == F2FS_META_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_META);
- else if (inode->i_ino == F2FS_NODE_INO(sbi))
+ } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_NODES);
- else
+ } else {
inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ }
}
/* This is atomic written page, keep Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
return;
+ set_page_private(page, 0);
ClearPagePrivate(page);
}
@@ -1681,10 +1905,42 @@ int f2fs_release_page(struct page *page, gfp_t wait)
if (IS_ATOMIC_WRITTEN_PAGE(page))
return 0;
+ set_page_private(page, 0);
ClearPagePrivate(page);
return 1;
}
+/*
+ * This was copied from __set_page_dirty_buffers, which gives higher
+ * performance on very high-speed storage (e.g., pmem).
+ */
+void f2fs_set_page_dirty_nobuffers(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct mem_cgroup *memcg;
+ unsigned long flags;
+
+ if (unlikely(!mapping))
+ return;
+
+ spin_lock(&mapping->private_lock);
+ memcg = mem_cgroup_begin_page_stat(page);
+ SetPageDirty(page);
+ spin_unlock(&mapping->private_lock);
+
+ spin_lock_irqsave(&mapping->tree_lock, flags);
+ WARN_ON_ONCE(!PageUptodate(page));
+ account_page_dirtied(page, mapping, memcg);
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+
+ mem_cgroup_end_page_stat(memcg);
+
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+}
+
static int f2fs_set_data_page_dirty(struct page *page)
{
struct address_space *mapping = page->mapping;
@@ -1692,7 +1948,8 @@ static int f2fs_set_data_page_dirty(struct page *page)
trace_f2fs_set_page_dirty(page, DATA);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (f2fs_is_atomic_file(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
@@ -1707,7 +1964,7 @@ static int f2fs_set_data_page_dirty(struct page *page)
}
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
update_dirty_page(inode, page);
return 1;
}
@@ -1728,6 +1985,58 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
+#ifdef CONFIG_MIGRATION
+#include <linux/migrate.h>
+
+int f2fs_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc, extra_count;
+ struct f2fs_inode_info *fi = F2FS_I(mapping->host);
+ bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
+
+ BUG_ON(PageWriteback(page));
+
+	/* migrating an atomic-written page is safe while inmem_lock is held */
+ if (atomic_written && !mutex_trylock(&fi->inmem_lock))
+ return -EAGAIN;
+
+ /*
+	 * A reference is expected if PagePrivate is set when the mapping is
+	 * moved; however, F2FS breaks this rule to maintain dirty page counts
+	 * while truncating pages. Adjusting 'extra_count' here makes it work.
+ */
+ extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
+ rc = migrate_page_move_mapping(mapping, newpage,
+ page, NULL, mode, extra_count);
+ if (rc != MIGRATEPAGE_SUCCESS) {
+ if (atomic_written)
+ mutex_unlock(&fi->inmem_lock);
+ return rc;
+ }
+
+ if (atomic_written) {
+ struct inmem_pages *cur;
+ list_for_each_entry(cur, &fi->inmem_pages, list)
+ if (cur->page == page) {
+ cur->page = newpage;
+ break;
+ }
+ mutex_unlock(&fi->inmem_lock);
+ put_page(page);
+ get_page(newpage);
+ }
+
+ if (PagePrivate(page))
+ SetPagePrivate(newpage);
+ set_page_private(newpage, page_private(page));
+
+ migrate_page_copy(newpage, page);
+
+ return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
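
As a worked illustration of the extra_count expression above: migrate_page_move_mapping() checks the page's reference count against an expected value that normally includes one extra reference per PagePrivate page. F2FS sets PagePrivate without taking that reference, while an atomic-written page does hold one through its inmem_pages entry, so the two adjustments combine as follows:

/* extra_count = (atomic_written ? 1 : 0) - page_has_private(page):      */
/*                                                                       */
/*   atomic_written  page_has_private  extra_count                       */
/*   true            true               1 - 1 =  0  (effects cancel)     */
/*   true            false              1 - 0 =  1  (inmem ref only)     */
/*   false           true               0 - 1 = -1  (private, no ref)    */
/*   false           false              0 - 0 =  0                       */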
const struct address_space_operations f2fs_dblock_aops = {
.readpage = f2fs_read_data_page,
.readpages = f2fs_read_data_pages,
@@ -1740,4 +2049,7 @@ const struct address_space_operations f2fs_dblock_aops = {
.releasepage = f2fs_release_page,
.direct_IO = f2fs_direct_IO,
.bmap = f2fs_bmap,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 24d6a51b48d1..fbd5184140d0 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -38,23 +38,31 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
si->total_ext = atomic64_read(&sbi->total_hit_ext);
- si->ext_tree = sbi->total_ext_tree;
+ si->ext_tree = atomic_read(&sbi->total_ext_tree);
+ si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
si->ext_node = atomic_read(&sbi->total_ext_node);
si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
- si->ndirty_dirs = sbi->n_dirty_dirs;
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
+ si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
+ si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
+ si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
+ si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
+ si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
- si->wb_pages = get_pages(sbi, F2FS_WRITEBACK);
+ si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
+ si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
si->valid_count = valid_user_blocks(sbi);
+ si->discard_blks = discard_blocks(sbi);
si->valid_node_count = valid_node_count(sbi);
si->valid_inode_count = valid_inode_count(sbi);
si->inline_xattr = atomic_read(&sbi->inline_xattr);
si->inline_inode = atomic_read(&sbi->inline_inode);
si->inline_dir = atomic_read(&sbi->inline_dir);
+ si->orphans = sbi->im[ORPHAN_INO].ino_num;
si->utilization = utilization(sbi);
si->free_segs = free_segments(sbi);
@@ -67,7 +75,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
si->sits = MAIN_SEGS(sbi);
si->dirty_sits = SIT_I(sbi)->dirty_sentries;
- si->fnids = NM_I(sbi)->fcnt;
+ si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST];
+ si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST];
si->bg_gc = sbi->bg_gc;
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
@@ -105,7 +114,7 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
bimodal = 0;
total_vblocks = 0;
- blks_per_sec = sbi->segs_per_sec * (1 << sbi->log_blocks_per_seg);
+ blks_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
hblks_per_sec = blks_per_sec / 2;
for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
@@ -140,6 +149,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
si->base_mem += 2 * sizeof(struct f2fs_inode_info);
si->base_mem += sizeof(*sbi->ckpt);
+ si->base_mem += sizeof(struct percpu_counter) * NR_COUNT_TYPE;
/* build sm */
si->base_mem += sizeof(struct f2fs_sm_info);
@@ -148,7 +158,9 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct sit_info);
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
- si->base_mem += 3 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ if (f2fs_discard_en(sbi))
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
@@ -161,7 +173,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
/* build curseg */
si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
- si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE;
+ si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
/* build dirty segmap */
si->base_mem += sizeof(struct dirty_seglist_info);
@@ -184,23 +196,25 @@ get_cache:
si->cache_mem += sizeof(struct flush_cmd_control);
/* free nids */
- si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid);
+ si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] +
+ NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]) *
+ sizeof(struct free_nid);
si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
sizeof(struct nat_entry_set);
si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
- si->cache_mem += sbi->n_dirty_dirs * sizeof(struct inode_entry);
- for (i = 0; i <= UPDATE_INO; i++)
+ for (i = 0; i <= ORPHAN_INO; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
- si->cache_mem += sbi->total_ext_tree * sizeof(struct extent_tree);
+ si->cache_mem += atomic_read(&sbi->total_ext_tree) *
+ sizeof(struct extent_tree);
si->cache_mem += atomic_read(&sbi->total_ext_node) *
sizeof(struct extent_node);
si->page_mem = 0;
npages = NODE_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
npages = META_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
}
static int stat_show(struct seq_file *s, void *v)
@@ -211,20 +225,24 @@ static int stat_show(struct seq_file *s, void *v)
mutex_lock(&f2fs_stat_mutex);
list_for_each_entry(si, &f2fs_stat_list, stat_list) {
- char devname[BDEVNAME_SIZE];
-
update_general_status(si->sbi);
- seq_printf(s, "\n=====[ partition info(%s). #%d ]=====\n",
- bdevname(si->sbi->sb->s_bdev, devname), i++);
+ seq_printf(s, "\n=====[ partition info(%pg). #%d, %s]=====\n",
+ si->sbi->sb->s_bdev, i++,
+			f2fs_readonly(si->sbi->sb) ? "RO" : "RW");
seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
si->sit_area_segs, si->nat_area_segs);
seq_printf(s, "[SSA: %d] [MAIN: %d",
si->ssa_area_segs, si->main_area_segs);
seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
si->overp_segs, si->rsvd_segs);
- seq_printf(s, "Utilization: %d%% (%d valid blocks)\n",
- si->utilization, si->valid_count);
+ if (test_opt(si->sbi, DISCARD))
+ seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
+ si->utilization, si->valid_count, si->discard_blks);
+ else
+ seq_printf(s, "Utilization: %u%% (%u valid blocks)\n",
+ si->utilization, si->valid_count);
+
seq_printf(s, " - Node: %u (Inode: %u, ",
si->valid_node_count, si->valid_inode_count);
seq_printf(s, "Other: %u)\n - Data: %u\n",
@@ -236,6 +254,8 @@ static int stat_show(struct seq_file *s, void *v)
si->inline_inode);
seq_printf(s, " - Inline_dentry Inode: %u\n",
si->inline_dir);
+ seq_printf(s, " - Orphan Inode: %u\n",
+ si->orphans);
seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
si->main_area_segs, si->main_area_sections,
si->main_area_zones);
@@ -269,7 +289,8 @@ static int stat_show(struct seq_file *s, void *v)
si->dirty_count);
seq_printf(s, " - Prefree: %d\n - Free: %d (%d)\n\n",
si->prefree_count, si->free_segs, si->free_secs);
- seq_printf(s, "CP calls: %d\n", si->cp_count);
+ seq_printf(s, "CP calls: %d (BG: %d)\n",
+ si->cp_count, si->bg_cp_count);
seq_printf(s, "GC calls: %d (BG: %d)\n",
si->call_count, si->bg_gc);
seq_printf(s, " - data segments : %d (%d)\n",
@@ -290,21 +311,25 @@ static int stat_show(struct seq_file *s, void *v)
!si->total_ext ? 0 :
div64_u64(si->hit_total * 100, si->total_ext),
si->hit_total, si->total_ext);
- seq_printf(s, " - Inner Struct Count: tree: %d, node: %d\n",
- si->ext_tree, si->ext_node);
+ seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
+ si->ext_tree, si->zombie_tree, si->ext_node);
seq_puts(s, "\nBalancing F2FS Async:\n");
- seq_printf(s, " - inmem: %4d, wb: %4d\n",
- si->inmem_pages, si->wb_pages);
+ seq_printf(s, " - inmem: %4d, wb_cp_data: %4d, wb_data: %4d\n",
+ si->inmem_pages, si->nr_wb_cp_data, si->nr_wb_data);
seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
- seq_printf(s, " - dents: %4d in dirs:%4d\n",
- si->ndirty_dent, si->ndirty_dirs);
+ seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n",
+ si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
+ seq_printf(s, " - datas: %4d in files:%4d\n",
+ si->ndirty_data, si->ndirty_files);
seq_printf(s, " - meta: %4d in %4d\n",
si->ndirty_meta, si->meta_pages);
+ seq_printf(s, " - imeta: %4d\n",
+ si->ndirty_imeta);
seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
si->dirty_nats, si->nats, si->dirty_sits, si->sits);
- seq_printf(s, " - free_nids: %9d\n",
- si->fnids);
+ seq_printf(s, " - free_nids: %9d, alloc_nids: %9d\n",
+ si->free_nids, si->alloc_nids);
seq_puts(s, "\nDistribution of User Blocks:");
seq_puts(s, " [ valid | invalid | free ]\n");
seq_puts(s, " [");
@@ -407,20 +432,23 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
kfree(si);
}
-void __init f2fs_create_root_stats(void)
+int __init f2fs_create_root_stats(void)
{
struct dentry *file;
f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
if (!f2fs_debugfs_root)
- return;
+ return -ENOMEM;
file = debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root,
NULL, &stat_fops);
if (!file) {
debugfs_remove(f2fs_debugfs_root);
f2fs_debugfs_root = NULL;
+ return -ENOMEM;
}
+
+ return 0;
}
void f2fs_destroy_root_stats(void)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 60972a559685..4436079dbf0c 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -17,8 +17,8 @@
static unsigned long dir_blocks(struct inode *inode)
{
- return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
- >> PAGE_CACHE_SHIFT;
+ return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
+ >> PAGE_SHIFT;
}
static unsigned int dir_buckets(unsigned int level, int dir_level)
@@ -37,7 +37,7 @@ static unsigned int bucket_blocks(unsigned int level)
return 4;
}
-unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
+static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
[F2FS_FT_UNKNOWN] = DT_UNKNOWN,
[F2FS_FT_REG_FILE] = DT_REG,
[F2FS_FT_DIR] = DT_DIR,
@@ -48,7 +48,6 @@ unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
[F2FS_FT_SYMLINK] = DT_LNK,
};
-#define S_SHIFT 12
static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = F2FS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = F2FS_FT_DIR,
@@ -64,6 +63,13 @@ void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
+unsigned char get_de_type(struct f2fs_dir_entry *de)
+{
+ if (de->file_type < F2FS_FT_MAX)
+ return f2fs_filetype_table[de->file_type];
+ return DT_UNKNOWN;
+}
+
static unsigned long dir_block_index(unsigned int level,
int dir_level, unsigned int idx)
{
@@ -77,7 +83,7 @@ static unsigned long dir_block_index(unsigned int level,
}
static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
- struct f2fs_filename *fname,
+ struct fscrypt_name *fname,
f2fs_hash_t namehash,
int *max_slots,
struct page **res_page)
@@ -95,23 +101,18 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
else
kunmap(dentry_page);
- /*
- * For the most part, it should be a bug when name_len is zero.
- * We stop here for figuring out where the bugs has occurred.
- */
- f2fs_bug_on(F2FS_P_SB(dentry_page), d.max < 0);
return de;
}
-struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *fname,
+struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
f2fs_hash_t namehash, int *max_slots,
struct f2fs_dentry_ptr *d)
{
struct f2fs_dir_entry *de;
unsigned long bit_pos = 0;
int max_len = 0;
- struct f2fs_str de_name = FSTR_INIT(NULL, 0);
- struct f2fs_str *name = &fname->disk_name;
+ struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
+ struct fscrypt_str *name = &fname->disk_name;
if (max_slots)
*max_slots = 0;
@@ -124,37 +125,28 @@ struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *fname,
de = &d->dentry[bit_pos];
- if (de->hash_code != namehash)
- goto not_match;
+ if (unlikely(!de->name_len)) {
+ bit_pos++;
+ continue;
+ }
+ /* encrypted case */
de_name.name = d->filename[bit_pos];
de_name.len = le16_to_cpu(de->name_len);
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- if (unlikely(!name->name)) {
- if (fname->usr_fname->name[0] == '_') {
- if (de_name.len > 32 &&
- !memcmp(de_name.name + ((de_name.len - 17) & ~15),
- fname->crypto_buf.name + 8, 16))
- goto found;
- goto not_match;
- }
- name->name = fname->crypto_buf.name;
- name->len = fname->crypto_buf.len;
- }
-#endif
- if (de_name.len == name->len &&
- !memcmp(de_name.name, name->name, name->len))
+		/* no-key lookup of an encrypted name: match by hash code */
+ if (fname->hash) {
+ if (de->hash_code == cpu_to_le32(fname->hash))
+ goto found;
+ } else if (de_name.len == name->len &&
+ de->hash_code == namehash &&
+ !memcmp(de_name.name, name->name, name->len))
goto found;
-not_match:
+
if (max_slots && max_len > *max_slots)
*max_slots = max_len;
max_len = 0;
- /* remain bug on condition */
- if (unlikely(!de->name_len))
- d->max = -1;
-
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
}
@@ -167,7 +159,7 @@ found:
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
unsigned int level,
- struct f2fs_filename *fname,
+ struct fscrypt_name *fname,
struct page **res_page)
{
struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
@@ -180,9 +172,10 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
int max_slots;
f2fs_hash_t namehash;
- namehash = f2fs_dentry_hash(&name, fname);
-
- f2fs_bug_on(F2FS_I_SB(dir), level > MAX_DIR_HASH_DEPTH);
+	if (fname->hash)
+ namehash = cpu_to_le32(fname->hash);
+ else
+ namehash = f2fs_dentry_hash(&name);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
@@ -195,8 +188,13 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
/* no need to allocate new dentry pages to all the indices */
dentry_page = find_data_page(dir, bidx);
if (IS_ERR(dentry_page)) {
- room = true;
- continue;
+ if (PTR_ERR(dentry_page) == -ENOENT) {
+ room = true;
+ continue;
+ } else {
+ *res_page = dentry_page;
+ break;
+ }
}
de = find_in_block(dentry_page, fname, namehash, &max_slots,
@@ -217,79 +215,87 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
return de;
}
-/*
- * Find an entry in the specified directory with the wanted name.
- * It returns the page where the entry was found (as a parameter - res_page),
- * and the entry itself. Page is returned mapped and unlocked.
- * Entry is guaranteed to be valid.
- */
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- struct qstr *child, struct page **res_page)
+struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ struct fscrypt_name *fname, struct page **res_page)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
unsigned int max_depth;
unsigned int level;
- struct f2fs_filename fname;
- int err;
-
- *res_page = NULL;
-
- err = f2fs_fname_setup_filename(dir, child, 1, &fname);
- if (err)
- return NULL;
if (f2fs_has_inline_dentry(dir)) {
- de = find_in_inline_dir(dir, &fname, res_page);
+ *res_page = NULL;
+ de = find_in_inline_dir(dir, fname, res_page);
goto out;
}
- if (npages == 0)
+ if (npages == 0) {
+ *res_page = NULL;
goto out;
+ }
max_depth = F2FS_I(dir)->i_current_depth;
+ if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
+ f2fs_msg(F2FS_I_SB(dir)->sb, KERN_WARNING,
+ "Corrupted max_depth of %lu: %u",
+ dir->i_ino, max_depth);
+ max_depth = MAX_DIR_HASH_DEPTH;
+ f2fs_i_depth_write(dir, max_depth);
+ }
for (level = 0; level < max_depth; level++) {
- de = find_in_level(dir, level, &fname, res_page);
- if (de)
+ *res_page = NULL;
+ de = find_in_level(dir, level, fname, res_page);
+ if (de || IS_ERR(*res_page))
break;
}
out:
- f2fs_fname_free_filename(&fname);
return de;
}
-struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+/*
+ * Find an entry in the specified directory with the wanted name.
+ * It returns the page where the entry was found (as a parameter - res_page),
+ * and the entry itself. Page is returned mapped and unlocked.
+ * Entry is guaranteed to be valid.
+ */
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
+ const struct qstr *child, struct page **res_page)
{
- struct page *page;
- struct f2fs_dir_entry *de;
- struct f2fs_dentry_block *dentry_blk;
-
- if (f2fs_has_inline_dentry(dir))
- return f2fs_parent_inline_dir(dir, p);
+ struct f2fs_dir_entry *de = NULL;
+ struct fscrypt_name fname;
+ int err;
- page = get_lock_data_page(dir, 0, false);
- if (IS_ERR(page))
+ err = fscrypt_setup_filename(dir, child, 1, &fname);
+ if (err) {
+ *res_page = ERR_PTR(err);
return NULL;
+ }
+
+ de = __f2fs_find_entry(dir, &fname, res_page);
- dentry_blk = kmap(page);
- de = &dentry_blk->dentry[1];
- *p = page;
- unlock_page(page);
+ fscrypt_free_filename(&fname);
return de;
}
-ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr)
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+{
+ struct qstr dotdot = QSTR_INIT("..", 2);
+
+ return f2fs_find_entry(dir, &dotdot, p);
+}
+
+ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
+ struct page **page)
{
ino_t res = 0;
struct f2fs_dir_entry *de;
- struct page *page;
- de = f2fs_find_entry(dir, qstr, &page);
+ de = f2fs_find_entry(dir, qstr, page);
if (de) {
res = le32_to_cpu(de->ino);
- f2fs_dentry_kunmap(dir, page);
- f2fs_put_page(page, 0);
+ f2fs_dentry_kunmap(dir, *page);
+ f2fs_put_page(*page, 0);
}
return res;
@@ -300,14 +306,14 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
{
enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
lock_page(page);
- f2fs_wait_on_page_writeback(page, type);
+ f2fs_wait_on_page_writeback(page, type, true);
de->ino = cpu_to_le32(inode->i_ino);
set_de_type(de, inode->i_mode);
f2fs_dentry_kunmap(dir, page);
set_page_dirty(page);
- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- mark_inode_dirty(dir);
+ dir->i_mtime = dir->i_ctime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
f2fs_put_page(page, 1);
}
@@ -315,7 +321,7 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
{
struct f2fs_inode *ri;
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
/* copy name info. to this inode page */
ri = F2FS_INODE(ipage);
@@ -345,24 +351,14 @@ int update_dent_inode(struct inode *inode, struct inode *to,
void do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d)
{
- struct f2fs_dir_entry *de;
+ struct qstr dot = QSTR_INIT(".", 1);
+ struct qstr dotdot = QSTR_INIT("..", 2);
- de = &d->dentry[0];
- de->name_len = cpu_to_le16(1);
- de->hash_code = 0;
- de->ino = cpu_to_le32(inode->i_ino);
- memcpy(d->filename[0], ".", 1);
- set_de_type(de, inode->i_mode);
+ /* update dirent of "." */
+ f2fs_update_dentry(inode->i_ino, inode->i_mode, d, &dot, 0, 0);
- de = &d->dentry[1];
- de->hash_code = 0;
- de->name_len = cpu_to_le16(2);
- de->ino = cpu_to_le32(parent->i_ino);
- memcpy(d->filename[1], "..", 2);
- set_de_type(de, parent->i_mode);
-
- test_and_set_bit_le(0, (void *)d->bitmap);
- test_and_set_bit_le(1, (void *)d->bitmap);
+ /* update dirent of ".." */
+ f2fs_update_dentry(parent->i_ino, parent->i_mode, d, &dotdot, 0, 1);
}
static int make_empty_dir(struct inode *inode,
@@ -392,32 +388,38 @@ static int make_empty_dir(struct inode *inode,
}
struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct qstr *name, struct page *dpage)
+ const struct qstr *new_name, const struct qstr *orig_name,
+ struct page *dpage)
{
struct page *page;
int err;
- if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
+ if (is_inode_flag_set(inode, FI_NEW_INODE)) {
page = new_inode_page(inode);
if (IS_ERR(page))
return page;
if (S_ISDIR(inode->i_mode)) {
+			/* in order to handle the error case */
+ get_page(page);
err = make_empty_dir(inode, dir, page);
- if (err)
- goto error;
+ if (err) {
+ lock_page(page);
+ goto put_error;
+ }
+ put_page(page);
}
err = f2fs_init_acl(inode, dir, page, dpage);
if (err)
goto put_error;
- err = f2fs_init_security(inode, dir, name, page);
+ err = f2fs_init_security(inode, dir, orig_name, page);
if (err)
goto put_error;
if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode)) {
- err = f2fs_inherit_context(dir, inode, page);
+ err = fscrypt_inherit_context(dir, inode, page, false);
if (err)
goto put_error;
}
@@ -429,14 +431,14 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
set_cold_node(inode, page);
}
- if (name)
- init_dent_inode(name, page);
+ if (new_name)
+ init_dent_inode(new_name, page);
/*
* This file should be checkpointed during fsync.
* We lost i_pino from now on.
*/
- if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
+ if (is_inode_flag_set(inode, FI_INC_LINK)) {
file_lost_pino(inode);
/*
* If link the tmpfile to alias through linkat path,
@@ -444,41 +446,33 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
*/
if (inode->i_nlink == 0)
remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
- inc_nlink(inode);
+ f2fs_i_links_write(inode, true);
}
return page;
put_error:
+ clear_nlink(inode);
+ update_inode(inode, page);
f2fs_put_page(page, 1);
-error:
- /* once the failed inode becomes a bad inode, i_mode is S_IFREG */
- truncate_inode_pages(&inode->i_data, 0);
- truncate_blocks(inode, 0, false);
- remove_dirty_dir_inode(inode);
- remove_inode_page(inode);
return ERR_PTR(err);
}
void update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth)
{
- if (inode && is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
- if (S_ISDIR(inode->i_mode)) {
- inc_nlink(dir);
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
- clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+ if (inode && is_inode_flag_set(inode, FI_NEW_INODE)) {
+ if (S_ISDIR(inode->i_mode))
+ f2fs_i_links_write(dir, true);
+ clear_inode_flag(inode, FI_NEW_INODE);
}
- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- mark_inode_dirty(dir);
+ dir->i_mtime = dir->i_ctime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
- if (F2FS_I(dir)->i_current_depth != current_depth) {
- F2FS_I(dir)->i_current_depth = current_depth;
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
+ if (F2FS_I(dir)->i_current_depth != current_depth)
+ f2fs_i_depth_write(dir, current_depth);
- if (inode && is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ if (inode && is_inode_flag_set(inode, FI_INC_LINK))
+ clear_inode_flag(inode, FI_INC_LINK);
}
int room_for_filename(const void *bitmap, int slots, int max_slots)
@@ -515,15 +509,16 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
memcpy(d->filename[bit_pos], name->name, name->len);
de->ino = cpu_to_le32(ino);
set_de_type(de, mode);
- for (i = 0; i < slots; i++)
- test_and_set_bit_le(bit_pos + i, (void *)d->bitmap);
+ for (i = 0; i < slots; i++) {
+ __set_bit_le(bit_pos + i, (void *)d->bitmap);
+		/* zero name_len in extra slots so readdir won't parse garbage */
+ if (i)
+ (de + i)->name_len = 0;
+ }
}
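
For reference on the slot loop above (a worked example, assuming the standard on-disk constant F2FS_SLOT_LEN == 8): GET_DENTRY_SLOTS() rounds the name length up to 8-byte slots, and only slot 0 of a multi-slot entry carries a meaningful name_len.

/* GET_DENTRY_SLOTS(x) == (x + F2FS_SLOT_LEN - 1) >> 3, F2FS_SLOT_LEN == 8 */
/*                                                                         */
/*   8-byte name:  (8 + 7) >> 3 = 1 slot                                   */
/*   20-byte name: (20 + 7) >> 3 = 3 slots; slot 0 keeps name_len = 20,    */
/*   and the loop above zeroes name_len in slots 1-2 so that readdir       */
/*   never misreads a continuation slot as an independent entry.           */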
-/*
- * Caller should grab and release a rwsem by calling f2fs_lock_op() and
- * f2fs_unlock_op().
- */
-int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode)
{
unsigned int bit_pos;
@@ -536,28 +531,11 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
struct f2fs_dentry_block *dentry_blk = NULL;
struct f2fs_dentry_ptr d;
struct page *page = NULL;
- struct f2fs_filename fname;
- struct qstr new_name;
- int slots, err;
-
- err = f2fs_fname_setup_filename(dir, name, 0, &fname);
- if (err)
- return err;
-
- new_name.name = fname_name(&fname);
- new_name.len = fname_len(&fname);
-
- if (f2fs_has_inline_dentry(dir)) {
- err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
- if (!err || err != -EAGAIN)
- goto out;
- else
- err = 0;
- }
+ int slots, err = 0;
level = 0;
- slots = GET_DENTRY_SLOTS(new_name.len);
- dentry_hash = f2fs_dentry_hash(&new_name, NULL);
+ slots = GET_DENTRY_SLOTS(new_name->len);
+ dentry_hash = f2fs_dentry_hash(new_name);
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == dentry_hash) {
@@ -566,10 +544,12 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
}
start:
- if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) {
- err = -ENOSPC;
- goto out;
- }
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH))
+ return -ENOSPC;
+#endif
+ if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
+ return -ENOSPC;
/* Increase the depth, if required */
if (level == current_depth)
@@ -583,10 +563,8 @@ start:
for (block = bidx; block <= (bidx + nblock - 1); block++) {
dentry_page = get_new_data_page(dir, NULL, block, true);
- if (IS_ERR(dentry_page)) {
- err = PTR_ERR(dentry_page);
- goto out;
- }
+ if (IS_ERR(dentry_page))
+ return PTR_ERR(dentry_page);
dentry_blk = kmap(dentry_page);
bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
@@ -602,11 +580,12 @@ start:
++level;
goto start;
add_dentry:
- f2fs_wait_on_page_writeback(dentry_page, DATA);
+ f2fs_wait_on_page_writeback(dentry_page, DATA, true);
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, &new_name, NULL);
+ page = init_inode_metadata(inode, dir, new_name,
+ orig_name, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -616,14 +595,12 @@ add_dentry:
}
make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
- f2fs_update_dentry(ino, mode, &d, &new_name, dentry_hash, bit_pos);
+ f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);
set_page_dirty(dentry_page);
if (inode) {
- /* we don't need to mark_inode_dirty now */
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
+ f2fs_i_pino_write(inode, dir->i_ino);
f2fs_put_page(page, 1);
}
@@ -632,14 +609,49 @@ fail:
if (inode)
up_write(&F2FS_I(inode)->i_sem);
- if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
- update_inode_page(dir);
- clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
-out:
- f2fs_fname_free_filename(&fname);
+
+ return err;
+}
+
+int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
+ struct inode *inode, nid_t ino, umode_t mode)
+{
+ struct qstr new_name;
+ int err = -EAGAIN;
+
+ new_name.name = fname_name(fname);
+ new_name.len = fname_len(fname);
+
+ if (f2fs_has_inline_dentry(dir))
+ err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname,
+ inode, ino, mode);
+ if (err == -EAGAIN)
+ err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname,
+ inode, ino, mode);
+
+ f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+ return err;
+}
+
+/*
+ * Caller should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op().
+ */
+int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+ struct inode *inode, nid_t ino, umode_t mode)
+{
+ struct fscrypt_name fname;
+ int err;
+
+ err = fscrypt_setup_filename(dir, name, 0, &fname);
+ if (err)
+ return err;
+
+ err = __f2fs_do_add_link(dir, &fname, inode, ino, mode);
+
+ fscrypt_free_filename(&fname);
return err;
}
@@ -649,46 +661,39 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
int err = 0;
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, NULL, NULL);
+ page = init_inode_metadata(inode, dir, NULL, NULL, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
}
- /* we don't need to mark_inode_dirty now */
- update_inode(inode, page);
f2fs_put_page(page, 1);
- clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+ clear_inode_flag(inode, FI_NEW_INODE);
fail:
up_write(&F2FS_I(inode)->i_sem);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return err;
}
-void f2fs_drop_nlink(struct inode *dir, struct inode *inode, struct page *page)
+void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
down_write(&F2FS_I(inode)->i_sem);
- if (S_ISDIR(inode->i_mode)) {
- drop_nlink(dir);
- if (page)
- update_inode(dir, page);
- else
- update_inode_page(dir);
- }
- inode->i_ctime = CURRENT_TIME;
+ if (S_ISDIR(inode->i_mode))
+ f2fs_i_links_write(dir, false);
+ inode->i_ctime = current_time(inode);
- drop_nlink(inode);
+ f2fs_i_links_write(inode, false);
if (S_ISDIR(inode->i_mode)) {
- drop_nlink(inode);
- i_size_write(inode, 0);
+ f2fs_i_links_write(inode, false);
+ f2fs_i_size_write(inode, 0);
}
up_write(&F2FS_I(inode)->i_sem);
- update_inode_page(inode);
if (inode->i_nlink == 0)
- add_orphan_inode(sbi, inode->i_ino);
+ add_orphan_inode(inode);
else
release_orphan_inode(sbi);
}
@@ -705,11 +710,13 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
int i;
+ f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+
if (f2fs_has_inline_dentry(dir))
return f2fs_delete_inline_entry(dentry, page, dir, inode);
lock_page(page);
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, true);
dentry_blk = page_address(page);
bit_pos = dentry - dentry_blk->dentry;
@@ -723,10 +730,11 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
kunmap(page); /* kunmap - pair of f2fs_find_entry */
set_page_dirty(page);
- dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
- f2fs_drop_nlink(dir, inode, NULL);
+ f2fs_drop_nlink(dir, inode);
if (bit_pos == NR_DENTRY_IN_BLOCK &&
!truncate_hole(dir, page->index, page->index + 1)) {
@@ -734,6 +742,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
ClearPagePrivate(page);
ClearPageUptodate(page);
inode_dec_dirty_pages(dir);
+ remove_dirty_inode(dir);
}
f2fs_put_page(page, 1);
}
@@ -776,13 +785,13 @@ bool f2fs_empty_dir(struct inode *dir)
return true;
}
-bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
- unsigned int start_pos, struct f2fs_str *fstr)
+int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+ unsigned int start_pos, struct fscrypt_str *fstr)
{
unsigned char d_type = DT_UNKNOWN;
unsigned int bit_pos;
struct f2fs_dir_entry *de = NULL;
- struct f2fs_str de_name = FSTR_INIT(NULL, 0);
+ struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
bit_pos = ((unsigned long)ctx->pos % d->max);
@@ -792,29 +801,26 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
break;
de = &d->dentry[bit_pos];
- if (de->file_type < F2FS_FT_MAX)
- d_type = f2fs_filetype_table[de->file_type];
- else
- d_type = DT_UNKNOWN;
+ if (de->name_len == 0) {
+ bit_pos++;
+ ctx->pos = start_pos + bit_pos;
+ continue;
+ }
+
+ d_type = get_de_type(de);
de_name.name = d->filename[bit_pos];
de_name.len = le16_to_cpu(de->name_len);
if (f2fs_encrypted_inode(d->inode)) {
int save_len = fstr->len;
- int ret;
+ int err;
- de_name.name = kmalloc(de_name.len, GFP_NOFS);
- if (!de_name.name)
- return false;
-
- memcpy(de_name.name, d->filename[bit_pos], de_name.len);
-
- ret = f2fs_fname_disk_to_usr(d->inode, &de->hash_code,
- &de_name, fstr);
- kfree(de_name.name);
- if (ret < 0)
- return true;
+ err = fscrypt_fname_disk_to_usr(d->inode,
+ (u32)de->hash_code, 0,
+ &de_name, fstr);
+ if (err)
+ return err;
de_name = *fstr;
fstr->len = save_len;
@@ -822,12 +828,12 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
if (!dir_emit(ctx, de_name.name, de_name.len,
le32_to_cpu(de->ino), d_type))
- return true;
+ return 1;
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
ctx->pos = start_pos + bit_pos;
}
- return false;
+ return 0;
}
static int f2fs_readdir(struct file *file, struct dir_context *ctx)
@@ -839,16 +845,15 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
struct file_ra_state *ra = &file->f_ra;
unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
struct f2fs_dentry_ptr d;
- struct f2fs_str fstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
int err = 0;
if (f2fs_encrypted_inode(inode)) {
- err = f2fs_get_encryption_info(inode);
- if (err)
+ err = fscrypt_get_encryption_info(inode);
+ if (err && err != -ENOKEY)
return err;
- err = f2fs_fname_crypto_alloc_buffer(inode, F2FS_NAME_LEN,
- &fstr);
+ err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr);
if (err < 0)
return err;
}
@@ -865,29 +870,42 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
for (; n < npages; n++) {
dentry_page = get_lock_data_page(inode, n, false);
- if (IS_ERR(dentry_page))
- continue;
+ if (IS_ERR(dentry_page)) {
+ err = PTR_ERR(dentry_page);
+ if (err == -ENOENT) {
+ err = 0;
+ continue;
+ } else {
+ goto out;
+ }
+ }
dentry_blk = kmap(dentry_page);
make_dentry_ptr(inode, &d, (void *)dentry_blk, 1);
- if (f2fs_fill_dentries(ctx, &d, n * NR_DENTRY_IN_BLOCK, &fstr))
- goto stop;
+ err = f2fs_fill_dentries(ctx, &d,
+ n * NR_DENTRY_IN_BLOCK, &fstr);
+ if (err) {
+ kunmap(dentry_page);
+ f2fs_put_page(dentry_page, 1);
+ break;
+ }
ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK;
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
- dentry_page = NULL;
- }
-stop:
- if (dentry_page && !IS_ERR(dentry_page)) {
- kunmap(dentry_page);
- f2fs_put_page(dentry_page, 1);
}
out:
- f2fs_fname_crypto_free_buffer(&fstr);
- return err;
+ fscrypt_fname_free_buffer(&fstr);
+ return err < 0 ? err : 0;
+}
+
+static int f2fs_dir_open(struct inode *inode, struct file *filp)
+{
+ if (f2fs_encrypted_inode(inode))
+ return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
+ return 0;
}
const struct file_operations f2fs_dir_operations = {
@@ -895,6 +913,7 @@ const struct file_operations f2fs_dir_operations = {
.read = generic_read_dir,
.iterate = f2fs_readdir,
.fsync = f2fs_sync_file,
+ .open = f2fs_dir_open,
.unlocked_ioctl = f2fs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = f2fs_compat_ioctl,
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 7ddba812e11b..4db44da7ef69 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -33,10 +33,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
en->ei = *ei;
INIT_LIST_HEAD(&en->list);
+ en->et = et;
rb_link_node(&en->rb_node, parent, p);
rb_insert_color(&en->rb_node, &et->root);
- et->count++;
+ atomic_inc(&et->node_cnt);
atomic_inc(&sbi->total_ext_node);
return en;
}
@@ -45,11 +46,29 @@ static void __detach_extent_node(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_node *en)
{
rb_erase(&en->rb_node, &et->root);
- et->count--;
+ atomic_dec(&et->node_cnt);
atomic_dec(&sbi->total_ext_node);
if (et->cached_en == en)
et->cached_en = NULL;
+ kmem_cache_free(extent_node_slab, en);
+}
+
+/*
+ * Flow to release an extent_node:
+ * 1. list_del_init
+ * 2. __detach_extent_node
+ * 3. kmem_cache_free.
+ */
+static void __release_extent_node(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, struct extent_node *en)
+{
+ spin_lock(&sbi->extent_lock);
+ f2fs_bug_on(sbi, list_empty(&en->list));
+ list_del_init(&en->list);
+ spin_unlock(&sbi->extent_lock);
+
+ __detach_extent_node(sbi, et, en);
}
static struct extent_tree *__grab_extent_tree(struct inode *inode)
@@ -68,11 +87,13 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
et->root = RB_ROOT;
et->cached_en = NULL;
rwlock_init(&et->lock);
- atomic_set(&et->refcount, 0);
- et->count = 0;
- sbi->total_ext_tree++;
+ INIT_LIST_HEAD(&et->list);
+ atomic_set(&et->node_cnt, 0);
+ atomic_inc(&sbi->total_ext_tree);
+ } else {
+ atomic_dec(&sbi->total_zombie_tree);
+ list_del_init(&et->list);
}
- atomic_inc(&et->refcount);
up_write(&sbi->extent_tree_lock);
/* never died until evict_inode */
@@ -127,32 +148,21 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
}
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
- struct extent_tree *et, bool free_all)
+ struct extent_tree *et)
{
struct rb_node *node, *next;
struct extent_node *en;
- unsigned int count = et->count;
+ unsigned int count = atomic_read(&et->node_cnt);
node = rb_first(&et->root);
while (node) {
next = rb_next(node);
en = rb_entry(node, struct extent_node, rb_node);
-
- if (free_all) {
- spin_lock(&sbi->extent_lock);
- if (!list_empty(&en->list))
- list_del_init(&en->list);
- spin_unlock(&sbi->extent_lock);
- }
-
- if (free_all || list_empty(&en->list)) {
- __detach_extent_node(sbi, et, en);
- kmem_cache_free(extent_node_slab, en);
- }
+ __release_extent_node(sbi, et, en);
node = next;
}
- return count - et->count;
+ return count - atomic_read(&et->node_cnt);
}
static void __drop_largest_extent(struct inode *inode,
@@ -160,38 +170,38 @@ static void __drop_largest_extent(struct inode *inode,
{
struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
- if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs)
+ if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
largest->len = 0;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
}
-void f2fs_drop_largest_extent(struct inode *inode, pgoff_t fofs)
-{
- if (!f2fs_may_extent_tree(inode))
- return;
-
- __drop_largest_extent(inode, fofs, 1);
-}
-
-void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+/* return true if the inode page is changed */
+bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et;
struct extent_node *en;
struct extent_info ei;
- if (!f2fs_may_extent_tree(inode))
- return;
+ if (!f2fs_may_extent_tree(inode)) {
+ /* drop largest extent */
+ if (i_ext && i_ext->len) {
+ i_ext->len = 0;
+ return true;
+ }
+ return false;
+ }
et = __grab_extent_tree(inode);
- if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
- return;
+ if (!i_ext || !i_ext->len)
+ return false;
- set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
- le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
+ get_extent_info(&ei, i_ext);
write_lock(&et->lock);
- if (et->count)
+ if (atomic_read(&et->node_cnt))
goto out;
en = __init_extent_tree(sbi, et, &ei);
@@ -202,6 +212,7 @@ void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
}
out:
write_unlock(&et->lock);
+ return false;
}
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
@@ -230,9 +241,10 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
if (en) {
*ei = en->ei;
spin_lock(&sbi->extent_lock);
- if (!list_empty(&en->list))
+ if (!list_empty(&en->list)) {
list_move_tail(&en->list, &sbi->extent_list);
- et->cached_en = en;
+ et->cached_en = en;
+ }
spin_unlock(&sbi->extent_lock);
ret = true;
}
@@ -325,12 +337,12 @@ lookup_neighbors:
return en;
}
-static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+static struct extent_node *__try_merge_extent_node(struct inode *inode,
struct extent_tree *et, struct extent_info *ei,
- struct extent_node **den,
struct extent_node *prev_ex,
struct extent_node *next_ex)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_node *en = NULL;
if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
@@ -340,28 +352,34 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
}
if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
- if (en) {
- __detach_extent_node(sbi, et, prev_ex);
- *den = prev_ex;
- }
+ if (en)
+ __release_extent_node(sbi, et, prev_ex);
next_ex->ei.fofs = ei->fofs;
next_ex->ei.blk = ei->blk;
next_ex->ei.len += ei->len;
en = next_ex;
}
- if (en) {
- __try_update_largest_extent(et, en);
+ if (!en)
+ return NULL;
+
+ __try_update_largest_extent(inode, et, en);
+
+ spin_lock(&sbi->extent_lock);
+ if (!list_empty(&en->list)) {
+ list_move_tail(&en->list, &sbi->extent_list);
et->cached_en = en;
}
+ spin_unlock(&sbi->extent_lock);
return en;
}
-static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+static struct extent_node *__insert_extent_tree(struct inode *inode,
struct extent_tree *et, struct extent_info *ei,
struct rb_node **insert_p,
struct rb_node *insert_parent)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct rb_node **p = &et->root.rb_node;
struct rb_node *parent = NULL;
struct extent_node *en = NULL;
@@ -388,8 +406,13 @@ do_insert:
if (!en)
return NULL;
- __try_update_largest_extent(et, en);
+ __try_update_largest_extent(inode, et, en);
+
+ /* update in global extent list */
+ spin_lock(&sbi->extent_lock);
+ list_add_tail(&en->list, &sbi->extent_list);
et->cached_en = en;
+ spin_unlock(&sbi->extent_lock);
return en;
}
@@ -412,7 +435,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
write_lock(&et->lock);
- if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
+ if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
write_unlock(&et->lock);
return false;
}
@@ -454,7 +477,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
set_extent_info(&ei, end,
end - dei.fofs + dei.blk,
org_end - end);
- en1 = __insert_extent_tree(sbi, et, &ei,
+ en1 = __insert_extent_tree(inode, et, &ei,
NULL, NULL);
next_en = en1;
} else {
@@ -475,9 +498,9 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
}
if (parts)
- __try_update_largest_extent(et, en);
+ __try_update_largest_extent(inode, et, en);
else
- __detach_extent_node(sbi, et, en);
+ __release_extent_node(sbi, et, en);
/*
* if original extent is split into zero or two parts, extent
@@ -488,58 +511,28 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
insert_p = NULL;
insert_parent = NULL;
}
-
- /* update in global extent list */
- spin_lock(&sbi->extent_lock);
- if (!parts && !list_empty(&en->list))
- list_del(&en->list);
- if (en1)
- list_add_tail(&en1->list, &sbi->extent_list);
- spin_unlock(&sbi->extent_lock);
-
- /* release extent node */
- if (!parts)
- kmem_cache_free(extent_node_slab, en);
-
en = next_en;
}
/* 3. update extent in extent cache */
if (blkaddr) {
- struct extent_node *den = NULL;
set_extent_info(&ei, fofs, blkaddr, len);
- en1 = __try_merge_extent_node(sbi, et, &ei, &den,
- prev_en, next_en);
- if (!en1)
- en1 = __insert_extent_tree(sbi, et, &ei,
+ if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
+ __insert_extent_tree(inode, et, &ei,
insert_p, insert_parent);
/* give up extent_cache, if split and small updates happen */
if (dei.len >= 1 &&
prev.len < F2FS_MIN_EXTENT_LEN &&
et->largest.len < F2FS_MIN_EXTENT_LEN) {
- et->largest.len = 0;
- set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
- }
-
- spin_lock(&sbi->extent_lock);
- if (en1) {
- if (list_empty(&en1->list))
- list_add_tail(&en1->list, &sbi->extent_list);
- else
- list_move_tail(&en1->list, &sbi->extent_list);
+ __drop_largest_extent(inode, 0, UINT_MAX);
+ set_inode_flag(inode, FI_NO_EXTENT);
}
- if (den && !list_empty(&den->list))
- list_del(&den->list);
- spin_unlock(&sbi->extent_lock);
-
- if (den)
- kmem_cache_free(extent_node_slab, den);
}
- if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
- __free_extent_tree(sbi, et, true);
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
+ __free_extent_tree(sbi, et);
write_unlock(&et->lock);
@@ -548,46 +541,42 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
- struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
- struct extent_node *en, *tmp;
- unsigned long ino = F2FS_ROOT_INO(sbi);
- struct radix_tree_root *root = &sbi->extent_tree_root;
- unsigned int found;
+ struct extent_tree *et, *next;
+ struct extent_node *en;
unsigned int node_cnt = 0, tree_cnt = 0;
int remained;
if (!test_opt(sbi, EXTENT_CACHE))
return 0;
+ if (!atomic_read(&sbi->total_zombie_tree))
+ goto free_node;
+
if (!down_write_trylock(&sbi->extent_tree_lock))
goto out;
/* 1. remove unreferenced extent tree */
- while ((found = radix_tree_gang_lookup(root,
- (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
- unsigned i;
-
- ino = treevec[found - 1]->ino + 1;
- for (i = 0; i < found; i++) {
- struct extent_tree *et = treevec[i];
-
- if (!atomic_read(&et->refcount)) {
- write_lock(&et->lock);
- node_cnt += __free_extent_tree(sbi, et, true);
- write_unlock(&et->lock);
-
- radix_tree_delete(root, et->ino);
- kmem_cache_free(extent_tree_slab, et);
- sbi->total_ext_tree--;
- tree_cnt++;
-
- if (node_cnt + tree_cnt >= nr_shrink)
- goto unlock_out;
- }
+ list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
+ if (atomic_read(&et->node_cnt)) {
+ write_lock(&et->lock);
+ node_cnt += __free_extent_tree(sbi, et);
+ write_unlock(&et->lock);
}
+ f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+ list_del_init(&et->list);
+ radix_tree_delete(&sbi->extent_tree_root, et->ino);
+ kmem_cache_free(extent_tree_slab, et);
+ atomic_dec(&sbi->total_ext_tree);
+ atomic_dec(&sbi->total_zombie_tree);
+ tree_cnt++;
+
+ if (node_cnt + tree_cnt >= nr_shrink)
+ goto unlock_out;
+ cond_resched();
}
up_write(&sbi->extent_tree_lock);
+free_node:
/* 2. remove LRU extent entries */
if (!down_write_trylock(&sbi->extent_tree_lock))
goto out;
@@ -595,34 +584,29 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
remained = nr_shrink - (node_cnt + tree_cnt);
spin_lock(&sbi->extent_lock);
- list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
- if (!remained--)
+ for (; remained > 0; remained--) {
+ if (list_empty(&sbi->extent_list))
break;
- list_del_init(&en->list);
- }
- spin_unlock(&sbi->extent_lock);
-
- /*
- * reset ino for searching victims from beginning of global extent tree.
- */
- ino = F2FS_ROOT_INO(sbi);
-
- while ((found = radix_tree_gang_lookup(root,
- (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
- unsigned i;
+ en = list_first_entry(&sbi->extent_list,
+ struct extent_node, list);
+ et = en->et;
+ if (!write_trylock(&et->lock)) {
+ /* refresh this extent node's position in extent list */
+ list_move_tail(&en->list, &sbi->extent_list);
+ continue;
+ }
- ino = treevec[found - 1]->ino + 1;
- for (i = 0; i < found; i++) {
- struct extent_tree *et = treevec[i];
+ list_del_init(&en->list);
+ spin_unlock(&sbi->extent_lock);
- write_lock(&et->lock);
- node_cnt += __free_extent_tree(sbi, et, false);
- write_unlock(&et->lock);
+ __detach_extent_node(sbi, et, en);
- if (node_cnt + tree_cnt >= nr_shrink)
- goto unlock_out;
- }
+ write_unlock(&et->lock);
+ node_cnt++;
+ spin_lock(&sbi->extent_lock);
}
+ spin_unlock(&sbi->extent_lock);
+
unlock_out:
up_write(&sbi->extent_tree_lock);
out:
@@ -637,16 +621,29 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode)
struct extent_tree *et = F2FS_I(inode)->extent_tree;
unsigned int node_cnt = 0;
- if (!et)
+ if (!et || !atomic_read(&et->node_cnt))
return 0;
write_lock(&et->lock);
- node_cnt = __free_extent_tree(sbi, et, true);
+ node_cnt = __free_extent_tree(sbi, et);
write_unlock(&et->lock);
return node_cnt;
}
+void f2fs_drop_extent_tree(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et = F2FS_I(inode)->extent_tree;
+
+ set_inode_flag(inode, FI_NO_EXTENT);
+
+ write_lock(&et->lock);
+ __free_extent_tree(sbi, et);
+ __drop_largest_extent(inode, 0, UINT_MAX);
+ write_unlock(&et->lock);
+}
+
void f2fs_destroy_extent_tree(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -656,8 +653,12 @@ void f2fs_destroy_extent_tree(struct inode *inode)
if (!et)
return;
- if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
- atomic_dec(&et->refcount);
+ if (inode->i_nlink && !is_bad_inode(inode) &&
+ atomic_read(&et->node_cnt)) {
+ down_write(&sbi->extent_tree_lock);
+ list_add_tail(&et->list, &sbi->zombie_list);
+ atomic_inc(&sbi->total_zombie_tree);
+ up_write(&sbi->extent_tree_lock);
return;
}
@@ -666,11 +667,10 @@ void f2fs_destroy_extent_tree(struct inode *inode)
/* delete extent tree entry in radix tree */
down_write(&sbi->extent_tree_lock);
- atomic_dec(&et->refcount);
- f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
+ f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
kmem_cache_free(extent_tree_slab, et);
- sbi->total_ext_tree--;
+ atomic_dec(&sbi->total_ext_tree);
up_write(&sbi->extent_tree_lock);
F2FS_I(inode)->extent_tree = NULL;
@@ -689,20 +689,20 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
- struct f2fs_inode_info *fi = F2FS_I(dn->inode);
pgoff_t fofs;
+ block_t blkaddr;
if (!f2fs_may_extent_tree(dn->inode))
return;
- f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-
-
- fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
- dn->ofs_in_node;
+ if (dn->data_blkaddr == NEW_ADDR)
+ blkaddr = NULL_ADDR;
+ else
+ blkaddr = dn->data_blkaddr;
- if (f2fs_update_extent_tree_range(dn->inode, fofs, dn->data_blkaddr, 1))
- sync_inode_page(dn);
+ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+ dn->ofs_in_node;
+ f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
@@ -712,8 +712,7 @@ void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
if (!f2fs_may_extent_tree(dn->inode))
return;
- if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len))
- sync_inode_page(dn);
+ f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}
void init_extent_cache_info(struct f2fs_sb_info *sbi)
@@ -722,7 +721,9 @@ void init_extent_cache_info(struct f2fs_sb_info *sbi)
init_rwsem(&sbi->extent_tree_lock);
INIT_LIST_HEAD(&sbi->extent_list);
spin_lock_init(&sbi->extent_lock);
- sbi->total_ext_tree = 0;
+ atomic_set(&sbi->total_ext_tree, 0);
+ INIT_LIST_HEAD(&sbi->zombie_list);
+ atomic_set(&sbi->total_zombie_tree, 0);
atomic_set(&sbi->total_ext_node, 0);
}
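
The zombie list initialized above ties the shrink path together: f2fs_destroy_extent_tree() parks trees that still hold nodes on sbi->zombie_list instead of freeing them inline, and the shrinker reclaims those before trimming live trees. A lifecycle sketch (illustrative, not part of the patch):

	/*
	 * Zombie-list lifecycle as wired up in this patch (sketch):
	 *   inode eviction -> f2fs_destroy_extent_tree()
	 *       node_cnt != 0 ? move tree to sbi->zombie_list : free it now
	 *   memory pressure -> f2fs_shrink_extent_tree()
	 *       drains zombie trees first, then trims LRU extent nodes
	 */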
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9dfbfe6dc775..eb01a0329e90 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -21,10 +21,12 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/fscrypto.h>
+#include <crypto/hash.h>
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition) BUG_ON(condition)
-#define f2fs_down_write(x, y) down_write_nest_lock(x, y)
#else
#define f2fs_bug_on(sbi, condition) \
do { \
@@ -33,7 +35,30 @@
set_sbi_flag(sbi, SBI_NEED_FSCK); \
} \
} while (0)
-#define f2fs_down_write(x, y) down_write(x)
+#endif
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+enum {
+ FAULT_KMALLOC,
+ FAULT_PAGE_ALLOC,
+ FAULT_ALLOC_NID,
+ FAULT_ORPHAN,
+ FAULT_BLOCK,
+ FAULT_DIR_DEPTH,
+ FAULT_EVICT_INODE,
+ FAULT_IO,
+ FAULT_CHECKPOINT,
+ FAULT_MAX,
+};
+
+struct f2fs_fault_info {
+ atomic_t inject_ops;
+ unsigned int inject_rate;
+ unsigned int inject_type;
+};
+
+extern char *fault_name[FAULT_MAX];
+#define IS_FAULT_SET(fi, type) (fi->inject_type & (1 << (type)))
#endif
/*
@@ -54,6 +79,10 @@
#define F2FS_MOUNT_FASTBOOT 0x00001000
#define F2FS_MOUNT_EXTENT_CACHE 0x00002000
#define F2FS_MOUNT_FORCE_FG_GC 0x00004000
+#define F2FS_MOUNT_DATA_FLUSH 0x00008000
+#define F2FS_MOUNT_FAULT_INJECTION 0x00010000
+#define F2FS_MOUNT_ADAPTIVE 0x00020000
+#define F2FS_MOUNT_LFS 0x00040000
#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
@@ -74,6 +103,7 @@ struct f2fs_mount_info {
};
#define F2FS_FEATURE_ENCRYPT 0x0001
+#define F2FS_FEATURE_BLKZONED 0x0002
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -82,25 +112,52 @@ struct f2fs_mount_info {
#define F2FS_CLEAR_FEATURE(sb, mask) \
F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask)
-#define CRCPOLY_LE 0xedb88320
-
-static inline __u32 f2fs_crc32(void *buf, size_t len)
+/**
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: wait queue head
+ *
+ * Returns true if wq has waiting processes
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_sleeper(wait_queue_head_t *wq)
{
- unsigned char *p = (unsigned char *)buf;
- __u32 crc = F2FS_SUPER_MAGIC;
- int i;
+ /*
+ * We need to be sure we are in sync with the
+ * add_wait_queue modifications to the wait queue.
+ *
+ * This memory barrier should be paired with one on the
+ * waiting side.
+ */
+ smp_mb();
+ return waitqueue_active(wq);
+}
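
For reference, a minimal sketch (not part of the patch) of the waiting side that the smp_mb() above pairs with; prepare_to_wait() supplies the matching barrier via set_current_state():

	DEFINE_WAIT(wait);

	/* waiter: the state change is ordered before the condition test */
	prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!condition)			/* 'condition' is illustrative */
		schedule();
	finish_wait(&wq, &wait);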
- while (len--) {
- crc ^= *p++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
- }
- return crc;
+static inline void inode_nohighmem(struct inode *inode)
+{
+ mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
-static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
+/**
+ * current_time - Return FS time
+ * @inode: inode.
+ *
+ * Return the current time truncated to the time granularity supported by
+ * the fs.
+ *
+ * Note that inode and inode->sb cannot be NULL.
+ * Otherwise, the function warns and returns time without truncation.
+ */
+static inline struct timespec current_time(struct inode *inode)
{
- return f2fs_crc32(buf, buf_size) == blk_crc;
+ struct timespec now = current_kernel_time();
+
+ if (unlikely(!inode->i_sb)) {
+ WARN(1, "current_time() called with uninitialized super_block in the inode");
+ return now;
+ }
+
+ return timespec_trunc(now, inode->i_sb->s_time_gran);
}
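
A typical call site would look like this (illustrative):

	/* stamp and persist the timestamps, truncated to s_time_gran */
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty_sync(inode);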
/*
@@ -119,12 +176,13 @@ enum {
CP_DISCARD,
};
-#define DEF_BATCHED_TRIM_SECTIONS 32
+#define DEF_BATCHED_TRIM_SECTIONS 2
#define BATCHED_TRIM_SEGMENTS(sbi) \
(SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
#define BATCHED_TRIM_BLOCKS(sbi) \
(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
#define DEF_CP_INTERVAL 60 /* 60 secs */
+#define DEF_IDLE_INTERVAL 5 /* 5 secs */
struct cp_control {
int reason;
@@ -158,13 +216,7 @@ struct ino_entry {
nid_t ino; /* inode number */
};
-/*
- * for the list of directory inodes or gc inodes.
- * NOTE: there are two slab users for this structure, if we add/modify/delete
- * fields in structure for one of slab users, it may affect fields or size of
- * other one, in this condition, it's better to split both of slab and related
- * data structure.
- */
+/* for the list of inodes to be GCed */
struct inode_entry {
struct list_head list; /* list head */
struct inode *inode; /* vfs inode pointer */
@@ -177,46 +229,52 @@ struct discard_entry {
int len; /* # of consecutive blocks of the discard */
};
+struct bio_entry {
+ struct list_head list;
+ struct bio *bio;
+ struct completion event;
+ int error;
+};
+
/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
struct list_head list; /* list head */
struct inode *inode; /* vfs inode pointer */
block_t blkaddr; /* block address locating the last fsync */
block_t last_dentry; /* block address locating the last dentry */
- block_t last_inode; /* block address locating the last inode */
};
-#define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats))
-#define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits))
+#define nats_in_cursum(jnl) (le16_to_cpu(jnl->n_nats))
+#define sits_in_cursum(jnl) (le16_to_cpu(jnl->n_sits))
-#define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne)
-#define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid)
-#define sit_in_journal(sum, i) (sum->sit_j.entries[i].se)
-#define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno)
+#define nat_in_journal(jnl, i) (jnl->nat_j.entries[i].ne)
+#define nid_in_journal(jnl, i) (jnl->nat_j.entries[i].nid)
+#define sit_in_journal(jnl, i) (jnl->sit_j.entries[i].se)
+#define segno_in_journal(jnl, i) (jnl->sit_j.entries[i].segno)
-#define MAX_NAT_JENTRIES(sum) (NAT_JOURNAL_ENTRIES - nats_in_cursum(sum))
-#define MAX_SIT_JENTRIES(sum) (SIT_JOURNAL_ENTRIES - sits_in_cursum(sum))
+#define MAX_NAT_JENTRIES(jnl) (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
+#define MAX_SIT_JENTRIES(jnl) (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
-static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
+static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
- int before = nats_in_cursum(rs);
- rs->n_nats = cpu_to_le16(before + i);
+ int before = nats_in_cursum(journal);
+ journal->n_nats = cpu_to_le16(before + i);
return before;
}
-static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
+static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
- int before = sits_in_cursum(rs);
- rs->n_sits = cpu_to_le16(before + i);
+ int before = sits_in_cursum(journal);
+ journal->n_sits = cpu_to_le16(before + i);
return before;
}
-static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
- int type)
+static inline bool __has_cursum_space(struct f2fs_journal *journal,
+ int size, int type)
{
if (type == NAT_JOURNAL)
- return size <= MAX_NAT_JENTRIES(sum);
- return size <= MAX_SIT_JENTRIES(sum);
+ return size <= MAX_NAT_JENTRIES(journal);
+ return size <= MAX_SIT_JENTRIES(journal);
}
/*
@@ -234,13 +292,13 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
#define F2FS_IOC_GARBAGE_COLLECT _IO(F2FS_IOCTL_MAGIC, 6)
#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7)
+#define F2FS_IOC_DEFRAGMENT _IO(F2FS_IOCTL_MAGIC, 8)
+#define F2FS_IOC_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
+ struct f2fs_move_range)
-#define F2FS_IOC_SET_ENCRYPTION_POLICY \
- _IOR('f', 19, struct f2fs_encryption_policy)
-#define F2FS_IOC_GET_ENCRYPTION_PWSALT \
- _IOW('f', 20, __u8[16])
-#define F2FS_IOC_GET_ENCRYPTION_POLICY \
- _IOW('f', 21, struct f2fs_encryption_policy)
+#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
+#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
+#define F2FS_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
/*
* should be same as XFS_IOC_GOINGDOWN.
@@ -256,33 +314,27 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
/*
* ioctl commands in 32 bit emulation
*/
-#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
-#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
+#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
+#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
+#define F2FS_IOC32_GETVERSION FS_IOC32_GETVERSION
#endif
-/*
- * For INODE and NODE manager
- */
-/* for directory operations */
-struct f2fs_str {
- unsigned char *name;
- u32 len;
+struct f2fs_defragment {
+ u64 start;
+ u64 len;
};
-struct f2fs_filename {
- const struct qstr *usr_fname;
- struct f2fs_str disk_name;
- f2fs_hash_t hash;
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- struct f2fs_str crypto_buf;
-#endif
+struct f2fs_move_range {
+ u32 dst_fd; /* destination fd */
+ u64 pos_in; /* start position in src_fd */
+ u64 pos_out; /* start position in dst_fd */
+ u64 len; /* size to move */
};
-#define FSTR_INIT(n, l) { .name = n, .len = l }
-#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
-#define fname_name(p) ((p)->disk_name.name)
-#define fname_len(p) ((p)->disk_name.len)
-
+/*
+ * For INODE and NODE manager
+ */
+/* for directory operations */
struct f2fs_dentry_ptr {
struct inode *inode;
const void *bitmap;
@@ -350,6 +402,7 @@ struct extent_node {
struct rb_node rb_node; /* rb node located in rb-tree */
struct list_head list; /* node in global extent list of sbi */
struct extent_info ei; /* extent info */
+ struct extent_tree *et; /* extent tree pointer */
};
struct extent_tree {
@@ -357,9 +410,9 @@ struct extent_tree {
struct rb_root root; /* root of extent info rb-tree */
struct extent_node *cached_en; /* recently accessed extent node */
struct extent_info largest; /* largest extent info */
+ struct list_head list; /* to be used by sbi->zombie_list */
rwlock_t lock; /* protect extent info rb-tree */
- atomic_t refcount; /* reference count of rb-tree */
- unsigned int count; /* # of extent node in rb-tree*/
+ atomic_t node_cnt; /* # of extent nodes in rb-tree */
};
/*
@@ -378,6 +431,7 @@ struct f2fs_map_blocks {
block_t m_lblk;
unsigned int m_len;
unsigned int m_flags;
+ pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
};
/* for flag in get_data_block */
@@ -385,6 +439,8 @@ struct f2fs_map_blocks {
#define F2FS_GET_BLOCK_DIO 1
#define F2FS_GET_BLOCK_FIEMAP 2
#define F2FS_GET_BLOCK_BMAP 3
+#define F2FS_GET_BLOCK_PRE_DIO 4
+#define F2FS_GET_BLOCK_PRE_AIO 5
/*
* i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
@@ -393,6 +449,7 @@ struct f2fs_map_blocks {
#define FADVISE_LOST_PINO_BIT 0x02
#define FADVISE_ENCRYPT_BIT 0x04
#define FADVISE_ENC_NAME_BIT 0x08
+#define FADVISE_KEEP_SIZE_BIT 0x10
#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
@@ -405,15 +462,8 @@ struct f2fs_map_blocks {
#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
-
-/* Encryption algorithms */
-#define F2FS_ENCRYPTION_MODE_INVALID 0
-#define F2FS_ENCRYPTION_MODE_AES_256_XTS 1
-#define F2FS_ENCRYPTION_MODE_AES_256_GCM 2
-#define F2FS_ENCRYPTION_MODE_AES_256_CBC 3
-#define F2FS_ENCRYPTION_MODE_AES_256_CTS 4
-
-#include "f2fs_crypto.h"
+#define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT)
+#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define DEF_DIR_LEVEL 0
@@ -434,25 +484,22 @@ struct f2fs_inode_info {
unsigned int clevel; /* maximum level of given file name */
nid_t i_xattr_nid; /* node id that contains xattrs */
unsigned long long xattr_ver; /* cp version of xattr modification */
- struct inode_entry *dirty_dir; /* the pointer of dirty dir */
+ loff_t last_disk_size; /* lastly written file size */
+ struct list_head dirty_list; /* dirty list for dirs and files */
+ struct list_head gdirty_list; /* linked in global dirty list */
struct list_head inmem_pages; /* inmemory pages managed by f2fs */
struct mutex inmem_lock; /* lock for inmemory pages */
-
struct extent_tree *extent_tree; /* cached extent_tree entry */
-
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- /* Encryption params */
- struct f2fs_crypt_info *i_crypt_info;
-#endif
+ struct rw_semaphore dio_rwsem[2];/* avoid racing between dio and gc */
};
static inline void get_extent_info(struct extent_info *ext,
- struct f2fs_extent i_ext)
+ struct f2fs_extent *i_ext)
{
- ext->fofs = le32_to_cpu(i_ext.fofs);
- ext->blk = le32_to_cpu(i_ext.blk);
- ext->len = le32_to_cpu(i_ext.len);
+ ext->fofs = le32_to_cpu(i_ext->fofs);
+ ext->blk = le32_to_cpu(i_ext->blk);
+ ext->len = le32_to_cpu(i_ext->len);
}
static inline void set_raw_extent(struct extent_info *ext,
@@ -497,20 +544,30 @@ static inline bool __is_front_mergeable(struct extent_info *cur,
return __is_extent_mergeable(cur, front);
}
-static inline void __try_update_largest_extent(struct extent_tree *et,
- struct extent_node *en)
+extern void f2fs_mark_inode_dirty_sync(struct inode *, bool);
+static inline void __try_update_largest_extent(struct inode *inode,
+ struct extent_tree *et, struct extent_node *en)
{
- if (en->ei.len > et->largest.len)
+ if (en->ei.len > et->largest.len) {
et->largest = en->ei;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
}
+enum nid_list {
+ FREE_NID_LIST,
+ ALLOC_NID_LIST,
+ MAX_NID_LIST,
+};
+
struct f2fs_nm_info {
block_t nat_blkaddr; /* base disk address of NAT */
nid_t max_nid; /* maximum possible node ids */
- nid_t available_nids; /* maximum available node ids */
+ nid_t available_nids; /* # of available node ids */
nid_t next_scan_nid; /* the next nid to be scanned */
unsigned int ram_thresh; /* control the memory footprint */
unsigned int ra_nid_pages; /* # of nid pages to be readaheaded */
+ unsigned int dirty_nats_ratio; /* control dirty nats ratio threshold */
/* NAT cache management */
struct radix_tree_root nat_root;/* root of the nat entry cache */
@@ -522,9 +579,9 @@ struct f2fs_nm_info {
/* free node ids management */
struct radix_tree_root free_nid_root;/* root of the free_nid cache */
- struct list_head free_nid_list; /* a list for free nids */
- spinlock_t free_nid_list_lock; /* protect free nid list */
- unsigned int fcnt; /* the number of free node id */
+ struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */
+ unsigned int nid_cnt[MAX_NID_LIST]; /* the number of nids in each list */
+ spinlock_t nid_list_lock; /* protect nid lists ops */
struct mutex build_lock; /* lock for build free nids */
/* for checkpoint */
@@ -544,6 +601,9 @@ struct dnode_of_data {
nid_t nid; /* node id of the direct node block */
unsigned int ofs_in_node; /* data offset in the node page */
bool inode_page_locked; /* inode page is locked or not */
+ bool node_changed; /* is node block changed */
+ char cur_level; /* level of hole node page */
+ char max_level; /* level of current page located */
block_t data_blkaddr; /* block address of the node block */
};
@@ -582,7 +642,6 @@ enum {
CURSEG_WARM_NODE, /* direct node blocks of normal files */
CURSEG_COLD_NODE, /* indirect node blocks */
NO_CHECK_TYPE,
- CURSEG_DIRECT_IO, /* to use for the direct IO path */
};
struct flush_cmd {
@@ -594,6 +653,7 @@ struct flush_cmd {
struct flush_cmd_control {
struct task_struct *f2fs_issue_flush; /* flush thread */
wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */
+ atomic_t submit_flush; /* # of issued flushes */
struct llist_head issue_list; /* list for command issue */
struct llist_node *dispatch_list; /* list for command dispatch */
};
@@ -618,6 +678,7 @@ struct f2fs_sm_info {
/* for small discard management */
struct list_head discard_list; /* 4KB discard list */
+ struct list_head wait_list; /* linked with issued discard bio */
int nr_discards; /* # of discards in the list */
int max_discards; /* max. discards to be issued */
@@ -644,12 +705,16 @@ struct f2fs_sm_info {
* f2fs monitors the number of several block types such as on-writeback,
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
*/
+#define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
- F2FS_WRITEBACK,
F2FS_DIRTY_DENTS,
+ F2FS_DIRTY_DATA,
F2FS_DIRTY_NODES,
F2FS_DIRTY_META,
F2FS_INMEM_PAGES,
+ F2FS_DIRTY_IMETA,
+ F2FS_WB_CP_DATA,
+ F2FS_WB_DATA,
NR_COUNT_TYPE,
};
@@ -673,6 +738,7 @@ enum page_type {
META_FLUSH,
INMEM, /* the below types are used by tracepoints only. */
INMEM_DROP,
+ INMEM_REVOKE,
IPU,
OPU,
};
@@ -681,7 +747,8 @@ struct f2fs_io_info {
struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
int rw; /* contains R/RS/W/WS with REQ_META/REQ_PRIO */
- block_t blk_addr; /* block address to be written */
+ block_t new_blkaddr; /* new block address to be written */
+ block_t old_blkaddr; /* old block address before COW */
struct page *page; /* page to be written */
struct page *encrypted_page; /* encrypted page */
};
@@ -695,6 +762,27 @@ struct f2fs_bio_info {
struct rw_semaphore io_rwsem; /* blocking op for bio */
};
+#define FDEV(i) (sbi->devs[i])
+#define RDEV(i) (raw_super->devs[i])
+struct f2fs_dev_info {
+ struct block_device *bdev;
+ char path[MAX_PATH_LEN];
+ unsigned int total_segments;
+ block_t start_blk;
+ block_t end_blk;
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int nr_blkz; /* Total number of zones */
+ u8 *blkz_type; /* Array of zones type */
+#endif
+};
+
+enum inode_type {
+ DIR_INODE, /* for dirty dir inode */
+ FILE_INODE, /* for dirty regular/symlink inode */
+ DIRTY_META, /* for all dirtied inode metadata */
+ NR_INODE_TYPE,
+};
+
/* for inner inode cache management */
struct inode_management {
struct radix_tree_root ino_root; /* ino entry array */
@@ -709,14 +797,36 @@ enum {
SBI_IS_CLOSE, /* specify unmounting */
SBI_NEED_FSCK, /* need fsck.f2fs to fix */
SBI_POR_DOING, /* recovery is doing or not */
+ SBI_NEED_SB_WRITE, /* need to recover superblock */
+ SBI_NEED_CP, /* need to checkpoint */
};
+enum {
+ CP_TIME,
+ REQ_TIME,
+ MAX_TIME,
+};
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#define F2FS_KEY_DESC_PREFIX "f2fs:"
+#define F2FS_KEY_DESC_PREFIX_SIZE 5
+#endif
struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
struct proc_dir_entry *s_proc; /* proc entry */
- struct buffer_head *raw_super_buf; /* buffer head of raw sb */
struct f2fs_super_block *raw_super; /* raw super block pointer */
- int s_flag; /* flags for sbi */
+ int valid_super_block; /* valid super block no */
+ unsigned long s_flag; /* flags for sbi */
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
+ u8 key_prefix_size;
+#endif
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int blocks_per_blkz; /* F2FS blocks per zone */
+ unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */
+#endif
/* for node-related operations */
struct f2fs_nm_info *nm_info; /* node manager */
@@ -728,32 +838,37 @@ struct f2fs_sb_info {
/* for bio operations */
struct f2fs_bio_info read_io; /* for read bios */
struct f2fs_bio_info write_io[NR_PAGE_TYPE]; /* for write bios */
+ struct mutex wio_mutex[NODE + 1]; /* bio ordering for NODE/DATA */
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
+ int cur_cp_pack; /* current cp pack (1 or 2) */
+ spinlock_t cp_lock; /* for flag in ckpt */
struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */
struct rw_semaphore cp_rwsem; /* blocking FS operations */
struct rw_semaphore node_write; /* locking node writes */
- struct mutex writepages; /* mutex for writepages() */
wait_queue_head_t cp_wait;
- long cp_expires, cp_interval; /* next expected periodic cp */
+ unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
+ long interval_time[MAX_TIME]; /* to store thresholds */
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
/* for orphan inode, use 0'th array */
unsigned int max_orphans; /* max orphan inodes */
- /* for directory inode management */
- struct list_head dir_inode_list; /* dir inode list */
- spinlock_t dir_inode_lock; /* for dir inode list lock */
+ /* for inode management */
+ struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */
+ spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */
/* for extent tree cache */
struct radix_tree_root extent_tree_root;/* cache extent cache entries */
struct rw_semaphore extent_tree_lock; /* locking extent radix tree */
struct list_head extent_list; /* lru list for shrinker */
spinlock_t extent_lock; /* locking extent lru list */
- int total_ext_tree; /* extent tree count */
+ atomic_t total_ext_tree; /* extent tree count */
+ struct list_head zombie_list; /* extent zombie tree list */
+ atomic_t total_zombie_tree; /* extent zombie tree count */
atomic_t total_ext_node; /* extent info count */
/* basic filesystem units */
@@ -770,17 +885,23 @@ struct f2fs_sb_info {
unsigned int total_sections; /* total section count */
unsigned int total_node_count; /* total node block count */
unsigned int total_valid_node_count; /* valid node block count */
- unsigned int total_valid_inode_count; /* valid inode count */
+ loff_t max_file_blocks; /* max block index of file */
int active_logs; /* # of active logs */
int dir_level; /* directory level */
block_t user_block_count; /* # of user blocks */
block_t total_valid_block_count; /* # of valid blocks */
- block_t alloc_valid_block_count; /* # of allocated blocks */
block_t discard_blks; /* discard command candidates */
block_t last_valid_block_count; /* for recovery */
u32 s_next_generation; /* for NFS support */
- atomic_t nr_pages[NR_COUNT_TYPE]; /* # of pages, see count_type */
+
+ /* # of pages, see count_type */
+ atomic_t nr_pages[NR_COUNT_TYPE];
+ /* # of allocated blocks */
+ struct percpu_counter alloc_valid_block_count;
+
+ /* valid inode count */
+ struct percpu_counter total_valid_inode_count;
struct f2fs_mount_info mount_opt; /* mount options */
@@ -809,7 +930,7 @@ struct f2fs_sb_info {
atomic_t inline_inode; /* # of inline_data inodes */
atomic_t inline_dir; /* # of inline_dentry inodes */
int bg_gc; /* background gc calls */
- unsigned int n_dirty_dirs; /* # of dir inodes */
+ unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
#endif
unsigned int last_victim[2]; /* last victim segment # */
spinlock_t stat_lock; /* lock for stat operations */
@@ -820,13 +941,106 @@ struct f2fs_sb_info {
/* For shrinker support */
struct list_head s_list;
+ int s_ndevs; /* number of devices */
+ struct f2fs_dev_info *devs; /* for device list */
struct mutex umount_mutex;
unsigned int shrinker_run_no;
+
+ /* For write statistics */
+ u64 sectors_written_start;
+ u64 kbytes_written;
+
+ /* Reference to checksum algorithm driver via cryptoapi */
+ struct crypto_shash *s_chksum_driver;
+
+ /* For fault injection */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct f2fs_fault_info fault_info;
+#endif
};
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+ struct f2fs_fault_info *ffi = &sbi->fault_info;
+
+ if (!ffi->inject_rate)
+ return false;
+
+ if (!IS_FAULT_SET(ffi, type))
+ return false;
+
+ atomic_inc(&ffi->inject_ops);
+ if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
+ atomic_set(&ffi->inject_ops, 0);
+ printk("%sF2FS-fs : inject %s in %pF\n",
+ KERN_INFO,
+ fault_name[type],
+ __builtin_return_address(0));
+ return true;
+ }
+ return false;
+}
+#endif
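
Call sites guard an operation with time_to_inject(); the same shape appears in f2fs_kmalloc() and f2fs_grab_cache_page() later in this patch:

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_KMALLOC))
		return NULL;	/* simulate an allocation failure */
#endif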
+
+/* For write statistics. Suppose sector size is 512 bytes,
+ * and the return value is in kbytes. s is a struct f2fs_sb_info pointer.
+ */
+#define BD_PART_WRITTEN(s) \
+(((u64)part_stat_read(s->sb->s_bdev->bd_part, sectors[1]) - \
+ s->sectors_written_start) >> 1)
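
The trailing >> 1 converts 512-byte sectors to kilobytes; a quick worked check:

	/*
	 * bytes = sectors * 512, kbytes = bytes / 1024 = sectors >> 1.
	 * e.g. 8192 sectors -> 8192 * 512 / 1024 = 4096 KB = 8192 >> 1.
	 */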
+
+static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
+{
+ sbi->last_time[type] = jiffies;
+}
+
+static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
+{
+ struct timespec ts = {sbi->interval_time[type], 0};
+ unsigned long interval = timespec_to_jiffies(&ts);
+
+ return time_after(jiffies, sbi->last_time[type] + interval);
+}
+
+static inline bool is_idle(struct f2fs_sb_info *sbi)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct request_list *rl = &q->root_rl;
+
+ if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
+ return false;
+
+ return f2fs_time_over(sbi, REQ_TIME);
+}
+
/*
* Inline functions
*/
+static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
+ unsigned int length)
+{
+ SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
+ u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ int err;
+
+ shash->tfm = sbi->s_chksum_driver;
+ shash->flags = 0;
+ *ctx = F2FS_SUPER_MAGIC;
+
+ err = crypto_shash_update(shash, address, length);
+ BUG_ON(err);
+
+ return *ctx;
+}
+
+static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
+ void *buf, size_t buf_size)
+{
+ return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
+}
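
A hedged usage sketch (buf and len are illustrative): checksum a buffer once, verify it later:

	u32 crc = f2fs_crc32(sbi, buf, len);

	if (!f2fs_crc_valid(sbi, crc, buf, len))
		f2fs_msg(sbi->sb, KERN_ERR, "checksum mismatch");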
+
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
return container_of(inode, struct f2fs_inode_info, vfs_inode);
@@ -909,17 +1123,17 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
- return sbi->s_flag & (0x01 << type);
+ return test_bit(type, &sbi->s_flag);
}
static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
- sbi->s_flag |= (0x01 << type);
+ set_bit(type, &sbi->s_flag);
}
static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
- sbi->s_flag &= ~(0x01 << type);
+ clear_bit(type, &sbi->s_flag);
}
static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
@@ -927,26 +1141,50 @@ static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
return le64_to_cpu(cp->checkpoint_ver);
}
-static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+
return ckpt_flags & f;
}
-static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
- unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
+}
+
+static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags;
+
+ ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags |= f;
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
-static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
- unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ spin_lock(&sbi->cp_lock);
+ __set_ckpt_flags(F2FS_CKPT(sbi), f);
+ spin_unlock(&sbi->cp_lock);
+}
+
+static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags;
+
+ ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags &= (~f);
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
+static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
+{
+ spin_lock(&sbi->cp_lock);
+ __clear_ckpt_flags(F2FS_CKPT(sbi), f);
+ spin_unlock(&sbi->cp_lock);
+}
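
The new cp_lock serializes the read-modify-write on ckpt_flags; without it, two updaters could interleave as sketched here (illustrative):

	/*
	 * Lost-update race the spinlock prevents:
	 *   CPU0: reads ckpt_flags = F
	 *   CPU1: reads ckpt_flags = F
	 *   CPU0: writes F | A
	 *   CPU1: writes F | B        -> flag A is silently lost
	 */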
+
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
down_read(&sbi->cp_rwsem);
@@ -959,7 +1197,7 @@ static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
- f2fs_down_write(&sbi->cp_rwsem, &sbi->cp_mutex);
+ down_write(&sbi->cp_rwsem);
}
static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
@@ -985,8 +1223,8 @@ static inline bool __remain_node_summaries(int reason)
static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
- return (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) ||
- is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FASTBOOT_FLAG));
+ return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
+ is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}
/*
@@ -1019,22 +1257,37 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
return ofs == XATTR_NODE_OFFSET;
}
+static inline void f2fs_i_blocks_write(struct inode *, blkcnt_t, bool);
static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
- struct inode *inode, blkcnt_t count)
+ struct inode *inode, blkcnt_t *count)
{
- block_t valid_block_count;
+ blkcnt_t diff;
- spin_lock(&sbi->stat_lock);
- valid_block_count =
- sbi->total_valid_block_count + (block_t)count;
- if (unlikely(valid_block_count > sbi->user_block_count)) {
- spin_unlock(&sbi->stat_lock);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_BLOCK))
return false;
+#endif
+ /*
+ * let's increase this prior to the actual block count change so
+ * f2fs_sync_file can avoid data races when deciding checkpoint.
+ */
+ percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
+
+ spin_lock(&sbi->stat_lock);
+ sbi->total_valid_block_count += (block_t)(*count);
+ if (unlikely(sbi->total_valid_block_count > sbi->user_block_count)) {
+ diff = sbi->total_valid_block_count - sbi->user_block_count;
+ *count -= diff;
+ sbi->total_valid_block_count = sbi->user_block_count;
+ if (!*count) {
+ spin_unlock(&sbi->stat_lock);
+ percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
+ return false;
+ }
}
- inode->i_blocks += count;
- sbi->total_valid_block_count = valid_block_count;
- sbi->alloc_valid_block_count += (block_t)count;
spin_unlock(&sbi->stat_lock);
+
+ f2fs_i_blocks_write(inode, *count, true);
return true;
}
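
Note the new in/out semantics of *count: on partial success the request is trimmed to what fits. A caller sketch (needed_blocks is illustrative):

	blkcnt_t count = needed_blocks;

	if (!inc_valid_block_count(sbi, inode, &count))
		return -ENOSPC;	/* nothing could be reserved */
	/* proceed with 'count' blocks, possibly fewer than requested */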
@@ -1045,22 +1298,27 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
spin_lock(&sbi->stat_lock);
f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
f2fs_bug_on(sbi, inode->i_blocks < count);
- inode->i_blocks -= count;
sbi->total_valid_block_count -= (block_t)count;
spin_unlock(&sbi->stat_lock);
+ f2fs_i_blocks_write(inode, count, false);
}
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
atomic_inc(&sbi->nr_pages[count_type]);
+
+ if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
+ count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
+ return;
+
set_sbi_flag(sbi, SBI_IS_DIRTY);
}
static inline void inode_inc_dirty_pages(struct inode *inode)
{
atomic_inc(&F2FS_I(inode)->dirty_pages);
- if (S_ISDIR(inode->i_mode))
- inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_DENTS);
+ inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}
static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
@@ -1075,12 +1333,11 @@ static inline void inode_dec_dirty_pages(struct inode *inode)
return;
atomic_dec(&F2FS_I(inode)->dirty_pages);
-
- if (S_ISDIR(inode->i_mode))
- dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_DENTS);
+ dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}
-static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
+static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
return atomic_read(&sbi->nr_pages[count_type]);
}
@@ -1092,10 +1349,11 @@ static inline int get_dirty_pages(struct inode *inode)
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
- unsigned int pages_per_sec = sbi->segs_per_sec *
- (1 << sbi->log_blocks_per_seg);
- return ((get_pages(sbi, block_type) + pages_per_sec - 1)
- >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+ unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
+ unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
+ sbi->log_blocks_per_seg;
+
+ return segs / sbi->segs_per_sec;
}
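
A worked example of the rounding above:

	/*
	 * blocks_per_seg = 512 (log = 9), segs_per_sec = 4,
	 * so pages_per_sec = 2048.  With 3000 dirty pages:
	 *   segs = (3000 + 2047) >> 9 = 9, and 9 / 4 = 2 sections.
	 */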
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
@@ -1103,6 +1361,11 @@ static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
return sbi->total_valid_block_count;
}
+static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
+{
+ return sbi->discard_blks;
+}
+
static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1140,22 +1403,27 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
- block_t start_addr;
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- unsigned long long ckpt_version = cur_cp_version(ckpt);
-
- start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
- /*
- * odd numbered checkpoint should at cp segment 0
- * and even segment must be at cp segment 1
- */
- if (!(ckpt_version & 1))
+ if (sbi->cur_cp_pack == 2)
start_addr += sbi->blocks_per_seg;
+ return start_addr;
+}
+static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
+{
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+
+ if (sbi->cur_cp_pack == 1)
+ start_addr += sbi->blocks_per_seg;
return start_addr;
}
+static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
+{
+ sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
+}
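
Checkpoints alternate between two packs; how the three helpers interact (sketch):

	/*
	 * With cur_cp_pack == 1:
	 *   __start_cp_addr()      -> cp_blkaddr                  (pack #1)
	 *   __start_cp_next_addr() -> cp_blkaddr + blocks_per_seg (pack #2)
	 * After a checkpoint commits, __set_cp_next_pack() flips 1 <-> 2.
	 */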
+
static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
@@ -1182,13 +1450,13 @@ static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
}
if (inode)
- inode->i_blocks++;
+ f2fs_i_blocks_write(inode, 1, true);
- sbi->alloc_valid_block_count++;
sbi->total_valid_node_count++;
sbi->total_valid_block_count++;
spin_unlock(&sbi->stat_lock);
+ percpu_counter_inc(&sbi->alloc_valid_block_count);
return true;
}
@@ -1201,7 +1469,7 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, !sbi->total_valid_node_count);
f2fs_bug_on(sbi, !inode->i_blocks);
- inode->i_blocks--;
+ f2fs_i_blocks_write(inode, 1, false);
sbi->total_valid_node_count--;
sbi->total_valid_block_count--;
@@ -1215,28 +1483,30 @@ static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
- spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, sbi->total_valid_inode_count == sbi->total_node_count);
- sbi->total_valid_inode_count++;
- spin_unlock(&sbi->stat_lock);
+ percpu_counter_inc(&sbi->total_valid_inode_count);
}
static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
- spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, !sbi->total_valid_inode_count);
- sbi->total_valid_inode_count--;
- spin_unlock(&sbi->stat_lock);
+ percpu_counter_dec(&sbi->total_valid_inode_count);
}
-static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
+static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
- return sbi->total_valid_inode_count;
+ return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}
static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
pgoff_t index, bool for_write)
{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct page *page = find_lock_page(mapping, index);
+ if (page)
+ return page;
+
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
+ return NULL;
+#endif
if (!for_write)
return grab_cache_page(mapping, index);
return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
@@ -1261,7 +1531,7 @@ static inline void f2fs_put_page(struct page *page, int unlock)
f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
unlock_page(page);
}
- page_cache_release(page);
+ put_page(page);
}
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
@@ -1396,13 +1666,12 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
enum {
FI_NEW_INODE, /* indicate newly allocated inode */
FI_DIRTY_INODE, /* indicate inode is dirty or not */
+ FI_AUTO_RECOVER, /* indicate inode is recoverable */
FI_DIRTY_DIR, /* indicate directory has dirty pages */
FI_INC_LINK, /* need to increment i_nlink */
FI_ACL_MODE, /* indicate acl mode */
FI_NO_ALLOC, /* should not allocate any blocks */
FI_FREE_NID, /* free allocated nid */
- FI_UPDATE_DIR, /* should update inode block for consistency */
- FI_DELAY_IPUT, /* used for the recovery */
FI_NO_EXTENT, /* not to use the extent cache */
FI_INLINE_XATTR, /* used for inline xattr */
FI_INLINE_DATA, /* used for inline data*/
@@ -1416,71 +1685,145 @@ enum {
FI_DROP_CACHE, /* drop dirty page cache */
FI_DATA_EXIST, /* indicate data exists */
FI_INLINE_DOTS, /* indicate inline dot dentries */
+ FI_DO_DEFRAG, /* indicate defragment is running */
+ FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
};
-static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline void __mark_inode_dirty_flag(struct inode *inode,
+ int flag, bool set)
+{
+ switch (flag) {
+ case FI_INLINE_XATTR:
+ case FI_INLINE_DATA:
+ case FI_INLINE_DENTRY:
+ if (set)
+ return;
+ case FI_DATA_EXIST:
+ case FI_INLINE_DOTS:
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
+}
+
+static inline void set_inode_flag(struct inode *inode, int flag)
+{
+ if (!test_bit(flag, &F2FS_I(inode)->flags))
+ set_bit(flag, &F2FS_I(inode)->flags);
+ __mark_inode_dirty_flag(inode, flag, true);
+}
+
+static inline int is_inode_flag_set(struct inode *inode, int flag)
+{
+ return test_bit(flag, &F2FS_I(inode)->flags);
+}
+
+static inline void clear_inode_flag(struct inode *inode, int flag)
+{
+ if (test_bit(flag, &F2FS_I(inode)->flags))
+ clear_bit(flag, &F2FS_I(inode)->flags);
+ __mark_inode_dirty_flag(inode, flag, false);
+}
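
The switch in __mark_inode_dirty_flag() falls through on purpose: setting an inline flag returns early, while clearing one (or touching FI_DATA_EXIST/FI_INLINE_DOTS at all) dirties the inode. Illustrative calls:

	set_inode_flag(inode, FI_INLINE_DATA);	 /* set: returns early */
	clear_inode_flag(inode, FI_INLINE_DATA); /* clear: falls through, dirties */
	set_inode_flag(inode, FI_DATA_EXIST);	 /* dirties either way */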
+
+static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
- if (!test_bit(flag, &fi->flags))
- set_bit(flag, &fi->flags);
+ F2FS_I(inode)->i_acl_mode = mode;
+ set_inode_flag(inode, FI_ACL_MODE);
+ f2fs_mark_inode_dirty_sync(inode, false);
}
-static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
+static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
- return test_bit(flag, &fi->flags);
+ if (inc)
+ inc_nlink(inode);
+ else
+ drop_nlink(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
-static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline void f2fs_i_blocks_write(struct inode *inode,
+ blkcnt_t diff, bool add)
{
- if (test_bit(flag, &fi->flags))
- clear_bit(flag, &fi->flags);
+ bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+ bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
+
+ inode->i_blocks = add ? inode->i_blocks + diff :
+ inode->i_blocks - diff;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ if (clean || recover)
+ set_inode_flag(inode, FI_AUTO_RECOVER);
}
-static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
+static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
- fi->i_acl_mode = mode;
- set_inode_flag(fi, FI_ACL_MODE);
+ bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+ bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
+
+ if (i_size_read(inode) == i_size)
+ return;
+
+ i_size_write(inode, i_size);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ if (clean || recover)
+ set_inode_flag(inode, FI_AUTO_RECOVER);
}
-static inline void get_inline_info(struct f2fs_inode_info *fi,
- struct f2fs_inode *ri)
+static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
+ F2FS_I(inode)->i_current_depth = depth;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
+{
+ F2FS_I(inode)->i_xattr_nid = xnid;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
+{
+ F2FS_I(inode)->i_pino = pino;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
if (ri->i_inline & F2FS_INLINE_XATTR)
- set_inode_flag(fi, FI_INLINE_XATTR);
+ set_bit(FI_INLINE_XATTR, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DATA)
- set_inode_flag(fi, FI_INLINE_DATA);
+ set_bit(FI_INLINE_DATA, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DENTRY)
- set_inode_flag(fi, FI_INLINE_DENTRY);
+ set_bit(FI_INLINE_DENTRY, &fi->flags);
if (ri->i_inline & F2FS_DATA_EXIST)
- set_inode_flag(fi, FI_DATA_EXIST);
+ set_bit(FI_DATA_EXIST, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DOTS)
- set_inode_flag(fi, FI_INLINE_DOTS);
+ set_bit(FI_INLINE_DOTS, &fi->flags);
}
-static inline void set_raw_inline(struct f2fs_inode_info *fi,
- struct f2fs_inode *ri)
+static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
ri->i_inline = 0;
- if (is_inode_flag_set(fi, FI_INLINE_XATTR))
+ if (is_inode_flag_set(inode, FI_INLINE_XATTR))
ri->i_inline |= F2FS_INLINE_XATTR;
- if (is_inode_flag_set(fi, FI_INLINE_DATA))
+ if (is_inode_flag_set(inode, FI_INLINE_DATA))
ri->i_inline |= F2FS_INLINE_DATA;
- if (is_inode_flag_set(fi, FI_INLINE_DENTRY))
+ if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
ri->i_inline |= F2FS_INLINE_DENTRY;
- if (is_inode_flag_set(fi, FI_DATA_EXIST))
+ if (is_inode_flag_set(inode, FI_DATA_EXIST))
ri->i_inline |= F2FS_DATA_EXIST;
- if (is_inode_flag_set(fi, FI_INLINE_DOTS))
+ if (is_inode_flag_set(inode, FI_INLINE_DOTS))
ri->i_inline |= F2FS_INLINE_DOTS;
}
static inline int f2fs_has_inline_xattr(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR);
+ return is_inode_flag_set(inode, FI_INLINE_XATTR);
}
-static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
+static inline unsigned int addrs_per_inode(struct inode *inode)
{
- if (f2fs_has_inline_xattr(&fi->vfs_inode))
+ if (f2fs_has_inline_xattr(inode))
return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
return DEF_ADDRS_PER_INODE;
}
@@ -1502,43 +1845,43 @@ static inline int inline_xattr_size(struct inode *inode)
static inline int f2fs_has_inline_data(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DATA);
+ return is_inode_flag_set(inode, FI_INLINE_DATA);
}
static inline void f2fs_clear_inline_inode(struct inode *inode)
{
- clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
- clear_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ clear_inode_flag(inode, FI_INLINE_DATA);
+ clear_inode_flag(inode, FI_DATA_EXIST);
}
static inline int f2fs_exist_data(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_DATA_EXIST);
+ return is_inode_flag_set(inode, FI_DATA_EXIST);
}
static inline int f2fs_has_inline_dots(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DOTS);
+ return is_inode_flag_set(inode, FI_INLINE_DOTS);
}
static inline bool f2fs_is_atomic_file(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_ATOMIC_FILE);
+ return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}
static inline bool f2fs_is_volatile_file(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_VOLATILE_FILE);
+ return is_inode_flag_set(inode, FI_VOLATILE_FILE);
}
static inline bool f2fs_is_first_block_written(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}
static inline bool f2fs_is_drop_cache(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_DROP_CACHE);
+ return is_inode_flag_set(inode, FI_DROP_CACHE);
}
static inline void *inline_data_addr(struct page *page)
@@ -1549,7 +1892,7 @@ static inline void *inline_data_addr(struct page *page)
static inline int f2fs_has_inline_dentry(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DENTRY);
+ return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}
static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page)
@@ -1566,27 +1909,41 @@ static inline int is_file(struct inode *inode, int type)
static inline void set_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise |= type;
+ f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void clear_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise &= ~type;
+ f2fs_mark_inode_dirty_sync(inode, true);
}
-static inline int f2fs_readonly(struct super_block *sb)
+static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
- return sb->s_flags & MS_RDONLY;
+ if (dsync) {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ bool ret;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ ret = list_empty(&F2FS_I(inode)->gdirty_list);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return ret;
+ }
+ if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
+ file_keep_isize(inode) ||
+ i_size_read(inode) & PAGE_MASK)
+ return false;
+ return F2FS_I(inode)->last_disk_size == i_size_read(inode);
}
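
A sketch of the fsync fast path this enables (the label is illustrative):

	/* skip writing the inode block when only a recoverable
	 * size/blocks change is pending */
	if (f2fs_skip_inode_update(inode, datasync))
		goto flush_out;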
-static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
+static inline int f2fs_readonly(struct super_block *sb)
{
- return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+ return sb->s_flags & MS_RDONLY;
}
-static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
+static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
- set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
- sbi->sb->s_flags |= MS_RDONLY;
+ return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}
static inline bool is_dot_dotdot(const struct qstr *str)
@@ -1602,13 +1959,21 @@ static inline bool is_dot_dotdot(const struct qstr *str)
static inline bool f2fs_may_extent_tree(struct inode *inode)
{
- mode_t mode = inode->i_mode;
-
if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
- is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+ is_inode_flag_set(inode, FI_NO_EXTENT))
return false;
- return S_ISREG(mode);
+ return S_ISREG(inode->i_mode);
+}
+
+static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
+{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_KMALLOC))
+ return NULL;
+#endif
+ return kmalloc(size, flags);
}
static inline void *f2fs_kvmalloc(size_t size, gfp_t flags)
@@ -1632,14 +1997,14 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
}
#define get_inode_mode(i) \
- ((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
+ ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
/* get offset of first page in next direct node */
-#define PGOFS_OF_NEXT_DNODE(pgofs, fi) \
- ((pgofs < ADDRS_PER_INODE(fi)) ? ADDRS_PER_INODE(fi) : \
- (pgofs - ADDRS_PER_INODE(fi) + ADDRS_PER_BLOCK) / \
- ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi))
+#define PGOFS_OF_NEXT_DNODE(pgofs, inode) \
+ ((pgofs < ADDRS_PER_INODE(inode)) ? ADDRS_PER_INODE(inode) : \
+ (pgofs - ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK) / \
+ ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode))
/*
* file.c
@@ -1647,7 +2012,7 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
int truncate_blocks(struct inode *, u64, bool);
-int f2fs_truncate(struct inode *, bool);
+int f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
@@ -1660,9 +2025,10 @@ long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
*/
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
+struct inode *f2fs_iget_retry(struct super_block *, unsigned long);
int try_to_free_nats(struct f2fs_sb_info *, int);
-void update_inode(struct inode *, struct page *);
-void update_inode_page(struct inode *);
+int update_inode(struct inode *, struct page *);
+int update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);
void handle_failed_inode(struct inode *);
@@ -1675,29 +2041,34 @@ struct dentry *f2fs_get_parent(struct dentry *child);
/*
* dir.c
*/
-extern unsigned char f2fs_filetype_table[F2FS_FT_MAX];
void set_de_type(struct f2fs_dir_entry *, umode_t);
-
-struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *,
+unsigned char get_de_type(struct f2fs_dir_entry *);
+struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *,
f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
-bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
- unsigned int, struct f2fs_str *);
+int f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
+ unsigned int, struct fscrypt_str *);
void do_make_empty_dir(struct inode *, struct inode *,
struct f2fs_dentry_ptr *);
struct page *init_inode_metadata(struct inode *, struct inode *,
- const struct qstr *, struct page *);
+ const struct qstr *, const struct qstr *, struct page *);
void update_parent_metadata(struct inode *, struct inode *, unsigned int);
int room_for_filename(const void *, int, int);
-void f2fs_drop_nlink(struct inode *, struct inode *, struct page *);
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
+void f2fs_drop_nlink(struct inode *, struct inode *);
+struct f2fs_dir_entry *__f2fs_find_entry(struct inode *, struct fscrypt_name *,
+ struct page **);
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *,
struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
-ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
+ino_t f2fs_inode_by_name(struct inode *, const struct qstr *, struct page **);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
struct page *, struct inode *);
int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
const struct qstr *, f2fs_hash_t , unsigned int);
+int f2fs_add_regular_entry(struct inode *, const struct qstr *,
+ const struct qstr *, struct inode *, nid_t, umode_t);
+int __f2fs_do_add_link(struct inode *, struct fscrypt_name*, struct inode *,
+ nid_t, umode_t);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
umode_t);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
@@ -1714,17 +2085,19 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
/*
* super.c
*/
+int f2fs_inode_dirtied(struct inode *, bool);
+void f2fs_inode_synced(struct inode *);
int f2fs_commit_super(struct f2fs_sb_info *, bool);
loff_t max_file_size(unsigned bits);
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);
+int sanity_check_ckpt(struct f2fs_sb_info *sbi);
/*
* hash.c
*/
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
- struct f2fs_filename *fname);
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
/*
* node.c
@@ -1737,6 +2110,7 @@ int need_dentry_mark(struct f2fs_sb_info *, nid_t);
bool is_checkpointed_node(struct f2fs_sb_info *, nid_t);
bool need_inode_block_update(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
+pgoff_t get_next_page_offset(struct dnode_of_data *, pgoff_t);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int truncate_xattr_node(struct inode *, struct page *);
@@ -1747,8 +2121,11 @@ struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
-void sync_inode_page(struct dnode_of_data *);
-int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
+void move_node_page(struct page *, int);
+int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
+ struct writeback_control *, bool);
+int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
+void build_free_nids(struct f2fs_sb_info *, bool);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
@@ -1768,18 +2145,18 @@ void destroy_node_manager_caches(void);
* segment.c
*/
void register_inmem_page(struct inode *, struct page *);
-int commit_inmem_pages(struct inode *, bool);
-void f2fs_balance_fs(struct f2fs_sb_info *);
+void drop_inmem_pages(struct inode *);
+int commit_inmem_pages(struct inode *);
+void f2fs_balance_fs(struct f2fs_sb_info *, bool);
void f2fs_balance_fs_bg(struct f2fs_sb_info *);
int f2fs_issue_flush(struct f2fs_sb_info *);
int create_flush_cmd_control(struct f2fs_sb_info *);
-void destroy_flush_cmd_control(struct f2fs_sb_info *);
+void destroy_flush_cmd_control(struct f2fs_sb_info *, bool);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
void release_discard_addrs(struct f2fs_sb_info *);
-bool discard_next_dnode(struct f2fs_sb_info *, block_t);
int npages_for_summary_flush(struct f2fs_sb_info *, bool);
void allocate_new_segments(struct f2fs_sb_info *);
int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
@@ -1789,16 +2166,17 @@ void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(unsigned int, struct f2fs_io_info *);
void write_data_page(struct dnode_of_data *, struct f2fs_io_info *);
void rewrite_data_page(struct f2fs_io_info *);
+void __f2fs_replace_block(struct f2fs_sb_info *, struct f2fs_summary *,
+ block_t, block_t, bool, bool);
void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *,
- block_t, block_t, unsigned char, bool);
+ block_t, block_t, unsigned char, bool, bool);
void allocate_data_block(struct f2fs_sb_info *, struct page *,
block_t, block_t *, struct f2fs_summary *, int);
-void f2fs_wait_on_page_writeback(struct page *, enum page_type);
+void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
-int lookup_journal_in_cursum(struct f2fs_summary_block *,
- int, unsigned int, int);
+int lookup_journal_in_cursum(struct f2fs_journal *, int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *, struct cp_control *);
int build_segment_manager(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);
@@ -1808,6 +2186,7 @@ void destroy_segment_manager_caches(void);
/*
* checkpoint.c
*/
+void f2fs_stop_checkpoint(struct f2fs_sb_info *, bool);
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
@@ -1815,21 +2194,21 @@ bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
-void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
-void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
-void release_dirty_inode(struct f2fs_sb_info *);
+void add_ino_entry(struct f2fs_sb_info *, nid_t, int type);
+void remove_ino_entry(struct f2fs_sb_info *, nid_t, int type);
+void release_ino_entry(struct f2fs_sb_info *, bool);
bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
+int f2fs_sync_inode_meta(struct f2fs_sb_info *);
int acquire_orphan_inode(struct f2fs_sb_info *);
void release_orphan_inode(struct f2fs_sb_info *);
-void add_orphan_inode(struct f2fs_sb_info *, nid_t);
+void add_orphan_inode(struct inode *);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void update_dirty_page(struct inode *, struct page *);
-void add_dirty_dir_inode(struct inode *);
-void remove_dirty_dir_inode(struct inode *);
-void sync_dirty_dir_inodes(struct f2fs_sb_info *);
-void write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
+void remove_dirty_inode(struct inode *);
+int sync_dirty_inodes(struct f2fs_sb_info *, enum inode_type);
+int write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
void init_ino_entry_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);
@@ -1838,34 +2217,49 @@ void destroy_checkpoint_caches(void);
* data.c
*/
void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
+ struct page *, nid_t, enum page_type, int);
+void f2fs_flush_merged_bios(struct f2fs_sb_info *);
int f2fs_submit_page_bio(struct f2fs_io_info *);
void f2fs_submit_page_mbio(struct f2fs_io_info *);
+struct block_device *f2fs_target_device(struct f2fs_sb_info *,
+ block_t, struct bio *);
+int f2fs_target_device_index(struct f2fs_sb_info *, block_t);
void set_data_blkaddr(struct dnode_of_data *);
+void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
+int reserve_new_blocks(struct dnode_of_data *, blkcnt_t);
int reserve_new_block(struct dnode_of_data *);
int f2fs_get_block(struct dnode_of_data *, pgoff_t);
+int f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
struct page *find_data_page(struct inode *, pgoff_t);
struct page *get_lock_data_page(struct inode *, pgoff_t, bool);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int do_write_data_page(struct f2fs_io_info *);
+int f2fs_map_blocks(struct inode *, struct f2fs_map_blocks *, int, int);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
+void f2fs_set_page_dirty_nobuffers(struct page *);
void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
int f2fs_release_page(struct page *, gfp_t);
+#ifdef CONFIG_MIGRATION
+int f2fs_migrate_page(struct address_space *, struct page *, struct page *,
+ enum migrate_mode);
+#endif
/*
* gc.c
*/
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
-block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
-int f2fs_gc(struct f2fs_sb_info *, bool);
+block_t start_bidx_of_node(unsigned int, struct inode *);
+int f2fs_gc(struct f2fs_sb_info *, bool, bool);
void build_gc_manager(struct f2fs_sb_info *);
/*
* recovery.c
*/
-int recover_fsync_data(struct f2fs_sb_info *);
+int recover_fsync_data(struct f2fs_sb_info *, bool);
bool space_for_roll_forward(struct f2fs_sb_info *);
/*
@@ -1879,18 +2273,20 @@ struct f2fs_stat_info {
int main_area_segs, main_area_sections, main_area_zones;
unsigned long long hit_largest, hit_cached, hit_rbtree;
unsigned long long hit_total, total_ext;
- int ext_tree, ext_node;
- int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
- int nats, dirty_nats, sits, dirty_sits, fnids;
+ int ext_tree, zombie_tree, ext_node;
+ int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
+ int inmem_pages;
+ unsigned int ndirty_dirs, ndirty_files, ndirty_all;
+ int nats, dirty_nats, sits, dirty_sits, free_nids, alloc_nids;
int total_count, utilization;
- int bg_gc, inmem_pages, wb_pages;
- int inline_xattr, inline_inode, inline_dir;
- unsigned int valid_count, valid_node_count, valid_inode_count;
+ int bg_gc, nr_wb_cp_data, nr_wb_data;
+ int inline_xattr, inline_inode, inline_dir, orphans;
+ unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
int rsvd_segs, overp_segs;
int dirty_count, node_pages, meta_pages;
- int prefree_count, call_count, cp_count;
+ int prefree_count, call_count, cp_count, bg_cp_count;
int tot_segs, node_segs, data_segs, free_segs, free_secs;
int bg_node_segs, bg_data_segs;
int tot_blks, data_blks, node_blks;
@@ -1911,10 +2307,11 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
}
#define stat_inc_cp_count(si) ((si)->cp_count++)
+#define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
#define stat_inc_call_count(si) ((si)->call_count++)
#define stat_inc_bggc_count(sbi) ((sbi)->bg_gc++)
-#define stat_inc_dirty_dir(sbi) ((sbi)->n_dirty_dirs++)
-#define stat_dec_dirty_dir(sbi) ((sbi)->n_dirty_dirs--)
+#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
+#define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
@@ -1989,14 +2386,15 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
-void __init f2fs_create_root_stats(void);
+int __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_cp_count(si)
+#define stat_inc_bg_cp_count(si)
#define stat_inc_call_count(si)
#define stat_inc_bggc_count(si)
-#define stat_inc_dirty_dir(sbi)
-#define stat_dec_dirty_dir(sbi)
+#define stat_inc_dirty_inode(sbi, type)
+#define stat_dec_dirty_inode(sbi, type)
#define stat_inc_total_hit(sb)
#define stat_inc_rbtree_node_hit(sb)
#define stat_inc_largest_node_hit(sbi)
@@ -2017,7 +2415,7 @@ void f2fs_destroy_root_stats(void);
static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
-static inline void __init f2fs_create_root_stats(void) { }
+static inline int __init f2fs_create_root_stats(void) { return 0; }
static inline void f2fs_destroy_root_stats(void) { }
#endif
@@ -2046,16 +2444,15 @@ int f2fs_convert_inline_inode(struct inode *);
int f2fs_write_inline_data(struct inode *, struct page *);
bool recover_inline_data(struct inode *, struct page *);
struct f2fs_dir_entry *find_in_inline_dir(struct inode *,
- struct f2fs_filename *, struct page **);
-struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *, struct page **);
+ struct fscrypt_name *, struct page **);
int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *);
-int f2fs_add_inline_entry(struct inode *, const struct qstr *, struct inode *,
- nid_t, umode_t);
+int f2fs_add_inline_entry(struct inode *, const struct qstr *,
+ const struct qstr *, struct inode *, nid_t, umode_t);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *,
struct inode *, struct inode *);
bool f2fs_empty_inline_dir(struct inode *);
int f2fs_read_inline_dir(struct file *, struct dir_context *,
- struct f2fs_str *);
+ struct fscrypt_str *);
int f2fs_inline_data_fiemap(struct inode *,
struct fiemap_extent_info *, __u64, __u64);
@@ -2071,8 +2468,8 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *);
* extent_cache.c
*/
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
-void f2fs_drop_largest_extent(struct inode *, pgoff_t);
-void f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+bool f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+void f2fs_drop_extent_tree(struct inode *);
unsigned int f2fs_destroy_extent_node(struct inode *);
void f2fs_destroy_extent_tree(struct inode *);
bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
@@ -2086,13 +2483,9 @@ void destroy_extent_cache(void);
/*
* crypto support
*/
-static inline int f2fs_encrypted_inode(struct inode *inode)
+static inline bool f2fs_encrypted_inode(struct inode *inode)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
return file_is_encrypt(inode);
-#else
- return 0;
-#endif
}
static inline void f2fs_set_encrypted_inode(struct inode *inode)
@@ -2104,113 +2497,88 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
static inline bool f2fs_bio_encrypted(struct bio *bio)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- return unlikely(bio->bi_private != NULL);
-#else
- return false;
-#endif
+ return bio->bi_private != NULL;
}
static inline int f2fs_sb_has_crypto(struct super_block *sb)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
-#else
- return 0;
-#endif
}
-static inline bool f2fs_may_encrypt(struct inode *inode)
+static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- mode_t mode = inode->i_mode;
-
- return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
-#else
- return 0;
-#endif
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED);
}
-/* crypto_policy.c */
-int f2fs_is_child_context_consistent_with_parent(struct inode *,
- struct inode *);
-int f2fs_inherit_context(struct inode *, struct inode *, struct page *);
-int f2fs_process_policy(const struct f2fs_encryption_policy *, struct inode *);
-int f2fs_get_policy(struct inode *, struct f2fs_encryption_policy *);
-
-/* crypt.c */
-extern struct kmem_cache *f2fs_crypt_info_cachep;
-bool f2fs_valid_contents_enc_mode(uint32_t);
-uint32_t f2fs_validate_encryption_key_size(uint32_t, uint32_t);
-struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *);
-void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *);
-struct page *f2fs_encrypt(struct inode *, struct page *);
-int f2fs_decrypt(struct f2fs_crypto_ctx *, struct page *);
-int f2fs_decrypt_one(struct inode *, struct page *);
-void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *, struct bio *);
-
-/* crypto_key.c */
-void f2fs_free_encryption_info(struct inode *, struct f2fs_crypt_info *);
-int _f2fs_get_encryption_info(struct inode *inode);
-
-/* crypto_fname.c */
-bool f2fs_valid_filenames_enc_mode(uint32_t);
-u32 f2fs_fname_crypto_round_up(u32, u32);
-int f2fs_fname_crypto_alloc_buffer(struct inode *, u32, struct f2fs_str *);
-int f2fs_fname_disk_to_usr(struct inode *, f2fs_hash_t *,
- const struct f2fs_str *, struct f2fs_str *);
-int f2fs_fname_usr_to_disk(struct inode *, const struct qstr *,
- struct f2fs_str *);
-
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
-void f2fs_restore_and_release_control_page(struct page **);
-void f2fs_restore_control_page(struct page *);
-
-int __init f2fs_init_crypto(void);
-int f2fs_crypto_initialize(void);
-void f2fs_exit_crypto(void);
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline int get_blkz_type(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkaddr)
+{
+ unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
+ int i;
-int f2fs_has_encryption_key(struct inode *);
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).bdev == bdev)
+ return FDEV(i).blkz_type[zno];
+ return -EINVAL;
+}
+#endif
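
For context, get_blkz_type() above maps a block address to the zone type of whichever backing device covers it. A minimal sketch of how a zoned-discard path might branch on the result follows; the caller shape and the bdev/blkaddr inputs are assumptions for illustration, only get_blkz_type() itself is from this patch:

	switch (get_blkz_type(sbi, bdev, blkaddr)) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		/* conventional zone: a regular discard request is fine */
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		/* sequential zone: must be reset rather than discarded */
		break;
	default:
		/* -EINVAL: bdev is not one of this filesystem's devices */
		break;
	}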
-static inline int f2fs_get_encryption_info(struct inode *inode)
+static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
{
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
+ struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
- if (!ci ||
- (ci->ci_keyring_key &&
- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED) |
- (1 << KEY_FLAG_DEAD)))))
- return _f2fs_get_encryption_info(inode);
- return 0;
+ return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb);
}
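
f2fs_discard_en() centralizes the "should we track discards" decision: either the queue supports discard, or the filesystem sits on a zoned device, where zone resets make discard bookkeeping mandatory. A hedged sketch of the intended call-site pattern; the segment-entry bitmap and offset names are assumptions, not part of this hunk:

	if (f2fs_discard_en(sbi))
		/* remember the freed block so it can be discarded later */
		f2fs_set_bit(off, se->discard_map);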
-void f2fs_fname_crypto_free_buffer(struct f2fs_str *);
-int f2fs_fname_setup_filename(struct inode *, const struct qstr *,
- int lookup, struct f2fs_filename *);
-void f2fs_fname_free_filename(struct f2fs_filename *);
-#else
-static inline void f2fs_restore_and_release_control_page(struct page **p) { }
-static inline void f2fs_restore_control_page(struct page *p) { }
-
-static inline int __init f2fs_init_crypto(void) { return 0; }
-static inline void f2fs_exit_crypto(void) { }
+static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
+{
+ clear_opt(sbi, ADAPTIVE);
+ clear_opt(sbi, LFS);
-static inline int f2fs_has_encryption_key(struct inode *i) { return 0; }
-static inline int f2fs_get_encryption_info(struct inode *i) { return 0; }
-static inline void f2fs_fname_crypto_free_buffer(struct f2fs_str *p) { }
+ switch (mt) {
+ case F2FS_MOUNT_ADAPTIVE:
+ set_opt(sbi, ADAPTIVE);
+ break;
+ case F2FS_MOUNT_LFS:
+ set_opt(sbi, LFS);
+ break;
+ }
+}
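
set_opt_mode() keeps the two mode flags mutually exclusive by clearing both before setting one. A hedged sketch of the expected mount-option plumbing; the option strings are assumptions, only set_opt_mode() and the F2FS_MOUNT_* values appear in this patch:

	if (!strcmp(name, "adaptive"))
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	else if (!strcmp(name, "lfs"))
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
	else
		return -EINVAL;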
-static inline int f2fs_fname_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup, struct f2fs_filename *fname)
+static inline bool f2fs_may_encrypt(struct inode *inode)
{
- memset(fname, 0, sizeof(struct f2fs_filename));
- fname->usr_fname = iname;
- fname->disk_name.name = (unsigned char *)iname->name;
- fname->disk_name.len = iname->len;
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ umode_t mode = inode->i_mode;
+
+ return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
+#else
return 0;
+#endif
}
-static inline void f2fs_fname_free_filename(struct f2fs_filename *fname) { }
+#ifndef CONFIG_F2FS_FS_ENCRYPTION
+#define fscrypt_set_d_op(i)
+#define fscrypt_get_ctx fscrypt_notsupp_get_ctx
+#define fscrypt_release_ctx fscrypt_notsupp_release_ctx
+#define fscrypt_encrypt_page fscrypt_notsupp_encrypt_page
+#define fscrypt_decrypt_page fscrypt_notsupp_decrypt_page
+#define fscrypt_decrypt_bio_pages fscrypt_notsupp_decrypt_bio_pages
+#define fscrypt_pullback_bio_page fscrypt_notsupp_pullback_bio_page
+#define fscrypt_restore_control_page fscrypt_notsupp_restore_control_page
+#define fscrypt_zeroout_range fscrypt_notsupp_zeroout_range
+#define fscrypt_process_policy fscrypt_notsupp_process_policy
+#define fscrypt_get_policy fscrypt_notsupp_get_policy
+#define fscrypt_has_permitted_context fscrypt_notsupp_has_permitted_context
+#define fscrypt_inherit_context fscrypt_notsupp_inherit_context
+#define fscrypt_get_encryption_info fscrypt_notsupp_get_encryption_info
+#define fscrypt_put_encryption_info fscrypt_notsupp_put_encryption_info
+#define fscrypt_setup_filename fscrypt_notsupp_setup_filename
+#define fscrypt_free_filename fscrypt_notsupp_free_filename
+#define fscrypt_fname_encrypted_size fscrypt_notsupp_fname_encrypted_size
+#define fscrypt_fname_alloc_buffer fscrypt_notsupp_fname_alloc_buffer
+#define fscrypt_fname_free_buffer fscrypt_notsupp_fname_free_buffer
+#define fscrypt_fname_disk_to_usr fscrypt_notsupp_fname_disk_to_usr
+#define fscrypt_fname_usr_to_disk fscrypt_notsupp_fname_usr_to_disk
#endif
#endif
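
The #define table above lets the rest of f2fs call the fscrypt_* API unconditionally: with CONFIG_F2FS_FS_ENCRYPTION disabled, each call resolves to a fscrypt_notsupp_* stub declared in the shared fscrypt headers, not in this patch. The assumed shape of one such stub, shown only for illustration:

	static inline int fscrypt_notsupp_get_encryption_info(struct inode *inode)
	{
		return -EOPNOTSUPP;	/* assumed stub behavior */
	}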
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 4b449d263333..5808d5c709a7 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -21,6 +21,8 @@
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/random.h>
+#include <linux/uuid.h>
+#include <linux/file.h>
#include "f2fs.h"
#include "node.h"
@@ -40,8 +42,6 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
struct dnode_of_data dn;
int err;
- f2fs_balance_fs(sbi);
-
sb_start_pagefault(inode->i_sb);
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
@@ -57,6 +57,8 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
+
file_update_time(vma->vm_file);
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping ||
@@ -74,28 +76,28 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
goto mapped;
/* page is wholly or partially inside EOF */
- if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+ if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
i_size_read(inode)) {
unsigned offset;
- offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ offset = i_size_read(inode) & ~PAGE_MASK;
+ zero_user_segment(page, offset, PAGE_SIZE);
}
set_page_dirty(page);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
/* fill the page */
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
- /* if gced page is attached, don't write to cold segment */
- clear_cold_data(page);
out:
sb_end_pagefault(inode->i_sb);
+ f2fs_update_time(sbi, REQ_TIME);
return block_page_mkwrite_return(err);
}
@@ -132,7 +134,7 @@ static inline bool need_do_checkpoint(struct inode *inode)
if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
need_cp = true;
- else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
+ else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
need_cp = true;
else if (file_wrong_pino(inode))
need_cp = true;
@@ -170,21 +172,16 @@ static void try_to_fix_pino(struct inode *inode)
fi->xattr_ver = 0;
if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
get_parent_ino(inode, &pino)) {
- fi->i_pino = pino;
+ f2fs_i_pino_write(inode, pino);
file_got_pino(inode);
- up_write(&fi->i_sem);
-
- mark_inode_dirty_sync(inode);
- f2fs_write_inode(inode, NULL);
- } else {
- up_write(&fi->i_sem);
}
+ up_write(&fi->i_sem);
}
-int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
+ int datasync, bool atomic)
{
struct inode *inode = file->f_mapping->host;
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t ino = inode->i_ino;
int ret = 0;
@@ -201,10 +198,10 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
trace_f2fs_sync_file_enter(inode);
/* if fdatasync is triggered, let's do in-place-update */
- if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
- set_inode_flag(fi, FI_NEED_IPU);
+ if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
+ set_inode_flag(inode, FI_NEED_IPU);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
- clear_inode_flag(fi, FI_NEED_IPU);
+ clear_inode_flag(inode, FI_NEED_IPU);
if (ret) {
trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
@@ -212,7 +209,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
}
/* if the inode is dirty, let's recover all the time */
- if (!datasync) {
+ if (!f2fs_skip_inode_update(inode, datasync)) {
f2fs_write_inode(inode, NULL);
goto go_write;
}
@@ -220,29 +217,26 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
/*
 * if there is no written data, don't waste time writing recovery info.
*/
- if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
+ if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
!exist_written_data(sbi, ino, APPEND_INO)) {
/* it may call write_inode just prior to fsync */
if (need_inode_page_update(sbi, ino))
goto go_write;
- if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
+ if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
exist_written_data(sbi, ino, UPDATE_INO))
goto flush_out;
goto out;
}
go_write:
- /* guarantee free sections for fsync */
- f2fs_balance_fs(sbi);
-
/*
* Both of fdatasync() and fsync() are able to be recovered from
* sudden-power-off.
*/
- down_read(&fi->i_sem);
+ down_read(&F2FS_I(inode)->i_sem);
need_cp = need_do_checkpoint(inode);
- up_read(&fi->i_sem);
+ up_read(&F2FS_I(inode)->i_sem);
if (need_cp) {
/* all the dirty node pages should be flushed for POR */
@@ -253,19 +247,23 @@ go_write:
* will be used only for fsynced inodes after checkpoint.
*/
try_to_fix_pino(inode);
- clear_inode_flag(fi, FI_APPEND_WRITE);
- clear_inode_flag(fi, FI_UPDATE_WRITE);
+ clear_inode_flag(inode, FI_APPEND_WRITE);
+ clear_inode_flag(inode, FI_UPDATE_WRITE);
goto out;
}
sync_nodes:
- sync_node_pages(sbi, ino, &wbc);
+ ret = fsync_node_pages(sbi, inode, &wbc, atomic);
+ if (ret)
+ goto out;
/* if cp_error was enabled, we should avoid infinite loop */
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ ret = -EIO;
goto out;
+ }
if (need_inode_block_update(sbi, ino)) {
- mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
f2fs_write_inode(inode, NULL);
goto sync_nodes;
}
@@ -275,18 +273,24 @@ sync_nodes:
goto out;
 /* once recovery info is written, we don't need to track this */
- remove_dirty_inode(sbi, ino, APPEND_INO);
- clear_inode_flag(fi, FI_APPEND_WRITE);
+ remove_ino_entry(sbi, ino, APPEND_INO);
+ clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
- remove_dirty_inode(sbi, ino, UPDATE_INO);
- clear_inode_flag(fi, FI_UPDATE_WRITE);
+ remove_ino_entry(sbi, ino, UPDATE_INO);
+ clear_inode_flag(inode, FI_UPDATE_WRITE);
ret = f2fs_issue_flush(sbi);
+ f2fs_update_time(sbi, REQ_TIME);
out:
trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
f2fs_trace_ios(NULL, 1);
return ret;
}
+int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ return f2fs_do_sync_file(file, start, end, datasync, false);
+}
+
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
pgoff_t pgofs, int whence)
{
@@ -300,7 +304,7 @@ static pgoff_t __get_first_dirty_index(struct address_space *mapping,
pagevec_init(&pvec, 0);
nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
PAGECACHE_TAG_DIRTY, 1);
- pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
+ pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
pagevec_release(&pvec);
return pgofs;
}
@@ -332,7 +336,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
loff_t isize;
int err = 0;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
isize = i_size_read(inode);
if (offset >= isize)
@@ -345,32 +349,31 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
goto found;
}
- pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);
+ pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
- for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+ for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
+ err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
if (err && err != -ENOENT) {
goto fail;
} else if (err == -ENOENT) {
 /* direct node does not exist */
if (whence == SEEK_DATA) {
- pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
- F2FS_I(inode));
+ pgofs = get_next_page_offset(&dn, pgofs);
continue;
} else {
goto found;
}
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
/* find data/hole in dnode block */
for (; dn.ofs_in_node < end_offset;
dn.ofs_in_node++, pgofs++,
- data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+ data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
block_t blkaddr;
blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
@@ -387,10 +390,10 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
found:
if (whence == SEEK_HOLE && data_ofs > isize)
data_ofs = isize;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return vfs_setpos(file, data_ofs, maxbytes);
fail:
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return -ENXIO;
}
@@ -418,19 +421,20 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
+ int err;
if (f2fs_encrypted_inode(inode)) {
- int err = f2fs_get_encryption_info(inode);
+ err = fscrypt_get_encryption_info(inode);
if (err)
return 0;
+ if (!f2fs_encrypted_inode(inode))
+ return -ENOKEY;
}
/* we don't need to use inline_data strictly */
- if (f2fs_has_inline_data(inode)) {
- int err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
file_accessed(file);
vma->vm_ops = &f2fs_file_vm_ops;
@@ -440,12 +444,22 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
int ret = generic_file_open(inode, filp);
+ struct dentry *dir;
if (!ret && f2fs_encrypted_inode(inode)) {
- ret = f2fs_get_encryption_info(inode);
+ ret = fscrypt_get_encryption_info(inode);
if (ret)
- ret = -EACCES;
+ return -EACCES;
+ if (!fscrypt_has_encryption_key(inode))
+ return -ENOKEY;
+ }
+ dir = dget_parent(file_dentry(filp));
+ if (f2fs_encrypted_inode(d_inode(dir)) &&
+ !fscrypt_has_permitted_context(d_inode(dir), inode)) {
+ dput(dir);
+ return -EPERM;
}
+ dput(dir);
return ret;
}
@@ -468,8 +482,7 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
set_data_blkaddr(dn);
invalidate_blocks(sbi, blkaddr);
if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
- clear_inode_flag(F2FS_I(dn->inode),
- FI_FIRST_BLOCK_WRITTEN);
+ clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
nr_free++;
}
@@ -480,14 +493,13 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
* we will invalidate all blkaddr in the whole range.
*/
fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
- F2FS_I(dn->inode)) + ofs;
+ dn->inode) + ofs;
f2fs_update_extent_cache_range(dn, fofs, 0, len);
dec_valid_block_count(sbi, dn->inode, nr_free);
- set_page_dirty(dn->node_page);
- sync_inode_page(dn);
}
dn->ofs_in_node = ofs;
+ f2fs_update_time(sbi, REQ_TIME);
trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
dn->ofs_in_node, nr_free);
return nr_free;
@@ -501,8 +513,8 @@ void truncate_data_blocks(struct dnode_of_data *dn)
static int truncate_partial_data_page(struct inode *inode, u64 from,
bool cache_only)
{
- unsigned offset = from & (PAGE_CACHE_SIZE - 1);
- pgoff_t index = from >> PAGE_CACHE_SHIFT;
+ unsigned offset = from & (PAGE_SIZE - 1);
+ pgoff_t index = from >> PAGE_SHIFT;
struct address_space *mapping = inode->i_mapping;
struct page *page;
@@ -510,7 +522,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
return 0;
if (cache_only) {
- page = f2fs_grab_cache_page(mapping, index, false);
+ page = find_lock_page(mapping, index);
if (page && PageUptodate(page))
goto truncate_out;
f2fs_put_page(page, 1);
@@ -521,9 +533,10 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
if (IS_ERR(page))
return 0;
truncate_out:
- f2fs_wait_on_page_writeback(page, DATA);
- zero_user(page, offset, PAGE_CACHE_SIZE - offset);
- if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ zero_user(page, offset, PAGE_SIZE - offset);
+ if (!cache_only || !f2fs_encrypted_inode(inode) ||
+ !S_ISREG(inode->i_mode))
set_page_dirty(page);
f2fs_put_page(page, 1);
return 0;
@@ -543,6 +556,9 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
+ if (free_from >= sbi->max_file_blocks)
+ goto free_partial;
+
if (lock)
f2fs_lock_op(sbi);
@@ -561,14 +577,14 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
}
set_new_dnode(&dn, inode, ipage, NULL, 0);
- err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
+ err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
if (err) {
if (err == -ENOENT)
goto free_next;
goto out;
}
- count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ count = ADDRS_PER_PAGE(dn.node_page, inode);
count -= dn.ofs_in_node;
f2fs_bug_on(sbi, count < 0);
@@ -584,7 +600,7 @@ free_next:
out:
if (lock)
f2fs_unlock_op(sbi);
-
+free_partial:
/* lastly zero out the first data page */
if (!err)
err = truncate_partial_data_page(inode, from, truncate_page);
@@ -593,7 +609,7 @@ out:
return err;
}
-int f2fs_truncate(struct inode *inode, bool lock)
+int f2fs_truncate(struct inode *inode)
{
int err;
@@ -604,18 +620,18 @@ int f2fs_truncate(struct inode *inode, bool lock)
trace_f2fs_truncate(inode);
/* we should check inline_data size */
- if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
+ if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
}
- err = truncate_blocks(inode, i_size_read(inode), lock);
+ err = truncate_blocks(inode, i_size_read(inode), true);
if (err)
return err;
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
return 0;
}
@@ -631,7 +647,6 @@ int f2fs_getattr(struct vfsmount *mnt,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int ia_valid = attr->ia_valid;
if (ia_valid & ATTR_UID)
@@ -652,7 +667,7 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
mode &= ~S_ISGID;
- set_acl_inode(fi, mode);
+ set_acl_inode(inode, mode);
}
}
#else
@@ -662,8 +677,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
- struct f2fs_inode_info *fi = F2FS_I(inode);
int err;
+ bool size_changed = false;
err = inode_change_ok(inode, attr);
if (err)
@@ -671,36 +686,49 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid & ATTR_SIZE) {
if (f2fs_encrypted_inode(inode) &&
- f2fs_get_encryption_info(inode))
+ fscrypt_get_encryption_info(inode))
return -EACCES;
if (attr->ia_size <= i_size_read(inode)) {
truncate_setsize(inode, attr->ia_size);
- err = f2fs_truncate(inode, true);
+ err = f2fs_truncate(inode);
if (err)
return err;
- f2fs_balance_fs(F2FS_I_SB(inode));
} else {
/*
* do not trim all blocks after i_size if target size is
* larger than i_size.
*/
truncate_setsize(inode, attr->ia_size);
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+
+ /* should convert inline inode here */
+ if (!f2fs_may_inline_data(inode)) {
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ }
+ inode->i_mtime = inode->i_ctime = current_time(inode);
}
+
+ size_changed = true;
}
__setattr_copy(inode, attr);
if (attr->ia_valid & ATTR_MODE) {
err = posix_acl_chmod(inode, get_inode_mode(inode));
- if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
- inode->i_mode = fi->i_acl_mode;
- clear_inode_flag(fi, FI_ACL_MODE);
+ if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ clear_inode_flag(inode, FI_ACL_MODE);
}
}
- mark_inode_dirty(inode);
+ /* file size may have changed here */
+ f2fs_mark_inode_dirty_sync(inode, size_changed);
+
+ /* inode change will produce dirty node pages flushed by checkpoint */
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
return err;
}
@@ -727,7 +755,7 @@ static int fill_zero(struct inode *inode, pgoff_t index,
if (!len)
return 0;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
page = get_new_data_page(inode, NULL, index, false);
@@ -736,7 +764,7 @@ static int fill_zero(struct inode *inode, pgoff_t index,
if (IS_ERR(page))
return PTR_ERR(page);
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, true);
zero_user(page, start, len);
set_page_dirty(page);
f2fs_put_page(page, 1);
@@ -761,7 +789,7 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
return err;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
@@ -778,19 +806,17 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
pgoff_t pg_start, pg_end;
loff_t off_start, off_end;
- int ret = 0;
+ int ret;
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ off_start = offset & (PAGE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_SIZE - 1);
if (pg_start == pg_end) {
ret = fill_zero(inode, pg_start, off_start,
@@ -800,7 +826,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
} else {
if (off_start) {
ret = fill_zero(inode, pg_start++, off_start,
- PAGE_CACHE_SIZE - off_start);
+ PAGE_SIZE - off_start);
if (ret)
return ret;
}
@@ -815,10 +841,10 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
loff_t blk_start, blk_end;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
- blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
- blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
+ blk_start = (loff_t)pg_start << PAGE_SHIFT;
+ blk_end = (loff_t)pg_end << PAGE_SHIFT;
truncate_inode_pages_range(mapping, blk_start,
blk_end - 1);
@@ -831,83 +857,199 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
return ret;
}
-static int __exchange_data_block(struct inode *inode, pgoff_t src,
- pgoff_t dst, bool full)
+static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
+ int *do_replace, pgoff_t off, pgoff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
- block_t new_addr;
- bool do_replace = false;
- int ret;
+ int ret, done, i;
+next_dnode:
set_new_dnode(&dn, inode, NULL, NULL, 0);
- ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
+ ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
if (ret && ret != -ENOENT) {
return ret;
} else if (ret == -ENOENT) {
- new_addr = NULL_ADDR;
- } else {
- new_addr = dn.data_blkaddr;
- if (!is_checkpointed_data(sbi, new_addr)) {
- dn.data_blkaddr = NULL_ADDR;
+ if (dn.max_level == 0)
+ return -ENOENT;
+ done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
+ blkaddr += done;
+ do_replace += done;
+ goto next;
+ }
+
+ done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
+ dn.ofs_in_node, len);
+ for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
+ *blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ if (!is_checkpointed_data(sbi, *blkaddr)) {
+
+ if (test_opt(sbi, LFS)) {
+ f2fs_put_dnode(&dn);
+ return -ENOTSUPP;
+ }
+
/* do not invalidate this block address */
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
- do_replace = true;
+ f2fs_update_data_blkaddr(&dn, NULL_ADDR);
+ *do_replace = 1;
}
- f2fs_put_dnode(&dn);
}
+ f2fs_put_dnode(&dn);
+next:
+ len -= done;
+ off += done;
+ if (len)
+ goto next_dnode;
+ return 0;
+}
- if (new_addr == NULL_ADDR)
- return full ? truncate_hole(inode, dst, dst + 1) : 0;
+static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
+ int *do_replace, pgoff_t off, int len)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct dnode_of_data dn;
+ int ret, i;
- if (do_replace) {
- struct page *ipage = get_node_page(sbi, inode->i_ino);
- struct node_info ni;
+ for (i = 0; i < len; i++, do_replace++, blkaddr++) {
+ if (*do_replace == 0)
+ continue;
- if (IS_ERR(ipage)) {
- ret = PTR_ERR(ipage);
- goto err_out;
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
+ if (ret) {
+ dec_valid_block_count(sbi, inode, 1);
+ invalidate_blocks(sbi, *blkaddr);
+ } else {
+ f2fs_update_data_blkaddr(&dn, *blkaddr);
}
+ f2fs_put_dnode(&dn);
+ }
+ return 0;
+}
- set_new_dnode(&dn, inode, ipage, NULL, 0);
- ret = f2fs_reserve_block(&dn, dst);
- if (ret)
- goto err_out;
+static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+ block_t *blkaddr, int *do_replace,
+ pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
+ pgoff_t i = 0;
+ int ret;
+
+ while (i < len) {
+ if (blkaddr[i] == NULL_ADDR && !full) {
+ i++;
+ continue;
+ }
- truncate_data_blocks_range(&dn, 1);
+ if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
+ struct dnode_of_data dn;
+ struct node_info ni;
+ size_t new_size;
+ pgoff_t ilen;
- get_node_info(sbi, dn.nid, &ni);
- f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
- ni.version, true);
- f2fs_put_dnode(&dn);
- } else {
- struct page *psrc, *pdst;
+ set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
+ if (ret)
+ return ret;
+
+ get_node_info(sbi, dn.nid, &ni);
+ ilen = min((pgoff_t)
+ ADDRS_PER_PAGE(dn.node_page, dst_inode) -
+ dn.ofs_in_node, len - i);
+ do {
+ dn.data_blkaddr = datablock_addr(dn.node_page,
+ dn.ofs_in_node);
+ truncate_data_blocks_range(&dn, 1);
+
+ if (do_replace[i]) {
+ f2fs_i_blocks_write(src_inode,
+ 1, false);
+ f2fs_i_blocks_write(dst_inode,
+ 1, true);
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+ blkaddr[i], ni.version, true, false);
+
+ do_replace[i] = 0;
+ }
+ dn.ofs_in_node++;
+ i++;
+ new_size = (dst + i) << PAGE_SHIFT;
+ if (dst_inode->i_size < new_size)
+ f2fs_i_size_write(dst_inode, new_size);
+ } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
- psrc = get_lock_data_page(inode, src, true);
- if (IS_ERR(psrc))
- return PTR_ERR(psrc);
- pdst = get_new_data_page(inode, NULL, dst, false);
- if (IS_ERR(pdst)) {
+ f2fs_put_dnode(&dn);
+ } else {
+ struct page *psrc, *pdst;
+
+ psrc = get_lock_data_page(src_inode, src + i, true);
+ if (IS_ERR(psrc))
+ return PTR_ERR(psrc);
+ pdst = get_new_data_page(dst_inode, NULL, dst + i,
+ true);
+ if (IS_ERR(pdst)) {
+ f2fs_put_page(psrc, 1);
+ return PTR_ERR(pdst);
+ }
+ f2fs_copy_page(psrc, pdst);
+ set_page_dirty(pdst);
+ f2fs_put_page(pdst, 1);
f2fs_put_page(psrc, 1);
- return PTR_ERR(pdst);
- }
- f2fs_copy_page(psrc, pdst);
- set_page_dirty(pdst);
- f2fs_put_page(pdst, 1);
- f2fs_put_page(psrc, 1);
- return truncate_hole(inode, src, src + 1);
+ ret = truncate_hole(src_inode, src + i, src + i + 1);
+ if (ret)
+ return ret;
+ i++;
+ }
}
return 0;
+}
-err_out:
- if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
- dn.data_blkaddr = new_addr;
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
- f2fs_put_dnode(&dn);
+static int __exchange_data_block(struct inode *src_inode,
+ struct inode *dst_inode, pgoff_t src, pgoff_t dst,
+ pgoff_t len, bool full)
+{
+ block_t *src_blkaddr;
+ int *do_replace;
+ pgoff_t olen;
+ int ret;
+
+ while (len) {
+ olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
+
+ src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
+ if (!src_blkaddr)
+ return -ENOMEM;
+
+ do_replace = f2fs_kvzalloc(sizeof(int) * olen, GFP_KERNEL);
+ if (!do_replace) {
+ kvfree(src_blkaddr);
+ return -ENOMEM;
+ }
+
+ ret = __read_out_blkaddrs(src_inode, src_blkaddr,
+ do_replace, src, olen);
+ if (ret)
+ goto roll_back;
+
+ ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
+ do_replace, src, dst, olen, full);
+ if (ret)
+ goto roll_back;
+
+ src += olen;
+ dst += olen;
+ len -= olen;
+
+ kvfree(src_blkaddr);
+ kvfree(do_replace);
}
+ return 0;
+
+roll_back:
+ __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
+ kvfree(src_blkaddr);
+ kvfree(do_replace);
return ret;
}
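
Taken together, the rewritten __exchange_data_block() replaces the old one-page-at-a-time loop with a chunked, three-phase design: __read_out_blkaddrs() snapshots up to 4 * ADDRS_PER_BLOCK block addresses and detaches non-checkpointed ones, __clone_blkaddrs() replays them at the destination, and __roll_back_blkaddrs() restores the snapshot if either phase fails. The src/dst inode split also leaves room for callers that exchange blocks between two different files. A minimal same-inode usage sketch, mirroring the collapse path below:

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);	/* cached extents are about to be stale */
	ret = __exchange_data_block(inode, inode, src, dst, len, true);
	f2fs_unlock_op(sbi);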
@@ -915,16 +1057,15 @@ static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
- int ret = 0;
+ int ret;
- for (; end < nrpages; start++, end++) {
- f2fs_balance_fs(sbi);
- f2fs_lock_op(sbi);
- ret = __exchange_data_block(inode, end, start, true);
- f2fs_unlock_op(sbi);
- if (ret)
- break;
- }
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+
+ f2fs_drop_extent_tree(inode);
+
+ ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
+ f2fs_unlock_op(sbi);
return ret;
}
@@ -941,16 +1082,12 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
return -EINVAL;
- f2fs_balance_fs(F2FS_I_SB(inode));
-
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
- pg_start = offset >> PAGE_CACHE_SHIFT;
- pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = offset >> PAGE_SHIFT;
+ pg_end = (offset + len) >> PAGE_SHIFT;
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
@@ -972,7 +1109,50 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
ret = truncate_blocks(inode, new_size, true);
if (!ret)
- i_size_write(inode, new_size);
+ f2fs_i_size_write(inode, new_size);
+
+ return ret;
+}
+
+static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+ pgoff_t end)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ pgoff_t index = start;
+ unsigned int ofs_in_node = dn->ofs_in_node;
+ blkcnt_t count = 0;
+ int ret;
+
+ for (; index < end; index++, dn->ofs_in_node++) {
+ if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR)
+ count++;
+ }
+
+ dn->ofs_in_node = ofs_in_node;
+ ret = reserve_new_blocks(dn, count);
+ if (ret)
+ return ret;
+
+ dn->ofs_in_node = ofs_in_node;
+ for (index = start; index < end; index++, dn->ofs_in_node++) {
+ dn->data_blkaddr =
+ datablock_addr(dn->node_page, dn->ofs_in_node);
+ /*
+ * reserve_new_blocks() does not guarantee that every
+ * requested block was allocated.
+ */
+ if (dn->data_blkaddr == NULL_ADDR) {
+ ret = -ENOSPC;
+ break;
+ }
+ if (dn->data_blkaddr != NEW_ADDR) {
+ invalidate_blocks(sbi, dn->data_blkaddr);
+ dn->data_blkaddr = NEW_ADDR;
+ set_data_blkaddr(dn);
+ }
+ }
+
+ f2fs_update_extent_cache_range(dn, start, 0, index - start);
return ret;
}
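
f2fs_do_zero_range() works in two passes over one dnode block: the first pass counts NULL_ADDR slots so reserve_new_blocks() can charge quota for all of them at once, and the second pass invalidates any existing blocks and rewrites every slot as NEW_ADDR, i.e. allocated but not yet written, so it reads back as zeroes. A worked example: for slots [A, NULL, B, NULL] with A and B valid addresses, pass one reserves two blocks in a single call; pass two invalidates A and B and leaves all four slots as NEW_ADDR.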
@@ -991,13 +1171,9 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
if (ret)
return ret;
- f2fs_balance_fs(sbi);
-
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
if (ret)
@@ -1005,11 +1181,11 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
truncate_pagecache_range(inode, offset, offset + len - 1);
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ off_start = offset & (PAGE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_SIZE - 1);
if (pg_start == pg_end) {
ret = fill_zero(inode, pg_start, off_start,
@@ -1023,48 +1199,43 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
} else {
if (off_start) {
ret = fill_zero(inode, pg_start++, off_start,
- PAGE_CACHE_SIZE - off_start);
+ PAGE_SIZE - off_start);
if (ret)
return ret;
new_size = max_t(loff_t, new_size,
- (loff_t)pg_start << PAGE_CACHE_SHIFT);
+ (loff_t)pg_start << PAGE_SHIFT);
}
- for (index = pg_start; index < pg_end; index++) {
+ for (index = pg_start; index < pg_end;) {
struct dnode_of_data dn;
- struct page *ipage;
+ unsigned int end_offset;
+ pgoff_t end;
f2fs_lock_op(sbi);
- ipage = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- ret = PTR_ERR(ipage);
- f2fs_unlock_op(sbi);
- goto out;
- }
-
- set_new_dnode(&dn, inode, ipage, NULL, 0);
- ret = f2fs_reserve_block(&dn, index);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
if (ret) {
f2fs_unlock_op(sbi);
goto out;
}
- if (dn.data_blkaddr != NEW_ADDR) {
- invalidate_blocks(sbi, dn.data_blkaddr);
-
- dn.data_blkaddr = NEW_ADDR;
- set_data_blkaddr(&dn);
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end = min(pg_end, end_offset - dn.ofs_in_node + index);
- dn.data_blkaddr = NULL_ADDR;
- f2fs_update_extent_cache(&dn);
- }
+ ret = f2fs_do_zero_range(&dn, index, end);
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
+
+ if (ret)
+ goto out;
+
+ index = end;
new_size = max_t(loff_t, new_size,
- (loff_t)(index + 1) << PAGE_CACHE_SHIFT);
+ (loff_t)index << PAGE_SHIFT);
}
if (off_end) {
@@ -1077,11 +1248,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
}
out:
- if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
- i_size_write(inode, new_size);
- mark_inode_dirty(inode);
- update_inode_page(inode);
- }
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+ f2fs_i_size_write(inode, new_size);
return ret;
}
@@ -1089,7 +1257,7 @@ out:
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t pg_start, pg_end, delta, nrpages, idx;
+ pgoff_t nr, pg_start, pg_end, delta, idx;
loff_t new_size;
int ret = 0;
@@ -1104,13 +1272,11 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
return -EINVAL;
- f2fs_balance_fs(sbi);
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ f2fs_balance_fs(sbi, true);
ret = truncate_blocks(inode, i_size_read(inode), true);
if (ret)
@@ -1123,17 +1289,23 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
truncate_pagecache(inode, offset);
- pg_start = offset >> PAGE_CACHE_SHIFT;
- pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = offset >> PAGE_SHIFT;
+ pg_end = (offset + len) >> PAGE_SHIFT;
delta = pg_end - pg_start;
- nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ while (!ret && idx > pg_start) {
+ nr = idx - pg_start;
+ if (nr > delta)
+ nr = delta;
+ idx -= nr;
- for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
f2fs_lock_op(sbi);
- ret = __exchange_data_block(inode, idx, idx + delta, false);
+ f2fs_drop_extent_tree(inode);
+
+ ret = __exchange_data_block(inode, inode, idx,
+ idx + delta, nr, false);
f2fs_unlock_op(sbi);
- if (ret)
- break;
}
/* write out all moved pages, if possible */
@@ -1141,7 +1313,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
truncate_pagecache(inode, offset);
if (!ret)
- i_size_write(inode, new_size);
+ f2fs_i_size_write(inode, new_size);
return ret;
}
@@ -1149,62 +1321,50 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
loff_t len, int mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t index, pg_start, pg_end;
+ struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+ pgoff_t pg_end;
loff_t new_size = i_size_read(inode);
- loff_t off_start, off_end;
- int ret = 0;
+ loff_t off_end;
+ int err;
- f2fs_balance_fs(sbi);
+ err = inode_newsize_ok(inode, (len + offset));
+ if (err)
+ return err;
- ret = inode_newsize_ok(inode, (len + offset));
- if (ret)
- return ret;
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ f2fs_balance_fs(sbi, true);
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
+ off_end = (offset + len) & (PAGE_SIZE - 1);
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
+ map.m_len = pg_end - map.m_lblk;
+ if (off_end)
+ map.m_len++;
- f2fs_lock_op(sbi);
+ err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ if (err) {
+ pgoff_t last_off;
- for (index = pg_start; index <= pg_end; index++) {
- struct dnode_of_data dn;
+ if (!map.m_len)
+ return err;
- if (index == pg_end && !off_end)
- goto noalloc;
+ last_off = map.m_lblk + map.m_len - 1;
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- ret = f2fs_reserve_block(&dn, index);
- if (ret)
- break;
-noalloc:
- if (pg_start == pg_end)
- new_size = offset + len;
- else if (index == pg_start && off_start)
- new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
- else if (index == pg_end)
- new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
- off_end;
- else
- new_size += PAGE_CACHE_SIZE;
+ /* update new size up to the last successfully mapped position */
+ new_size = (last_off == pg_end) ? offset + len :
+ (loff_t)(last_off + 1) << PAGE_SHIFT;
+ } else {
+ new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
}
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- i_size_read(inode) < new_size) {
- i_size_write(inode, new_size);
- mark_inode_dirty(inode);
- update_inode_page(inode);
- }
- f2fs_unlock_op(sbi);
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+ f2fs_i_size_write(inode, new_size);
- return ret;
+ return err;
}
static long f2fs_fallocate(struct file *file, int mode,
@@ -1226,7 +1386,7 @@ static long f2fs_fallocate(struct file *file, int mode,
FALLOC_FL_INSERT_RANGE))
return -EOPNOTSUPP;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (offset >= inode->i_size)
@@ -1244,12 +1404,15 @@ static long f2fs_fallocate(struct file *file, int mode,
}
if (!ret) {
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ file_set_keep_isize(inode);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
out:
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
trace_f2fs_fallocate(inode, mode, offset, len, ret);
return ret;
@@ -1257,13 +1420,22 @@ out:
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
+ /*
+ * f2fs_release_file is called on every close, so we should not
+ * drop any in-memory pages on a close issued by another process.
+ */
+ if (!(filp->f_mode & FMODE_WRITE) ||
+ atomic_read(&inode->i_writecount) != 1)
+ return 0;
+
 /* any remaining atomic pages should be discarded */
if (f2fs_is_atomic_file(inode))
- commit_inmem_pages(inode, true);
+ drop_inmem_pages(inode);
if (f2fs_is_volatile_file(inode)) {
- set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+ clear_inode_flag(inode, FI_VOLATILE_FILE);
+ set_inode_flag(inode, FI_DROP_CACHE);
filemap_fdatawrite(inode->i_mapping);
- clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+ clear_inode_flag(inode, FI_DROP_CACHE);
}
return 0;
}
@@ -1293,33 +1465,29 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
- unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
+ unsigned int flags;
unsigned int oldflags;
int ret;
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (get_user(flags, (int __user *)arg))
+ return -EFAULT;
+
ret = mnt_want_write_file(filp);
if (ret)
return ret;
- if (!inode_owner_or_capable(inode)) {
- ret = -EACCES;
- goto out;
- }
-
- if (get_user(flags, (int __user *)arg)) {
- ret = -EFAULT;
- goto out;
- }
-
flags = f2fs_mask_flags(inode->i_mode, flags);
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
oldflags = fi->i_flags;
if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
ret = -EPERM;
goto out;
}
@@ -1328,11 +1496,10 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
flags = flags & FS_FL_USER_MODIFIABLE;
flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
fi->i_flags = flags;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
+ inode->i_ctime = current_time(inode);
f2fs_set_inode_flags(inode);
- inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
out:
mnt_drop_write_file(filp);
return ret;
@@ -1353,17 +1520,35 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
- f2fs_balance_fs(F2FS_I_SB(inode));
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
if (f2fs_is_atomic_file(inode))
- return 0;
+ goto out;
ret = f2fs_convert_inline_inode(inode);
if (ret)
- return ret;
+ goto out;
- set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
- return 0;
+ set_inode_flag(inode, FI_ATOMIC_FILE);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+
+ if (!get_dirty_pages(inode))
+ goto out;
+
+ f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+ "Unexpected flush for atomic writes: ino=%lu, npages=%u",
+ inode->i_ino, get_dirty_pages(inode));
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+ if (ret)
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
@@ -1374,22 +1559,27 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
- if (f2fs_is_volatile_file(inode))
- return 0;
-
ret = mnt_want_write_file(filp);
if (ret)
return ret;
+ inode_lock(inode);
+
+ if (f2fs_is_volatile_file(inode))
+ goto err_out;
+
if (f2fs_is_atomic_file(inode)) {
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
- ret = commit_inmem_pages(inode, false);
- if (ret)
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+ ret = commit_inmem_pages(inode);
+ if (ret) {
+ set_inode_flag(inode, FI_ATOMIC_FILE);
goto err_out;
+ }
}
- ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
+ ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
err_out:
+ inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
}
@@ -1402,31 +1592,54 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
if (f2fs_is_volatile_file(inode))
- return 0;
+ goto out;
ret = f2fs_convert_inline_inode(inode);
if (ret)
- return ret;
+ goto out;
- set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
- return 0;
+ set_inode_flag(inode, FI_VOLATILE_FILE);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
+ int ret;
if (!inode_owner_or_capable(inode))
return -EACCES;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
if (!f2fs_is_volatile_file(inode))
- return 0;
+ goto out;
- if (!f2fs_is_first_block_written(inode))
- return truncate_partial_data_page(inode, 0, true);
+ if (!f2fs_is_first_block_written(inode)) {
+ ret = truncate_partial_data_page(inode, 0, true);
+ goto out;
+ }
- return punch_hole(inode, 0, F2FS_BLKSIZE);
+ ret = punch_hole(inode, 0, F2FS_BLKSIZE);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
@@ -1441,13 +1654,19 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
if (ret)
return ret;
- f2fs_balance_fs(F2FS_I_SB(inode));
+ inode_lock(inode);
+
+ if (f2fs_is_atomic_file(inode))
+ drop_inmem_pages(inode);
+ if (f2fs_is_volatile_file(inode)) {
+ clear_inode_flag(inode, FI_VOLATILE_FILE);
+ ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
+ }
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
- clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
- commit_inmem_pages(inode, true);
+ inode_unlock(inode);
mnt_drop_write_file(filp);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return ret;
}
@@ -1457,6 +1676,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
__u32 in;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1464,30 +1684,38 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
if (get_user(in, (__u32 __user *)arg))
return -EFAULT;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
switch (in) {
case F2FS_GOING_DOWN_FULLSYNC:
sb = freeze_bdev(sb->s_bdev);
if (sb && !IS_ERR(sb)) {
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
thaw_bdev(sb->s_bdev, sb);
}
break;
case F2FS_GOING_DOWN_METASYNC:
/* do checkpoint only */
f2fs_sync_fs(sb, 1);
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
break;
case F2FS_GOING_DOWN_NOSYNC:
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
break;
case F2FS_GOING_DOWN_METAFLUSH:
sync_meta_pages(sbi, META, LONG_MAX);
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return 0;
+ f2fs_update_time(sbi, REQ_TIME);
+out:
+ mnt_drop_write_file(filp);
+ return ret;
}
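
For completeness, a hedged userspace sketch of driving this ioctl. The kernel side above reads a __u32 via get_user(), so userspace passes a pointer; the macro values below are copied from fs/f2fs/f2fs.h of this era and should be verified against the actual tree:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>

	/* assumed definitions; verify against fs/f2fs/f2fs.h */
	#define F2FS_IOC_SHUTDOWN		_IOR('X', 125, __u32)
	#define F2FS_GOING_DOWN_METASYNC	0x1	/* checkpoint, then stop */

	static int f2fs_shutdown(const char *path, __u32 how)
	{
		int fd = open(path, O_RDONLY);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
		close(fd);
		return ret;
	}

Calling f2fs_shutdown("/mnt/f2fs", F2FS_GOING_DOWN_METASYNC) requires CAP_SYS_ADMIN, per the capable() check in the handler above.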
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
@@ -1508,15 +1736,21 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
sizeof(range)))
return -EFAULT;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
range.minlen = max((unsigned int)range.minlen,
q->limits.discard_granularity);
ret = f2fs_trim_fs(F2FS_SB(sb), &range);
+ mnt_drop_write_file(filp);
if (ret < 0)
return ret;
if (copy_to_user((struct fstrim_range __user *)arg, &range,
sizeof(range)))
return -EFAULT;
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return 0;
}
@@ -1532,45 +1766,31 @@ static bool uuid_is_nonzero(__u8 u[16])
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- struct f2fs_encryption_policy policy;
+ struct fscrypt_policy policy;
struct inode *inode = file_inode(filp);
- int err;
- if (copy_from_user(&policy, (struct f2fs_encryption_policy __user *)arg,
- sizeof(policy)))
+ if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
+ sizeof(policy)))
return -EFAULT;
- mutex_lock(&inode->i_mutex);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
- err = f2fs_process_policy(&policy, inode);
-
- mutex_unlock(&inode->i_mutex);
-
- return err;
-#else
- return -EOPNOTSUPP;
-#endif
+ return fscrypt_process_policy(filp, &policy);
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- struct f2fs_encryption_policy policy;
+ struct fscrypt_policy policy;
struct inode *inode = file_inode(filp);
int err;
- err = f2fs_get_policy(inode, &policy);
+ err = fscrypt_get_policy(inode, &policy);
if (err)
return err;
- if (copy_to_user((struct f2fs_encryption_policy __user *)arg, &policy,
- sizeof(policy)))
+ if (copy_to_user((struct fscrypt_policy __user *)arg, &policy, sizeof(policy)))
return -EFAULT;
return 0;
-#else
- return -EOPNOTSUPP;
-#endif
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
@@ -1593,13 +1813,13 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
err = f2fs_commit_super(sbi, false);
-
- mnt_drop_write_file(filp);
if (err) {
/* undo new data */
memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
+ mnt_drop_write_file(filp);
return err;
}
+ mnt_drop_write_file(filp);
got_it:
if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
16))
@@ -1612,6 +1832,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
__u32 sync;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1622,21 +1843,30 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
if (f2fs_readonly(sbi->sb))
return -EROFS;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
if (!sync) {
- if (!mutex_trylock(&sbi->gc_mutex))
- return -EBUSY;
+ if (!mutex_trylock(&sbi->gc_mutex)) {
+ ret = -EBUSY;
+ goto out;
+ }
} else {
mutex_lock(&sbi->gc_mutex);
}
- return f2fs_gc(sbi, sync);
+ ret = f2fs_gc(sbi, sync, true);
+out:
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct cp_control cpc;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1644,13 +1874,343 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
if (f2fs_readonly(sbi->sb))
return -EROFS;
- cpc.reason = __get_cp_reason(sbi);
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ ret = f2fs_sync_fs(sbi->sb, 1);
- mutex_lock(&sbi->gc_mutex);
- write_checkpoint(sbi, &cpc);
- mutex_unlock(&sbi->gc_mutex);
+ mnt_drop_write_file(filp);
+ return ret;
+}
- return 0;
+static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ struct file *filp,
+ struct f2fs_defragment *range)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+ struct extent_info ei;
+ pgoff_t pg_start, pg_end;
+ unsigned int blk_per_seg = sbi->blocks_per_seg;
+ unsigned int total = 0, sec_num;
+ unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
+ block_t blk_end = 0;
+ bool fragmented = false;
+ int err;
+
+ /* if in-place-update policy is enabled, don't waste time here */
+ if (need_inplace_update(inode))
+ return -EINVAL;
+
+ pg_start = range->start >> PAGE_SHIFT;
+ pg_end = (range->start + range->len) >> PAGE_SHIFT;
+
+ f2fs_balance_fs(sbi, true);
+
+ inode_lock(inode);
+
+ /* writeback all dirty pages in the range */
+ err = filemap_write_and_wait_range(inode->i_mapping, range->start,
+ range->start + range->len - 1);
+ if (err)
+ goto out;
+
+ /*
+ * look up mapping info in the extent cache; skip defragmenting if the
+ * physical block addresses are contiguous.
+ */
+ if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
+ if (ei.fofs + ei.len >= pg_end)
+ goto out;
+ }
+
+ map.m_lblk = pg_start;
+
+ /*
+ * look up mapping info in the dnode page cache; skip defragmenting if
+ * all physical block addresses are contiguous, even if there are holes
+ * in the logical blocks.
+ */
+ while (map.m_lblk < pg_end) {
+ map.m_len = pg_end - map.m_lblk;
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
+ if (err)
+ goto out;
+
+ if (!(map.m_flags & F2FS_MAP_FLAGS)) {
+ map.m_lblk++;
+ continue;
+ }
+
+ if (blk_end && blk_end != map.m_pblk) {
+ fragmented = true;
+ break;
+ }
+ blk_end = map.m_pblk + map.m_len;
+
+ map.m_lblk += map.m_len;
+ }
+
+ if (!fragmented)
+ goto out;
+
+ map.m_lblk = pg_start;
+ map.m_len = pg_end - pg_start;
+
+ sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;
+
+ /*
+ * make sure there are enough free sections for LFS allocation; this
+ * avoids running defragmentation in SSR mode while free sections are
+ * being allocated intensively
+ */
+ if (has_not_enough_free_secs(sbi, 0, sec_num)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ while (map.m_lblk < pg_end) {
+ pgoff_t idx;
+ int cnt = 0;
+
+do_map:
+ map.m_len = pg_end - map.m_lblk;
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
+ if (err)
+ goto clear_out;
+
+ if (!(map.m_flags & F2FS_MAP_FLAGS)) {
+ map.m_lblk++;
+ continue;
+ }
+
+ set_inode_flag(inode, FI_DO_DEFRAG);
+
+ idx = map.m_lblk;
+ while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
+ struct page *page;
+
+ page = get_lock_data_page(inode, idx, true);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto clear_out;
+ }
+
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+
+ idx++;
+ cnt++;
+ total++;
+ }
+
+ map.m_lblk = idx;
+
+ if (idx < pg_end && cnt < blk_per_seg)
+ goto do_map;
+
+ clear_inode_flag(inode, FI_DO_DEFRAG);
+
+ err = filemap_fdatawrite(inode->i_mapping);
+ if (err)
+ goto out;
+ }
+clear_out:
+ clear_inode_flag(inode, FI_DO_DEFRAG);
+out:
+ inode_unlock(inode);
+ if (!err)
+ range->len = (u64)total << PAGE_SHIFT;
+ return err;
+}
+
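The first mapping walk above only decides whether defragmentation is worth doing: blk_end tracks where the previous extent ended, and the range is declared fragmented as soon as a mapped extent starts anywhere else. A minimal userspace sketch of that test, with an illustrative extent type that is not part of f2fs:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ext { uint64_t pblk; uint64_t len; };	/* physical start, length */

static bool is_fragmented(const struct ext *e, size_t n)
{
	uint64_t blk_end = 0;

	for (size_t i = 0; i < n; i++) {
		/* a mapped extent must start where the last one ended */
		if (blk_end && blk_end != e[i].pblk)
			return true;
		blk_end = e[i].pblk + e[i].len;
	}
	return false;
}

int main(void)
{
	struct ext contig[] = { {100, 8}, {108, 4} };
	struct ext frag[]   = { {100, 8}, {200, 4} };

	printf("%d %d\n", is_fragmented(contig, 2), is_fragmented(frag, 2));
	return 0;	/* prints "0 1" */
}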
+static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_defragment range;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
+
+ if (f2fs_readonly(sbi->sb)) {
+ err = -EROFS;
+ goto out;
+ }
+
+ if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
+ sizeof(range))) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ /* verify alignment of offset & size */
+ if (range.start & (F2FS_BLKSIZE - 1) ||
+ range.len & (F2FS_BLKSIZE - 1)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = f2fs_defragment_range(sbi, filp, &range);
+ f2fs_update_time(sbi, REQ_TIME);
+ if (err < 0)
+ goto out;
+
+ if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
+ sizeof(range)))
+ err = -EFAULT;
+out:
+ mnt_drop_write_file(filp);
+ return err;
+}
+
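The alignment check that returns -EINVAL above works because F2FS_BLKSIZE is a power of two, so x & (F2FS_BLKSIZE - 1) is x modulo the block size. A self-contained illustration, with 4096 standing in for F2FS_BLKSIZE:

#include <assert.h>
#include <stdint.h>

#define BLKSIZE 4096u	/* assumed power-of-two block size */

static int is_blk_aligned(uint64_t x)
{
	return (x & (BLKSIZE - 1)) == 0;	/* same trick as the ioctl */
}

int main(void)
{
	assert(is_blk_aligned(0));
	assert(is_blk_aligned(2 * BLKSIZE));
	assert(!is_blk_aligned(BLKSIZE - 1));
	return 0;
}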
+static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, size_t len)
+{
+ struct inode *src = file_inode(file_in);
+ struct inode *dst = file_inode(file_out);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(src);
+ size_t olen = len, dst_max_i_size = 0;
+ size_t dst_osize;
+ int ret;
+
+ if (file_in->f_path.mnt != file_out->f_path.mnt ||
+ src->i_sb != dst->i_sb)
+ return -EXDEV;
+
+ if (unlikely(f2fs_readonly(src->i_sb)))
+ return -EROFS;
+
+ if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
+ return -EINVAL;
+
+ if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
+ return -EOPNOTSUPP;
+
+ if (src == dst) {
+ if (pos_in == pos_out)
+ return 0;
+ if (pos_out > pos_in && pos_out < pos_in + len)
+ return -EINVAL;
+ }
+
+ inode_lock(src);
+ if (src != dst) {
+ if (!inode_trylock(dst)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ ret = -EINVAL;
+ if (pos_in + len > src->i_size || pos_in + len < pos_in)
+ goto out_unlock;
+ if (len == 0)
+ olen = len = src->i_size - pos_in;
+ if (pos_in + len == src->i_size)
+ len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
+ if (len == 0) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ dst_osize = dst->i_size;
+ if (pos_out + olen > dst->i_size)
+ dst_max_i_size = pos_out + olen;
+
+ /* verify the end result is block aligned */
+ if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
+ !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
+ !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
+ goto out_unlock;
+
+ ret = f2fs_convert_inline_inode(src);
+ if (ret)
+ goto out_unlock;
+
+ ret = f2fs_convert_inline_inode(dst);
+ if (ret)
+ goto out_unlock;
+
+ /* write out all dirty pages from offset */
+ ret = filemap_write_and_wait_range(src->i_mapping,
+ pos_in, pos_in + len);
+ if (ret)
+ goto out_unlock;
+
+ ret = filemap_write_and_wait_range(dst->i_mapping,
+ pos_out, pos_out + len);
+ if (ret)
+ goto out_unlock;
+
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+ ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
+ pos_out >> F2FS_BLKSIZE_BITS,
+ len >> F2FS_BLKSIZE_BITS, false);
+
+ if (!ret) {
+ if (dst_max_i_size)
+ f2fs_i_size_write(dst, dst_max_i_size);
+ else if (dst_osize != dst->i_size)
+ f2fs_i_size_write(dst, dst_osize);
+ }
+ f2fs_unlock_op(sbi);
+out_unlock:
+ if (src != dst)
+ inode_unlock(dst);
+out:
+ inode_unlock(src);
+ return ret;
+}
+
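When source and destination are the same inode, the guard above refuses the move only if the destination start lands strictly inside the source window, where the copy would clobber data it has not yet read. A sketch of the predicate with illustrative offsets:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool overlap_rejected(uint64_t pos_in, uint64_t pos_out, uint64_t len)
{
	/* destination begins strictly inside (pos_in, pos_in + len) */
	return pos_out > pos_in && pos_out < pos_in + len;
}

int main(void)
{
	printf("%d\n", overlap_rejected(0, 4096, 16384));	/* 1: rejected */
	printf("%d\n", overlap_rejected(0, 16384, 16384));	/* 0: disjoint */
	return 0;
}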
+static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
+{
+ struct f2fs_move_range range;
+ struct fd dst;
+ int err;
+
+ if (!(filp->f_mode & FMODE_READ) ||
+ !(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+
+ dst = fdget(range.dst_fd);
+ if (!dst.file)
+ return -EBADF;
+
+ if (!(dst.file->f_mode & FMODE_WRITE)) {
+ err = -EBADF;
+ goto err_out;
+ }
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ goto err_out;
+
+ err = f2fs_move_file_range(filp, range.pos_in, dst.file,
+ range.pos_out, range.len);
+
+ mnt_drop_write_file(filp);
+
+ if (copy_to_user((struct f2fs_move_range __user *)arg,
+ &range, sizeof(range)))
+ err = -EFAULT;
+err_out:
+ fdput(dst);
+ return err;
}
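
A hypothetical caller for the new F2FS_IOC_MOVE_RANGE could be written as below; the struct layout and the _IOWR(0xf5, 9, ...) command number are assumptions meant to match this tree's f2fs headers, so treat it as a sketch only:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

struct f2fs_move_range {
	uint32_t dst_fd;	/* destination file descriptor */
	uint64_t pos_in;	/* source offset */
	uint64_t pos_out;	/* destination offset */
	uint64_t len;		/* bytes to move */
};

#define F2FS_IOCTL_MAGIC	0xf5
#define F2FS_IOC_MOVE_RANGE	_IOWR(F2FS_IOCTL_MAGIC, 9, \
					struct f2fs_move_range)

int main(void)
{
	int src = open("a.dat", O_RDWR);	/* needs FMODE_READ|WRITE */
	int dst = open("b.dat", O_RDWR);
	struct f2fs_move_range mr;

	if (src < 0 || dst < 0)
		return 1;
	mr = (struct f2fs_move_range){ .dst_fd = dst, .len = 4096 };
	if (ioctl(src, F2FS_IOC_MOVE_RANGE, &mr) < 0)
		perror("F2FS_IOC_MOVE_RANGE");
	return 0;
}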
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
@@ -1686,6 +2246,10 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_gc(filp, arg);
case F2FS_IOC_WRITE_CHECKPOINT:
return f2fs_ioc_write_checkpoint(filp, arg);
+ case F2FS_IOC_DEFRAGMENT:
+ return f2fs_ioc_defragment(filp, arg);
+ case F2FS_IOC_MOVE_RANGE:
+ return f2fs_ioc_move_range(filp, arg);
default:
return -ENOTTY;
}
@@ -1693,14 +2257,39 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct inode *inode = file_inode(iocb->ki_filp);
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct blk_plug plug;
+ ssize_t ret;
if (f2fs_encrypted_inode(inode) &&
- !f2fs_has_encryption_key(inode) &&
- f2fs_get_encryption_info(inode))
+ !fscrypt_has_encryption_key(inode) &&
+ fscrypt_get_encryption_info(inode))
return -EACCES;
- return generic_file_write_iter(iocb, from);
+ inode_lock(inode);
+ ret = generic_write_checks(iocb, from);
+ if (ret > 0) {
+ int err = f2fs_preallocate_blocks(iocb, from);
+
+ if (err) {
+ inode_unlock(inode);
+ return err;
+ }
+ blk_start_plug(&plug);
+ ret = __generic_file_write_iter(iocb, from);
+ blk_finish_plug(&plug);
+ }
+ inode_unlock(inode);
+
+ if (ret > 0) {
+ ssize_t err;
+
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ if (err < 0)
+ ret = err;
+ }
+ return ret;
}
#ifdef CONFIG_COMPAT
@@ -1713,6 +2302,24 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC32_SETFLAGS:
cmd = F2FS_IOC_SETFLAGS;
break;
+ case F2FS_IOC32_GETVERSION:
+ cmd = F2FS_IOC_GETVERSION;
+ break;
+ case F2FS_IOC_START_ATOMIC_WRITE:
+ case F2FS_IOC_COMMIT_ATOMIC_WRITE:
+ case F2FS_IOC_START_VOLATILE_WRITE:
+ case F2FS_IOC_RELEASE_VOLATILE_WRITE:
+ case F2FS_IOC_ABORT_VOLATILE_WRITE:
+ case F2FS_IOC_SHUTDOWN:
+ case F2FS_IOC_SET_ENCRYPTION_POLICY:
+ case F2FS_IOC_GET_ENCRYPTION_PWSALT:
+ case F2FS_IOC_GET_ENCRYPTION_POLICY:
+ case F2FS_IOC_GARBAGE_COLLECT:
+ case F2FS_IOC_WRITE_CHECKPOINT:
+ case F2FS_IOC_DEFRAGMENT:
+ break;
+ case F2FS_IOC_MOVE_RANGE:
+ break;
default:
return -ENOIOCTLCMD;
}
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index fedbf67a0842..6390d45c1b68 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -16,7 +16,6 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
-#include <linux/blkdev.h>
#include "f2fs.h"
#include "node.h"
@@ -48,6 +47,11 @@ static int gc_thread_func(void *data)
continue;
}
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_CHECKPOINT))
+ f2fs_stop_checkpoint(sbi, false);
+#endif
+
/*
* [GC triggering condition]
* 0. GC is not conducted currently.
@@ -78,7 +82,7 @@ static int gc_thread_func(void *data)
stat_inc_bggc_count(sbi);
/* if return value is not zero, no victim was selected */
- if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
+ if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
wait_ms = gc_th->no_gc_sleep_time;
trace_f2fs_background_gc(sbi->sb, wait_ms,
@@ -97,7 +101,7 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
dev_t dev = sbi->sb->s_bdev->bd_dev;
int err = 0;
- gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
+ gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
if (!gc_th) {
err = -ENOMEM;
goto out;
@@ -173,9 +177,9 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
{
/* SSR allocates in a segment unit */
if (p->alloc_mode == SSR)
- return 1 << sbi->log_blocks_per_seg;
+ return sbi->blocks_per_seg;
if (p->gc_mode == GC_GREEDY)
- return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
+ return sbi->blocks_per_seg * p->ofs_unit;
else if (p->gc_mode == GC_CB)
return UINT_MAX;
else /* No other gc_mode */
@@ -246,6 +250,18 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
return get_cb_cost(sbi, segno);
}
+static unsigned int count_bits(const unsigned long *addr,
+ unsigned int offset, unsigned int len)
+{
+ unsigned int end = offset + len, sum = 0;
+
+ while (offset < end) {
+ if (test_bit(offset++, addr))
+ ++sum;
+ }
+ return sum;
+}
+
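count_bits() above charges the search budget per dirty segment inside a section rather than per visited section. A userspace equivalent, assuming a plain unsigned-long bitmap with bit 0 as the lowest bit of the first word (for illustration only):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)

static unsigned int count_bits(const unsigned long *addr,
			       unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (addr[offset / BITS_PER_LONG] &
		    (1UL << (offset % BITS_PER_LONG)))
			sum++;
		offset++;
	}
	return sum;
}

int main(void)
{
	unsigned long map[1] = { 0x16UL };	/* bits 1, 2 and 4 set */

	printf("%u\n", count_bits(map, 0, 5));	/* prints 3 */
	return 0;
}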
/*
* This function is called from two paths.
* One is garbage collection and the other is SSR segment selection.
@@ -259,9 +275,9 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct victim_sel_policy p;
- unsigned int secno, max_cost;
+ unsigned int secno, last_victim;
unsigned int last_segment = MAIN_SEGS(sbi);
- int nsearched = 0;
+ unsigned int nsearched = 0;
mutex_lock(&dirty_i->seglist_lock);
@@ -269,11 +285,12 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
select_policy(sbi, gc_type, type, &p);
p.min_segno = NULL_SEGNO;
- p.min_cost = max_cost = get_max_cost(sbi, &p);
+ p.min_cost = get_max_cost(sbi, &p);
if (p.max_search == 0)
goto out;
+ last_victim = sbi->last_victim[p.gc_mode];
if (p.alloc_mode == LFS && gc_type == FG_GC) {
p.min_segno = check_bg_victims(sbi);
if (p.min_segno != NULL_SEGNO)
@@ -296,27 +313,35 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
}
p.offset = segno + p.ofs_unit;
- if (p.ofs_unit > 1)
+ if (p.ofs_unit > 1) {
p.offset -= segno % p.ofs_unit;
+ nsearched += count_bits(p.dirty_segmap,
+ p.offset - p.ofs_unit,
+ p.ofs_unit);
+ } else {
+ nsearched++;
+ }
+
secno = GET_SECNO(sbi, segno);
if (sec_usage_check(sbi, secno))
- continue;
+ goto next;
if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
- continue;
+ goto next;
cost = get_gc_cost(sbi, segno, &p);
if (p.min_cost > cost) {
p.min_segno = segno;
p.min_cost = cost;
- } else if (unlikely(cost == max_cost)) {
- continue;
}
-
- if (nsearched++ >= p.max_search) {
- sbi->last_victim[p.gc_mode] = segno;
+next:
+ if (nsearched >= p.max_search) {
+ if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
+ sbi->last_victim[p.gc_mode] = last_victim + 1;
+ else
+ sbi->last_victim[p.gc_mode] = segno + 1;
break;
}
}
@@ -400,13 +425,13 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
* If valid, copy that node with cold status; otherwise (an invalid
* node), ignore it.
*/
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
- bool initial = true;
struct f2fs_summary *entry;
block_t start_addr;
int off;
+ int phase = 0;
start_addr = START_BLOCK(sbi, segno);
@@ -419,16 +444,24 @@ next_step:
struct node_info ni;
/* stop BG_GC if there are not enough free sections. */
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
- return 0;
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
+ return;
if (check_valid_map(sbi, segno, off) == 0)
continue;
- if (initial) {
+ if (phase == 0) {
+ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+ META_NAT, true);
+ continue;
+ }
+
+ if (phase == 1) {
ra_node_page(sbi, nid);
continue;
}
+
+ /* phase == 2 */
node_page = get_node_page(sbi, nid);
if (IS_ERR(node_page))
continue;
@@ -445,36 +478,12 @@ next_step:
continue;
}
- /* set page dirty and write it */
- if (gc_type == FG_GC) {
- f2fs_wait_on_page_writeback(node_page, NODE);
- set_page_dirty(node_page);
- } else {
- if (!PageWriteback(node_page))
- set_page_dirty(node_page);
- }
- f2fs_put_page(node_page, 1);
+ move_node_page(node_page, gc_type);
stat_inc_node_blk_count(sbi, 1, gc_type);
}
- if (initial) {
- initial = false;
+ if (++phase < 3)
goto next_step;
- }
-
- if (gc_type == FG_GC) {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = LONG_MAX,
- .for_reclaim = 0,
- };
- sync_node_pages(sbi, 0, &wbc);
-
- /* return 1 only if FG_GC succefully reclaimed one */
- if (get_valid_blocks(sbi, segno, 1) == 0)
- return 1;
- }
- return 0;
}
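
gc_node_segment() now makes three passes over the same summary entries: phase 0 reads ahead the NAT blocks, phase 1 reads ahead the node pages, and phase 2 does the actual moves. A compact sketch of that phased-scan shape, with placeholder callbacks that are not f2fs functions:

#include <stdio.h>

#define NPHASES 3

static void ra_meta(int i) { printf("phase 0: readahead meta %d\n", i); }
static void ra_node(int i) { printf("phase 1: readahead node %d\n", i); }
static void do_move(int i) { printf("phase 2: move %d\n", i); }

static void gc_scan(int nentries)
{
	int phase = 0;

next_step:
	for (int i = 0; i < nentries; i++) {
		if (phase == 0) { ra_meta(i); continue; }
		if (phase == 1) { ra_node(i); continue; }
		do_move(i);	/* phase == 2 */
	}
	if (++phase < NPHASES)
		goto next_step;
}

int main(void)
{
	gc_scan(2);
	return 0;
}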
/*
@@ -484,7 +493,7 @@ next_step:
* as indirect or double indirect node blocks, are given, it must be a caller's
* bug.
*/
-block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
+block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
unsigned int bidx;
@@ -501,7 +510,7 @@ block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
bidx = node_ofs - 5 - dec;
}
- return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
+ return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -535,7 +544,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
return true;
}
-static void move_encrypted_block(struct inode *inode, block_t bidx)
+static void move_encrypted_block(struct inode *inode, block_t bidx,
+ unsigned int segno, int off)
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
@@ -547,6 +557,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
struct f2fs_summary sum;
struct node_info ni;
struct page *page;
+ block_t newaddr;
int err;
/* do not read out */
@@ -554,6 +565,9 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
if (!page)
return;
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ goto out;
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
if (err)
@@ -568,21 +582,24 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
* don't cache encrypted data into meta inode until previous dirty
* data has been written back, to avoid racing between GC and flush.
*/
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, true);
get_node_info(fio.sbi, dn.nid, &ni);
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
/* read page */
fio.page = page;
- fio.blk_addr = dn.data_blkaddr;
+ fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
- fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
- fio.blk_addr,
- FGP_LOCK|FGP_CREAT,
- GFP_NOFS);
- if (!fio.encrypted_page)
- goto put_out;
+ allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
+ &sum, CURSEG_COLD_DATA);
+
+ fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
+ FGP_LOCK | FGP_CREAT, GFP_NOFS);
+ if (!fio.encrypted_page) {
+ err = -ENOMEM;
+ goto recover_block;
+ }
err = f2fs_submit_page_bio(&fio);
if (err)
@@ -591,40 +608,47 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
/* write page */
lock_page(fio.encrypted_page);
- if (unlikely(!PageUptodate(fio.encrypted_page)))
+ if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
+ err = -EIO;
goto put_page_out;
- if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
+ }
+ if (unlikely(!PageUptodate(fio.encrypted_page))) {
+ err = -EIO;
goto put_page_out;
+ }
set_page_dirty(fio.encrypted_page);
- f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
+ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
if (clear_page_dirty_for_io(fio.encrypted_page))
dec_page_count(fio.sbi, F2FS_DIRTY_META);
set_page_writeback(fio.encrypted_page);
/* allocate block address */
- f2fs_wait_on_page_writeback(dn.node_page, NODE);
- allocate_data_block(fio.sbi, NULL, fio.blk_addr,
- &fio.blk_addr, &sum, CURSEG_COLD_DATA);
+ f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+
fio.rw = WRITE_SYNC;
+ fio.new_blkaddr = newaddr;
f2fs_submit_page_mbio(&fio);
- dn.data_blkaddr = fio.blk_addr;
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+ f2fs_update_data_blkaddr(&dn, newaddr);
+ set_inode_flag(inode, FI_APPEND_WRITE);
if (page->index == 0)
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
f2fs_put_page(fio.encrypted_page, 1);
+recover_block:
+ if (err)
+ __f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
+ true, true);
put_out:
f2fs_put_dnode(&dn);
out:
f2fs_put_page(page, 1);
}
-static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
+static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
+ unsigned int segno, int off)
{
struct page *page;
@@ -632,6 +656,9 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
if (IS_ERR(page))
return;
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ goto out;
+
if (gc_type == BG_GC) {
if (PageWriteback(page))
goto out;
@@ -645,13 +672,24 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
.page = page,
.encrypted_page = NULL,
};
+ bool is_dirty = PageDirty(page);
+ int err;
+
+retry:
set_page_dirty(page);
- f2fs_wait_on_page_writeback(page, DATA);
- if (clear_page_dirty_for_io(page))
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ if (clear_page_dirty_for_io(page)) {
inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ }
+
set_cold_data(page);
- do_write_data_page(&fio);
- clear_cold_data(page);
+
+ err = do_write_data_page(&fio);
+ if (err == -ENOMEM && is_dirty) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
}
out:
f2fs_put_page(page, 1);
@@ -664,7 +702,7 @@ out:
* If the parent node is not valid or the data block address is different,
* the victim data block is ignored.
*/
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
struct super_block *sb = sbi->sb;
@@ -684,16 +722,23 @@ next_step:
struct node_info dni; /* dnode info for the data */
unsigned int ofs_in_node, nofs;
block_t start_bidx;
+ nid_t nid = le32_to_cpu(entry->nid);
/* stop BG_GC if there are not enough free sections. */
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
- return 0;
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
+ return;
if (check_valid_map(sbi, segno, off) == 0)
continue;
if (phase == 0) {
- ra_node_page(sbi, le32_to_cpu(entry->nid));
+ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+ META_NAT, true);
+ continue;
+ }
+
+ if (phase == 1) {
+ ra_node_page(sbi, nid);
continue;
}
@@ -701,14 +746,14 @@ next_step:
if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
continue;
- if (phase == 1) {
+ if (phase == 2) {
ra_node_page(sbi, dni.ino);
continue;
}
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
- if (phase == 2) {
+ if (phase == 3) {
inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
@@ -720,7 +765,7 @@ next_step:
continue;
}
- start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
+ start_bidx = start_bidx_of_node(nofs, inode);
data_page = get_read_data_page(inode,
start_bidx + ofs_in_node, READA, true);
if (IS_ERR(data_page)) {
@@ -733,30 +778,41 @@ next_step:
continue;
}
- /* phase 3 */
+ /* phase 4 */
inode = find_gc_inode(gc_list, dni.ino);
if (inode) {
- start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ bool locked = false;
+
+ if (S_ISREG(inode->i_mode)) {
+ if (!down_write_trylock(&fi->dio_rwsem[READ]))
+ continue;
+ if (!down_write_trylock(
+ &fi->dio_rwsem[WRITE])) {
+ up_write(&fi->dio_rwsem[READ]);
+ continue;
+ }
+ locked = true;
+ }
+
+ start_bidx = start_bidx_of_node(nofs, inode)
+ ofs_in_node;
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- move_encrypted_block(inode, start_bidx);
+ move_encrypted_block(inode, start_bidx, segno, off);
else
- move_data_page(inode, start_bidx, gc_type);
+ move_data_page(inode, start_bidx, gc_type, segno, off);
+
+ if (locked) {
+ up_write(&fi->dio_rwsem[WRITE]);
+ up_write(&fi->dio_rwsem[READ]);
+ }
+
stat_inc_data_blk_count(sbi, 1, gc_type);
}
}
- if (++phase < 4)
+ if (++phase < 5)
goto next_step;
-
- if (gc_type == FG_GC) {
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
-
- /* return 1 only if FG_GC succefully reclaimed one */
- if (get_valid_blocks(sbi, segno, 1) == 0)
- return 1;
- }
- return 0;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -772,51 +828,84 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
return ret;
}
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ unsigned int start_segno,
struct gc_inode_list *gc_list, int gc_type)
{
struct page *sum_page;
struct f2fs_summary_block *sum;
struct blk_plug plug;
- int nfree = 0;
+ unsigned int segno = start_segno;
+ unsigned int end_segno = start_segno + sbi->segs_per_sec;
+ int sec_freed = 0;
+ unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
+ SUM_TYPE_DATA : SUM_TYPE_NODE;
+
+ /* read ahead multiple SSA blocks that have contiguous addresses */
+ if (sbi->segs_per_sec > 1)
+ ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
+ sbi->segs_per_sec, META_SSA, true);
- /* read segment summary of victim */
- sum_page = get_sum_page(sbi, segno);
+ /* reference all summary pages */
+ while (segno < end_segno) {
+ sum_page = get_sum_page(sbi, segno++);
+ unlock_page(sum_page);
+ }
blk_start_plug(&plug);
- sum = page_address(sum_page);
+ for (segno = start_segno; segno < end_segno; segno++) {
- /*
- * this is to avoid deadlock:
- * - lock_page(sum_page) - f2fs_replace_block
- * - check_valid_map() - mutex_lock(sentry_lock)
- * - mutex_lock(sentry_lock) - change_curseg()
- * - lock_page(sum_page)
- */
- unlock_page(sum_page);
-
- switch (GET_SUM_TYPE((&sum->footer))) {
- case SUM_TYPE_NODE:
- nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
- break;
- case SUM_TYPE_DATA:
- nfree = gc_data_segment(sbi, sum->entries, gc_list,
- segno, gc_type);
- break;
+ /* find segment summary of victim */
+ sum_page = find_get_page(META_MAPPING(sbi),
+ GET_SUM_BLOCK(sbi, segno));
+ f2fs_put_page(sum_page, 0);
+
+ if (get_valid_blocks(sbi, segno, 1) == 0 ||
+ !PageUptodate(sum_page) ||
+ unlikely(f2fs_cp_error(sbi)))
+ goto next;
+
+ sum = page_address(sum_page);
+ f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+
+ /*
+ * this is to avoid deadlock:
+ * - lock_page(sum_page) - f2fs_replace_block
+ * - check_valid_map() - mutex_lock(sentry_lock)
+ * - mutex_lock(sentry_lock) - change_curseg()
+ * - lock_page(sum_page)
+ */
+
+ if (type == SUM_TYPE_NODE)
+ gc_node_segment(sbi, sum->entries, segno, gc_type);
+ else
+ gc_data_segment(sbi, sum->entries, gc_list, segno,
+ gc_type);
+
+ stat_inc_seg_count(sbi, type, gc_type);
+next:
+ f2fs_put_page(sum_page, 0);
}
+
+ if (gc_type == FG_GC)
+ f2fs_submit_merged_bio(sbi,
+ (type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);
+
blk_finish_plug(&plug);
- stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
+ if (gc_type == FG_GC &&
+ get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
+ sec_freed = 1;
+
stat_inc_call_count(sbi->stat_info);
- f2fs_put_page(sum_page, 0);
- return nfree;
+ return sec_freed;
}
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
+int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
{
- unsigned int segno, i;
+ unsigned int segno;
int gc_type = sync ? FG_GC : BG_GC;
int sec_freed = 0;
int ret = -EINVAL;
@@ -832,46 +921,51 @@ gc_more:
if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
goto stop;
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ ret = -EIO;
goto stop;
+ }
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
gc_type = FG_GC;
- if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
- write_checkpoint(sbi, &cpc);
+ /*
+ * If there is no victim and no prefree segment but still not
+ * enough free sections, we should flush dent/node blocks and do
+ * garbage collection.
+ */
+ if (__get_victim(sbi, &segno, gc_type) ||
+ prefree_segments(sbi)) {
+ ret = write_checkpoint(sbi, &cpc);
+ if (ret)
+ goto stop;
+ segno = NULL_SEGNO;
+ } else if (has_not_enough_free_secs(sbi, 0, 0)) {
+ ret = write_checkpoint(sbi, &cpc);
+ if (ret)
+ goto stop;
+ }
+ } else if (gc_type == BG_GC && !background) {
+ /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
+ goto stop;
}
if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
goto stop;
ret = 0;
- /* readahead multi ssa blocks those have contiguous address */
- if (sbi->segs_per_sec > 1)
- ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
- META_SSA, true);
-
- for (i = 0; i < sbi->segs_per_sec; i++) {
- /*
- * for FG_GC case, halt gcing left segments once failed one
- * of segments in selected section to avoid long latency.
- */
- if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
- gc_type == FG_GC)
- break;
- }
-
- if (i == sbi->segs_per_sec && gc_type == FG_GC)
+ if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
+ gc_type == FG_GC)
sec_freed++;
if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO;
if (!sync) {
- if (has_not_enough_free_secs(sbi, sec_freed))
+ if (has_not_enough_free_secs(sbi, sec_freed, 0))
goto gc_more;
if (gc_type == FG_GC)
- write_checkpoint(sbi, &cpc);
+ ret = write_checkpoint(sbi, &cpc);
}
stop:
mutex_unlock(&sbi->gc_mutex);
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index b4a65be9f7d3..a993967dcdb9 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -100,11 +100,3 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
return true;
return false;
}
-
-static inline int is_idle(struct f2fs_sb_info *sbi)
-{
- struct block_device *bdev = sbi->sb->s_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- struct request_list *rl = &q->root_rl;
- return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
-}
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index b238d2fec3e5..71b7206c431e 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -70,8 +70,7 @@ static void str2hashbuf(const unsigned char *msg, size_t len,
*buf++ = pad;
}
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
- struct f2fs_filename *fname)
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
{
__u32 hash;
f2fs_hash_t f2fs_hash;
@@ -80,10 +79,6 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
const unsigned char *name = name_info->name;
size_t len = name_info->len;
- /* encrypted bigname case */
- if (fname && !fname->disk_name.name)
- return cpu_to_le32(fname->hash);
-
if (is_dot_dotdot(name_info))
return 0;
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index f35f3eb3541f..b85987703d1e 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -17,9 +17,6 @@
bool f2fs_may_inline_data(struct inode *inode)
{
- if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
- return false;
-
if (f2fs_is_atomic_file(inode))
return false;
@@ -55,7 +52,7 @@ void read_inline_data(struct page *page, struct page *ipage)
f2fs_bug_on(F2FS_P_SB(page), page->index);
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+ zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
/* Copy the whole inline data block */
src_addr = inline_data_addr(ipage);
@@ -63,7 +60,8 @@ void read_inline_data(struct page *page, struct page *ipage)
memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
flush_dcache_page(page);
kunmap_atomic(dst_addr);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
}
bool truncate_inline_inode(struct page *ipage, u64 from)
@@ -75,9 +73,9 @@ bool truncate_inline_inode(struct page *ipage, u64 from)
addr = inline_data_addr(ipage);
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
memset(addr + from, 0, MAX_INLINE_DATA - from);
-
+ set_page_dirty(ipage);
return true;
}
@@ -112,11 +110,12 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
}
if (page->index)
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
else
read_inline_data(page, ipage);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
f2fs_put_page(ipage, 1);
trace_android_fs_dataread_end(inode, page_offset(page),
PAGE_SIZE);
@@ -126,7 +125,6 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
- void *src_addr, *dst_addr;
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(dn->inode),
.type = DATA,
@@ -136,8 +134,6 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
};
int dirty, err;
- f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);
-
if (!f2fs_exist_data(dn->inode))
goto clear_out;
@@ -145,21 +141,9 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
if (err)
return err;
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
- if (PageUptodate(page))
- goto no_update;
-
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
-
- /* Copy the whole inline data block */
- src_addr = inline_data_addr(dn->inode_page);
- dst_addr = kmap_atomic(page);
- memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
- flush_dcache_page(page);
- kunmap_atomic(dst_addr);
- SetPageUptodate(page);
-no_update:
+ read_inline_data(page, dn->inode_page);
set_page_dirty(page);
/* clear dirty state */
@@ -167,23 +151,23 @@ no_update:
/* write data page to try to make data consistent */
set_page_writeback(page);
- fio.blk_addr = dn->data_blkaddr;
+ fio.old_blkaddr = dn->data_blkaddr;
write_data_page(dn, &fio);
- set_data_blkaddr(dn);
- f2fs_update_extent_cache(dn);
- f2fs_wait_on_page_writeback(page, DATA);
- if (dirty)
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ if (dirty) {
inode_dec_dirty_pages(dn->inode);
+ remove_dirty_inode(dn->inode);
+ }
/* this converted inline_data should be recovered. */
- set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);
+ set_inode_flag(dn->inode, FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
truncate_inline_inode(dn->inode_page, 0);
+ clear_inline_node(dn->inode_page);
clear_out:
stat_dec_inline_inode(dn->inode);
f2fs_clear_inline_inode(dn->inode);
- sync_inode_page(dn);
f2fs_put_dnode(dn);
return 0;
}
@@ -195,7 +179,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
struct page *ipage, *page;
int err = 0;
- page = grab_cache_page(inode->i_mapping, 0);
+ if (!f2fs_has_inline_data(inode))
+ return 0;
+
+ page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
if (!page)
return -ENOMEM;
@@ -217,6 +204,9 @@ out:
f2fs_unlock_op(sbi);
f2fs_put_page(page, 1);
+
+ f2fs_balance_fs(sbi, dn.node_changed);
+
return err;
}
@@ -238,16 +228,17 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
f2fs_bug_on(F2FS_I_SB(inode), page->index);
- f2fs_wait_on_page_writeback(dn.inode_page, NODE);
+ f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
src_addr = kmap_atomic(page);
dst_addr = inline_data_addr(dn.inode_page);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
kunmap_atomic(src_addr);
+ set_page_dirty(dn.inode_page);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ set_inode_flag(inode, FI_APPEND_WRITE);
+ set_inode_flag(inode, FI_DATA_EXIST);
- sync_inode_page(&dn);
+ clear_inline_node(dn.inode_page);
f2fs_put_dnode(&dn);
return 0;
}
@@ -276,16 +267,16 @@ process_inline:
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
src_addr = inline_data_addr(npage);
dst_addr = inline_data_addr(ipage);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ set_inode_flag(inode, FI_INLINE_DATA);
+ set_inode_flag(inode, FI_DATA_EXIST);
- update_inode(inode, ipage);
+ set_page_dirty(ipage);
f2fs_put_page(ipage, 1);
return true;
}
@@ -296,7 +287,6 @@ process_inline:
if (!truncate_inline_inode(ipage, 0))
return false;
f2fs_clear_inline_inode(inode);
- update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
if (truncate_blocks(inode, 0, false))
@@ -307,7 +297,7 @@ process_inline:
}
struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
- struct f2fs_filename *fname, struct page **res_page)
+ struct fscrypt_name *fname, struct page **res_page)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
struct f2fs_inline_dentry *inline_dentry;
@@ -318,10 +308,12 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
f2fs_hash_t namehash;
ipage = get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
+ if (IS_ERR(ipage)) {
+ *res_page = ipage;
return NULL;
+ }
- namehash = f2fs_dentry_hash(&name, fname);
+ namehash = f2fs_dentry_hash(&name);
inline_dentry = inline_data_addr(ipage);
@@ -333,30 +325,6 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
else
f2fs_put_page(ipage, 0);
- /*
- * For the most part, it should be a bug when name_len is zero.
- * We stop here for figuring out where the bugs has occurred.
- */
- f2fs_bug_on(sbi, d.max < 0);
- return de;
-}
-
-struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
- struct page **p)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct page *ipage;
- struct f2fs_dir_entry *de;
- struct f2fs_inline_dentry *dentry_blk;
-
- ipage = get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
- return NULL;
-
- dentry_blk = inline_data_addr(ipage);
- de = &dentry_blk->dentry[1];
- *p = ipage;
- unlock_page(ipage);
return de;
}
@@ -374,10 +342,8 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
set_page_dirty(ipage);
/* update i_size to MAX_INLINE_DATA */
- if (i_size_read(inode) < MAX_INLINE_DATA) {
- i_size_write(inode, MAX_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
- }
+ if (i_size_read(inode) < MAX_INLINE_DATA)
+ f2fs_i_size_write(inode, MAX_INLINE_DATA);
return 0;
}
@@ -385,7 +351,7 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
* NOTE: ipage is grabbed by caller, but if any error occurs, we should
* release ipage in this function.
*/
-static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
struct f2fs_inline_dentry *inline_dentry)
{
struct page *page;
@@ -393,7 +359,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
struct f2fs_dentry_block *dentry_blk;
int err;
- page = grab_cache_page(dir->i_mapping, 0);
+ page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
if (!page) {
f2fs_put_page(ipage, 1);
return -ENOMEM;
@@ -404,8 +370,8 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
if (err)
goto out;
- f2fs_wait_on_page_writeback(page, DATA);
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
dentry_blk = kmap_atomic(page);
@@ -426,37 +392,132 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
NR_INLINE_DENTRY * F2FS_SLOT_LEN);
kunmap_atomic(dentry_blk);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
set_page_dirty(page);
/* clear inline dir and flag after data writeback */
truncate_inline_inode(ipage, 0);
stat_dec_inline_dir(dir);
- clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
+ clear_inode_flag(dir, FI_INLINE_DENTRY);
- if (i_size_read(dir) < PAGE_CACHE_SIZE) {
- i_size_write(dir, PAGE_CACHE_SIZE);
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
-
- sync_inode_page(&dn);
+ f2fs_i_depth_write(dir, 1);
+ if (i_size_read(dir) < PAGE_SIZE)
+ f2fs_i_size_write(dir, PAGE_SIZE);
out:
f2fs_put_page(page, 1);
return err;
}
-int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
- struct inode *inode, nid_t ino, umode_t mode)
+static int f2fs_add_inline_entries(struct inode *dir,
+ struct f2fs_inline_dentry *inline_dentry)
+{
+ struct f2fs_dentry_ptr d;
+ unsigned long bit_pos = 0;
+ int err = 0;
+
+ make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
+
+ while (bit_pos < d.max) {
+ struct f2fs_dir_entry *de;
+ struct qstr new_name;
+ nid_t ino;
+ umode_t fake_mode;
+
+ if (!test_bit_le(bit_pos, d.bitmap)) {
+ bit_pos++;
+ continue;
+ }
+
+ de = &d.dentry[bit_pos];
+
+ if (unlikely(!de->name_len)) {
+ bit_pos++;
+ continue;
+ }
+
+ new_name.name = d.filename[bit_pos];
+ new_name.len = le16_to_cpu(de->name_len);
+
+ ino = le32_to_cpu(de->ino);
+ fake_mode = get_de_type(de) << S_SHIFT;
+
+ err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
+ ino, fake_mode);
+ if (err)
+ goto punch_dentry_pages;
+
+ bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+ }
+ return 0;
+punch_dentry_pages:
+ truncate_inode_pages(&dir->i_data, 0);
+ truncate_blocks(dir, 0, false);
+ remove_dirty_inode(dir);
+ return err;
+}
+
+static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
+ struct f2fs_inline_dentry *inline_dentry)
+{
+ struct f2fs_inline_dentry *backup_dentry;
+ int err;
+
+ backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
+ sizeof(struct f2fs_inline_dentry), GFP_F2FS_ZERO);
+ if (!backup_dentry) {
+ f2fs_put_page(ipage, 1);
+ return -ENOMEM;
+ }
+
+ memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
+ truncate_inline_inode(ipage, 0);
+
+ unlock_page(ipage);
+
+ err = f2fs_add_inline_entries(dir, backup_dentry);
+ if (err)
+ goto recover;
+
+ lock_page(ipage);
+
+ stat_dec_inline_dir(dir);
+ clear_inode_flag(dir, FI_INLINE_DENTRY);
+ kfree(backup_dentry);
+ return 0;
+recover:
+ lock_page(ipage);
+ memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
+ f2fs_i_depth_write(dir, 0);
+ f2fs_i_size_write(dir, MAX_INLINE_DATA);
+ set_page_dirty(ipage);
+ f2fs_put_page(ipage, 1);
+
+ kfree(backup_dentry);
+ return err;
+}
+
+static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+ struct f2fs_inline_dentry *inline_dentry)
+{
+ if (!F2FS_I(dir)->i_dir_level)
+ return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
+ else
+ return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
+}
+
+int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
+ struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
unsigned int bit_pos;
f2fs_hash_t name_hash;
- size_t namelen = name->len;
struct f2fs_inline_dentry *dentry_blk = NULL;
struct f2fs_dentry_ptr d;
- int slots = GET_DENTRY_SLOTS(namelen);
+ int slots = GET_DENTRY_SLOTS(new_name->len);
struct page *page = NULL;
int err = 0;
@@ -477,25 +538,27 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, name, ipage);
+ page = init_inode_metadata(inode, dir, new_name,
+ orig_name, ipage);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
}
+ if (f2fs_encrypted_inode(dir))
+ file_set_enc_name(inode);
}
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
- name_hash = f2fs_dentry_hash(name, NULL);
+ name_hash = f2fs_dentry_hash(new_name);
make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
- f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);
+ f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
set_page_dirty(ipage);
/* we don't need to mark_inode_dirty now */
if (inode) {
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
+ f2fs_i_pino_write(inode, dir->i_ino);
f2fs_put_page(page, 1);
}
@@ -503,11 +566,6 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
fail:
if (inode)
up_write(&F2FS_I(inode)->i_sem);
-
- if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
- update_inode(dir, ipage);
- clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
out:
f2fs_put_page(ipage, 1);
return err;
@@ -522,22 +580,22 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
int i;
lock_page(page);
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
inline_dentry = inline_data_addr(page);
bit_pos = dentry - inline_dentry->dentry;
for (i = 0; i < slots; i++)
- test_and_clear_bit_le(bit_pos + i,
+ __clear_bit_le(bit_pos + i,
&inline_dentry->dentry_bitmap);
set_page_dirty(page);
+ f2fs_put_page(page, 1);
- dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
- f2fs_drop_nlink(dir, inode, page);
-
- f2fs_put_page(page, 1);
+ f2fs_drop_nlink(dir, inode);
}
bool f2fs_empty_inline_dir(struct inode *dir)
@@ -565,12 +623,13 @@ bool f2fs_empty_inline_dir(struct inode *dir)
}
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
- struct f2fs_str *fstr)
+ struct fscrypt_str *fstr)
{
struct inode *inode = file_inode(file);
struct f2fs_inline_dentry *inline_dentry = NULL;
struct page *ipage = NULL;
struct f2fs_dentry_ptr d;
+ int err;
if (ctx->pos == NR_INLINE_DENTRY)
return 0;
@@ -583,11 +642,12 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);
- if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
+ err = f2fs_fill_dentries(ctx, &d, 0, fstr);
+ if (!err)
ctx->pos = NR_INLINE_DENTRY;
f2fs_put_page(ipage, 1);
- return 0;
+ return err < 0 ? err : 0;
}
int f2fs_inline_data_fiemap(struct inode *inode,
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 97e20decacb4..af06bda51a54 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include "f2fs.h"
@@ -18,6 +19,14 @@
#include <trace/events/f2fs.h>
+void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
+{
+ if (f2fs_inode_dirtied(inode, sync))
+ return;
+
+ mark_inode_dirty_sync(inode);
+}
+
void f2fs_set_inode_flags(struct inode *inode)
{
unsigned int flags = F2FS_I(inode)->i_flags;
@@ -35,6 +44,7 @@ void f2fs_set_inode_flags(struct inode *inode)
new_fl |= S_DIRSYNC;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ f2fs_mark_inode_dirty_sync(inode, false);
}
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -83,10 +93,10 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
while (start < end) {
if (*start++) {
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
- set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
+ set_inode_flag(inode, FI_DATA_EXIST);
+ set_raw_inline(inode, F2FS_INODE(ipage));
set_page_dirty(ipage);
return;
}
@@ -138,9 +148,10 @@ static int do_read_inode(struct inode *inode)
fi->i_pino = le32_to_cpu(ri->i_pino);
fi->i_dir_level = ri->i_dir_level;
- f2fs_init_extent_tree(inode, &ri->i_ext);
+ if (f2fs_init_extent_tree(inode, &ri->i_ext))
+ set_page_dirty(node_page);
- get_inline_info(fi, ri);
+ get_inline_info(inode, ri);
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
@@ -150,7 +161,10 @@ static int do_read_inode(struct inode *inode)
__get_inode_rdev(inode, ri);
if (__written_first_block(ri))
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+
+ if (!need_inode_block_update(sbi, inode->i_ino))
+ fi->last_disk_size = inode->i_size;
f2fs_put_page(node_page, 1);
@@ -202,6 +216,7 @@ make_now:
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
else
inode->i_op = &f2fs_symlink_inode_operations;
+ inode_nohighmem(inode);
inode->i_mapping->a_ops = &f2fs_dblock_aops;
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
@@ -221,11 +236,28 @@ bad_inode:
return ERR_PTR(ret);
}
-void update_inode(struct inode *inode, struct page *node_page)
+struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
+{
+ struct inode *inode;
+retry:
+ inode = f2fs_iget(sb, ino);
+ if (IS_ERR(inode)) {
+ if (PTR_ERR(inode) == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
+ }
+ return inode;
+}
+
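f2fs_iget_retry() above keeps retrying on -ENOMEM with a short congestion wait rather than failing the lookup. A userspace analogue of that pattern (relying on POSIX malloc setting errno, with a fixed 20 ms backoff standing in for congestion_wait(BLK_RW_ASYNC, HZ/50)):

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

static void *alloc_retry(size_t n)
{
	void *p;

	/* retry only genuine out-of-memory failures, with a backoff */
	while (!(p = malloc(n)) && errno == ENOMEM)
		usleep(20 * 1000);	/* ~HZ/50 worth of sleep */
	return p;
}

int main(void)
{
	free(alloc_retry(4096));
	return 0;
}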
+int update_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_inode *ri;
+ struct extent_tree *et = F2FS_I(inode)->extent_tree;
+
+ f2fs_inode_synced(inode);
- f2fs_wait_on_page_writeback(node_page, NODE);
+ f2fs_wait_on_page_writeback(node_page, NODE, true);
ri = F2FS_INODE(node_page);
@@ -237,12 +269,14 @@ void update_inode(struct inode *inode, struct page *node_page)
ri->i_size = cpu_to_le64(i_size_read(inode));
ri->i_blocks = cpu_to_le64(inode->i_blocks);
- if (F2FS_I(inode)->extent_tree)
- set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
- &ri->i_ext);
- else
+ if (et) {
+ read_lock(&et->lock);
+ set_raw_extent(&et->largest, &ri->i_ext);
+ read_unlock(&et->lock);
+ } else {
memset(&ri->i_ext, 0, sizeof(ri->i_ext));
- set_raw_inline(F2FS_I(inode), ri);
+ }
+ set_raw_inline(inode, ri);
ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
@@ -259,15 +293,19 @@ void update_inode(struct inode *inode, struct page *node_page)
__set_inode_rdev(inode, ri);
set_cold_node(inode, node_page);
- set_page_dirty(node_page);
- clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
+ /* deleted inode */
+ if (inode->i_nlink == 0)
+ clear_inline_node(node_page);
+
+ return set_page_dirty(node_page);
}
-void update_inode_page(struct inode *inode)
+int update_inode_page(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *node_page;
+ int ret = 0;
retry:
node_page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(node_page)) {
@@ -276,12 +314,14 @@ retry:
cond_resched();
goto retry;
} else if (err != -ENOENT) {
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
}
- return;
+ f2fs_inode_synced(inode);
+ return 0;
}
- update_inode(inode, node_page);
+ ret = update_inode(inode, node_page);
f2fs_put_page(node_page, 1);
+ return ret;
}
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -292,16 +332,15 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
inode->i_ino == F2FS_META_INO(sbi))
return 0;
- if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
+ if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;
/*
* We need to balance the fs here to avoid producing dirty node pages
* during urgent cleaning when running out of free sections.
*/
- update_inode_page(inode);
-
- f2fs_balance_fs(sbi);
+ if (update_inode_page(inode) && wbc && wbc->nr_to_write)
+ f2fs_balance_fs(sbi, true);
return 0;
}
@@ -311,13 +350,12 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
void f2fs_evict_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
- nid_t xnid = fi->i_xattr_nid;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
int err = 0;
/* any remaining atomic pages should be discarded */
if (f2fs_is_atomic_file(inode))
- commit_inmem_pages(inode, true);
+ drop_inmem_pages(inode);
trace_f2fs_evict_inode(inode);
truncate_inode_pages_final(&inode->i_data);
@@ -327,26 +365,44 @@ void f2fs_evict_inode(struct inode *inode)
goto out_clear;
f2fs_bug_on(sbi, get_dirty_pages(inode));
- remove_dirty_dir_inode(inode);
+ remove_dirty_inode(inode);
f2fs_destroy_extent_tree(inode);
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_EVICT_INODE))
+ goto no_delete;
+#endif
+
+ remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
+ remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+
sb_start_intwrite(inode->i_sb);
- set_inode_flag(fi, FI_NO_ALLOC);
+ set_inode_flag(inode, FI_NO_ALLOC);
i_size_write(inode, 0);
-
+retry:
if (F2FS_HAS_BLOCKS(inode))
- err = f2fs_truncate(inode, true);
+ err = f2fs_truncate(inode);
if (!err) {
f2fs_lock_op(sbi);
err = remove_inode_page(inode);
f2fs_unlock_op(sbi);
+ if (err == -ENOENT)
+ err = 0;
}
+ /* give it more chances in the ENOMEM case */
+ if (err == -ENOMEM) {
+ err = 0;
+ goto retry;
+ }
+
+ if (err)
+ update_inode_page(inode);
sb_end_intwrite(inode->i_sb);
no_delete:
stat_dec_inline_xattr(inode);
@@ -356,36 +412,20 @@ no_delete:
invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
if (xnid)
invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
- if (is_inode_flag_set(fi, FI_APPEND_WRITE))
- add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
- if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
- add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
- if (is_inode_flag_set(fi, FI_FREE_NID)) {
- if (err && err != -ENOENT)
- alloc_nid_done(sbi, inode->i_ino);
- else
- alloc_nid_failed(sbi, inode->i_ino);
- clear_inode_flag(fi, FI_FREE_NID);
+ if (inode->i_nlink) {
+ if (is_inode_flag_set(inode, FI_APPEND_WRITE))
+ add_ino_entry(sbi, inode->i_ino, APPEND_INO);
+ if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
+ add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
}
-
- if (err && err != -ENOENT) {
- if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) {
- /*
- * get here because we failed to release resource
- * of inode previously, reminder our user to run fsck
- * for fixing.
- */
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_msg(sbi->sb, KERN_WARNING,
- "inode (ino:%lu) resource leak, run fsck "
- "to fix this issue!", inode->i_ino);
- }
+ if (is_inode_flag_set(inode, FI_FREE_NID)) {
+ alloc_nid_failed(sbi, inode->i_ino);
+ clear_inode_flag(inode, FI_FREE_NID);
}
+ f2fs_bug_on(sbi, err &&
+ !exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
out_clear:
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- if (fi->i_crypt_info)
- f2fs_free_encryption_info(inode, fi->i_crypt_info);
-#endif
+ fscrypt_put_encryption_info(inode, NULL);
clear_inode(inode);
}
@@ -393,37 +433,44 @@ out_clear:
void handle_failed_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- int err = 0;
+ struct node_info ni;
+ /*
+ * clear the inode's nlink in order to release its resources
+ * immediately.
+ */
clear_nlink(inode);
- make_bad_inode(inode);
- unlock_new_inode(inode);
- i_size_write(inode, 0);
- if (F2FS_HAS_BLOCKS(inode))
- err = f2fs_truncate(inode, false);
+ /*
+ * we must call this to avoid the inode remaining dirty, which would
+ * cause a panic when flushing dirty inodes in gdirty_list.
+ */
+ update_inode_page(inode);
- if (!err)
- err = remove_inode_page(inode);
+ /* don't make bad inode, since it becomes a regular file. */
+ unlock_new_inode(inode);
/*
- * if we skip truncate_node in remove_inode_page bacause we failed
- * before, it's better to find another way to release resource of
- * this inode (e.g. valid block count, node block or nid). Here we
- * choose to add this inode to orphan list, so that we can call iput
- * for releasing in orphan recovery flow.
- *
* Note: we should add inode to orphan list before f2fs_unlock_op()
* so we can prevent losing this orphan when encountering a checkpoint
* followed by a sudden power-off.
*/
- if (err && err != -ENOENT) {
- err = acquire_orphan_inode(sbi);
- if (!err)
- add_orphan_inode(sbi, inode->i_ino);
+ get_node_info(sbi, inode->i_ino, &ni);
+
+ if (ni.blk_addr != NULL_ADDR) {
+ int err = acquire_orphan_inode(sbi);
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Too many orphan inodes, run fsck to fix.");
+ } else {
+ add_orphan_inode(inode);
+ }
+ alloc_nid_done(sbi, inode->i_ino);
+ } else {
+ set_inode_flag(inode, FI_FREE_NID);
}
- set_inode_flag(F2FS_I(inode), FI_FREE_NID);
f2fs_unlock_op(sbi);
/* iput will drop the inode object */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 2c32110f9fc0..468b2dbe6d34 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -46,7 +46,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
inode->i_generation = sbi->s_next_generation++;
err = insert_inode_locked(inode);
@@ -60,10 +60,14 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
f2fs_set_encrypted_inode(inode);
- if (f2fs_may_inline_data(inode))
- set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
+ set_inode_flag(inode, FI_NEW_INODE);
+
+ if (test_opt(sbi, INLINE_XATTR))
+ set_inode_flag(inode, FI_INLINE_XATTR);
+ if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
+ set_inode_flag(inode, FI_INLINE_DATA);
if (f2fs_may_inline_dentry(inode))
- set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);
+ set_inode_flag(inode, FI_INLINE_DENTRY);
f2fs_init_extent_tree(inode, NULL);
@@ -72,14 +76,13 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
stat_inc_inline_dir(inode);
trace_f2fs_new_inode(inode, 0);
- mark_inode_dirty(inode);
return inode;
fail:
trace_f2fs_new_inode(inode, err);
make_bad_inode(inode);
if (nid_free)
- set_inode_flag(F2FS_I(inode), FI_FREE_NID);
+ set_inode_flag(inode, FI_FREE_NID);
iput(inode);
return ERR_PTR(err);
}
@@ -88,18 +91,23 @@ static int is_multimedia_file(const unsigned char *s, const char *sub)
{
size_t slen = strlen(s);
size_t sublen = strlen(sub);
+ int i;
/*
* filename format of multimedia file should be defined as:
- * "filename + '.' + extension".
+ * "filename + '.' + extension + (optional: '.' + temp extension)".
*/
if (slen < sublen + 2)
return 0;
- if (s[slen - sublen - 1] != '.')
- return 0;
+ for (i = 1; i < slen - sublen; i++) {
+ if (s[i] != '.')
+ continue;
+ if (!strncasecmp(s + i + 1, sub, sublen))
+ return 1;
+ }
- return !strncasecmp(s + slen - sublen, sub, sublen);
+ return 0;
}
/*
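
Editor's note: the rewritten matcher above no longer requires the extension to be the final suffix; it scans every '.' so temporary names like "video.mp4.tmp" still classify as multimedia. A standalone replica (same loop bounds, same strncasecmp comparison) that can be compiled and tested directly:

#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strncasecmp (POSIX) */

static int is_multimedia_file(const char *s, const char *sub)
{
	size_t slen = strlen(s);
	size_t sublen = strlen(sub);
	size_t i;

	/* shortest valid form is "x." + extension */
	if (slen < sublen + 2)
		return 0;
	for (i = 1; i < slen - sublen; i++) {
		if (s[i] != '.')
			continue;
		if (!strncasecmp(s + i + 1, sub, sublen))
			return 1;
	}
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_multimedia_file("video.mp4", "mp4"),		/* 1 */
	       is_multimedia_file("video.mp4.tmp", "mp4"),	/* 1 */
	       is_multimedia_file("video.mpg", "mp4"));		/* 0 */
	return 0;
}
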
@@ -128,8 +136,6 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
nid_t ino = 0;
int err;
- f2fs_balance_fs(sbi);
-
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -142,6 +148,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
inode->i_mapping->a_ops = &f2fs_dblock_aops;
ino = inode->i_ino;
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -169,15 +177,15 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
int err;
if (f2fs_encrypted_inode(dir) &&
- !f2fs_is_child_context_consistent_with_parent(dir, inode))
+ !fscrypt_has_permitted_context(dir, inode))
return -EPERM;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
- inode->i_ctime = CURRENT_TIME;
+ inode->i_ctime = current_time(inode);
ihold(inode);
- set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -190,7 +198,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
f2fs_sync_fs(sbi->sb, 1);
return 0;
out:
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ clear_inode_flag(inode, FI_INC_LINK);
iput(inode);
f2fs_unlock_op(sbi);
return err;
@@ -199,10 +207,14 @@ out:
struct dentry *f2fs_get_parent(struct dentry *child)
{
struct qstr dotdot = QSTR_INIT("..", 2);
- unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot);
- if (!ino)
+ struct page *page;
+ unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot, &page);
+ if (!ino) {
+ if (IS_ERR(page))
+ return ERR_CAST(page);
return ERR_PTR(-ENOENT);
- return d_obtain_alias(f2fs_iget(d_inode(child)->i_sb, ino));
+ }
+ return d_obtain_alias(f2fs_iget(child->d_sb, ino));
}
static int __recover_dot_dentries(struct inode *dir, nid_t pino)
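
Editor's note: many hunks in this patch convert "entry not found" call sites to also propagate I/O errors through the page pointer, using the kernel's ERR_PTR encoding (an errno stuffed into the top of the pointer range). A userspace sketch of the convention, with find_entry() as a hypothetical stand-in for f2fs_find_entry(); the three macros mirror <linux/err.h>:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* returns the entry, or NULL with *page holding NULL ("not found")
 * or an ERR_PTR-encoded errno (real failure) */
static void *find_entry(void **page, int simulate_io_error)
{
	if (simulate_io_error) {
		*page = ERR_PTR(-EIO);
		return NULL;
	}
	*page = NULL;
	return NULL;
}

int main(void)
{
	void *page;
	void *de = find_entry(&page, 1);

	if (!de) {
		long err = IS_ERR(page) ? PTR_ERR(page) : -ENOENT;
		printf("lookup failed: %ld\n", err);	/* -5 == -EIO */
	}
	return 0;
}
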
@@ -214,12 +226,24 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
struct page *page;
int err = 0;
+ if (f2fs_readonly(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "skip recovering inline_dots inode (ino:%lu, pino:%u) "
+ "in readonly mountpoint", dir->i_ino, pino);
+ return 0;
+ }
+
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
de = f2fs_find_entry(dir, &dot, &page);
if (de) {
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out;
} else {
err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
if (err)
@@ -230,14 +254,14 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
if (de) {
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
} else {
err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
}
out:
- if (!err) {
- clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS);
- mark_inode_dirty(dir);
- }
+ if (!err)
+ clear_inode_flag(dir, FI_INLINE_DOTS);
f2fs_unlock_op(sbi);
return err;
@@ -251,13 +275,32 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
struct page *page;
nid_t ino;
int err = 0;
+ unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
+
+ if (f2fs_encrypted_inode(dir)) {
+ int res = fscrypt_get_encryption_info(dir);
+
+ /*
+ * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
+ * created while the directory was encrypted and we
+ * don't have access to the key.
+ */
+ if (fscrypt_has_encryption_key(dir))
+ fscrypt_set_encrypted_dentry(dentry);
+ fscrypt_set_d_op(dentry);
+ if (res && res != -ENOKEY)
+ return ERR_PTR(res);
+ }
if (dentry->d_name.len > F2FS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
de = f2fs_find_entry(dir, &dentry->d_name, &page);
- if (!de)
+ if (!de) {
+ if (IS_ERR(page))
+ return (struct dentry *)page;
return d_splice_alias(inode, dentry);
+ }
ino = le32_to_cpu(de->ino);
f2fs_dentry_kunmap(dir, page);
@@ -267,15 +310,29 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
return ERR_CAST(inode);
+ if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
+ err = __recover_dot_dentries(dir, root_ino);
+ if (err)
+ goto err_out;
+ }
+
if (f2fs_has_inline_dots(inode)) {
err = __recover_dot_dentries(inode, dir->i_ino);
if (err)
goto err_out;
}
+ if (!IS_ERR(inode) && f2fs_encrypted_inode(dir) &&
+ (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
+ !fscrypt_has_permitted_context(dir, inode)) {
+ bool nokey = f2fs_encrypted_inode(inode) &&
+ !fscrypt_has_encryption_key(inode);
+ err = nokey ? -ENOKEY : -EPERM;
+ goto err_out;
+ }
return d_splice_alias(inode, dentry);
err_out:
- iget_failed(inode);
+ iput(inode);
return ERR_PTR(err);
}
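
Editor's note: the context check added to f2fs_lookup() distinguishes two failure modes when a lookup crosses into a directory or symlink whose encryption context is not permitted: -ENOKEY when the child is encrypted and its key is absent, -EPERM otherwise. A sketch of that decision; the struct fields are simplified stand-ins for fscrypt's real predicates:

#include <errno.h>
#include <stdio.h>

struct node { int encrypted; int has_key; int permitted; };

static int check_context(const struct node *dir, const struct node *child)
{
	if (!dir->encrypted || child->permitted)
		return 0;
	/* a missing key is a retryable condition, a policy mismatch is not */
	return (child->encrypted && !child->has_key) ? -ENOKEY : -EPERM;
}

int main(void)
{
	struct node dir     = { .encrypted = 1 };
	struct node nokey   = { .encrypted = 1, .has_key = 0, .permitted = 0 };
	struct node foreign = { .encrypted = 1, .has_key = 1, .permitted = 0 };

	printf("no key:  %d\n", check_context(&dir, &nokey));	/* -ENOKEY */
	printf("foreign: %d\n", check_context(&dir, &foreign));	/* -EPERM */
	return 0;
}
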
@@ -288,11 +345,15 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
int err = -ENOENT;
trace_f2fs_unlink_enter(dir, dentry);
- f2fs_balance_fs(sbi);
de = f2fs_find_entry(dir, &dentry->d_name, &page);
- if (!de)
+ if (!de) {
+ if (IS_ERR(page))
+ err = PTR_ERR(page);
goto fail;
+ }
+
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
err = acquire_orphan_inode(sbi);
@@ -305,9 +366,6 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
f2fs_delete_entry(de, page, dir, inode);
f2fs_unlock_op(sbi);
- /* In order to evict this inode, we set it dirty */
- mark_inode_dirty(inode);
-
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
fail:
@@ -332,16 +390,24 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
size_t len = strlen(symname);
- size_t p_len;
- char *p_str;
- struct f2fs_str disk_link = FSTR_INIT(NULL, 0);
- struct f2fs_encrypted_symlink_data *sd = NULL;
+ struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
+ struct fscrypt_symlink_data *sd = NULL;
int err;
- if (len > dir->i_sb->s_blocksize)
- return -ENAMETOOLONG;
+ if (f2fs_encrypted_inode(dir)) {
+ err = fscrypt_get_encryption_info(dir);
+ if (err)
+ return err;
+
+ if (!fscrypt_has_encryption_key(dir))
+ return -EPERM;
- f2fs_balance_fs(sbi);
+ disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
+ sizeof(struct fscrypt_symlink_data));
+ }
+
+ if (disk_link.len > dir->i_sb->s_blocksize)
+ return -ENAMETOOLONG;
inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
if (IS_ERR(inode))
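
Editor's note: the hunk above moves the ENAMETOOLONG check to the on-disk length: an encrypted symlink stores a small header plus the ciphertext, so the budget is checked after adding both. A back-of-envelope version; the 16-byte rounding is only an assumption here (the real fscrypt_fname_encrypted_size() depends on the cipher and policy):

#include <stdint.h>
#include <stdio.h>

struct symlink_data { uint16_t len; char encrypted_path[]; };

/* assumed: ciphertext padded to a 16-byte multiple */
static size_t encrypted_size(size_t name_len)
{
	return (name_len + 15) & ~(size_t)15;
}

int main(void)
{
	size_t blocksize = 4096, len = 4000;
	size_t disk_len = encrypted_size(len) + sizeof(struct symlink_data);

	printf("disk_len=%zu -> %s\n", disk_len,
	       disk_len > blocksize ? "ENAMETOOLONG" : "fits in one block");
	return 0;
}
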
@@ -351,8 +417,11 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
else
inode->i_op = &f2fs_symlink_inode_operations;
+ inode_nohighmem(inode);
inode->i_mapping->a_ops = &f2fs_dblock_aops;
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -360,42 +429,36 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
f2fs_unlock_op(sbi);
alloc_nid_done(sbi, inode->i_ino);
- if (f2fs_encrypted_inode(dir)) {
+ if (f2fs_encrypted_inode(inode)) {
struct qstr istr = QSTR_INIT(symname, len);
+ struct fscrypt_str ostr;
- err = f2fs_get_encryption_info(inode);
- if (err)
+ sd = kzalloc(disk_link.len, GFP_NOFS);
+ if (!sd) {
+ err = -ENOMEM;
goto err_out;
+ }
- err = f2fs_fname_crypto_alloc_buffer(inode, len, &disk_link);
+ err = fscrypt_get_encryption_info(inode);
if (err)
goto err_out;
- err = f2fs_fname_usr_to_disk(inode, &istr, &disk_link);
- if (err < 0)
- goto err_out;
-
- p_len = encrypted_symlink_data_len(disk_link.len) + 1;
-
- if (p_len > dir->i_sb->s_blocksize) {
- err = -ENAMETOOLONG;
+ if (!fscrypt_has_encryption_key(inode)) {
+ err = -EPERM;
goto err_out;
}
- sd = kzalloc(p_len, GFP_NOFS);
- if (!sd) {
- err = -ENOMEM;
+ ostr.name = sd->encrypted_path;
+ ostr.len = disk_link.len;
+ err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
+ if (err)
goto err_out;
- }
- memcpy(sd->encrypted_path, disk_link.name, disk_link.len);
- sd->len = cpu_to_le16(disk_link.len);
- p_str = (char *)sd;
- } else {
- p_len = len + 1;
- p_str = (char *)symname;
+
+ sd->len = cpu_to_le16(ostr.len);
+ disk_link.name = (char *)sd;
}
- err = page_symlink(inode, p_str, p_len);
+ err = page_symlink(inode, disk_link.name, disk_link.len);
err_out:
d_instantiate(dentry, inode);
@@ -411,7 +474,8 @@ err_out:
* performance regression.
*/
if (!err) {
- filemap_write_and_wait_range(inode->i_mapping, 0, p_len - 1);
+ filemap_write_and_wait_range(inode->i_mapping, 0,
+ disk_link.len - 1);
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
@@ -420,7 +484,6 @@ err_out:
}
kfree(sd);
- f2fs_fname_crypto_free_buffer(&disk_link);
return err;
out:
handle_failed_inode(inode);
@@ -433,8 +496,6 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct inode *inode;
int err;
- f2fs_balance_fs(sbi);
-
inode = f2fs_new_inode(dir, S_IFDIR | mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -444,7 +505,9 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_mapping->a_ops = &f2fs_dblock_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
- set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ f2fs_balance_fs(sbi, true);
+
+ set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -461,7 +524,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return 0;
out_fail:
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ clear_inode_flag(inode, FI_INC_LINK);
handle_failed_inode(inode);
return err;
}
@@ -481,8 +544,6 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
struct inode *inode;
int err = 0;
- f2fs_balance_fs(sbi);
-
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -490,6 +551,8 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
init_special_inode(inode, inode->i_mode, rdev);
inode->i_op = &f2fs_special_inode_operations;
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -516,9 +579,6 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
struct inode *inode;
int err;
- if (!whiteout)
- f2fs_balance_fs(sbi);
-
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -532,6 +592,8 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
inode->i_mapping->a_ops = &f2fs_dblock_aops;
}
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = acquire_orphan_inode(sbi);
if (err)
@@ -545,17 +607,17 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
* add this non-linked tmpfile to the orphan list, so that we can
* remove all of its unused data after an abnormal power-off.
*/
- add_orphan_inode(sbi, inode->i_ino);
- f2fs_unlock_op(sbi);
-
+ add_orphan_inode(inode);
alloc_nid_done(sbi, inode->i_ino);
if (whiteout) {
- inode_dec_link_count(inode);
+ f2fs_i_links_write(inode, false);
*whiteout = inode;
} else {
d_tmpfile(dentry, inode);
}
+ /* link_count was changed by d_tmpfile as well. */
+ f2fs_unlock_op(sbi);
unlock_new_inode(inode);
return 0;
@@ -569,7 +631,7 @@ out:
static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
if (f2fs_encrypted_inode(dir)) {
- int err = f2fs_get_encryption_info(dir);
+ int err = fscrypt_get_encryption_info(dir);
if (err)
return err;
}
@@ -595,26 +657,29 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct f2fs_dir_entry *old_dir_entry = NULL;
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
+ bool is_old_inline = f2fs_has_inline_dentry(old_dir);
int err = -ENOENT;
if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
- !f2fs_is_child_context_consistent_with_parent(new_dir,
- old_inode)) {
+ !fscrypt_has_permitted_context(new_dir, old_inode)) {
err = -EPERM;
goto out;
}
- f2fs_balance_fs(sbi);
-
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
- if (!old_entry)
+ if (!old_entry) {
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
goto out;
+ }
if (S_ISDIR(old_inode->i_mode)) {
- err = -EIO;
old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
- if (!old_dir_entry)
+ if (!old_dir_entry) {
+ if (IS_ERR(old_dir_page))
+ err = PTR_ERR(old_dir_page);
goto out_old;
+ }
}
if (flags & RENAME_WHITEOUT) {
@@ -632,8 +697,13 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
err = -ENOENT;
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
&new_page);
- if (!new_entry)
+ if (!new_entry) {
+ if (IS_ERR(new_page))
+ err = PTR_ERR(new_page);
goto out_whiteout;
+ }
+
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
@@ -641,31 +711,29 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
goto put_out_dir;
- if (update_dent_inode(old_inode, new_inode,
- &new_dentry->d_name)) {
+ err = update_dent_inode(old_inode, new_inode,
+ &new_dentry->d_name);
+ if (err) {
release_orphan_inode(sbi);
goto put_out_dir;
}
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
- new_inode->i_ctime = CURRENT_TIME;
+ new_inode->i_ctime = current_time(new_inode);
down_write(&F2FS_I(new_inode)->i_sem);
if (old_dir_entry)
- drop_nlink(new_inode);
- drop_nlink(new_inode);
+ f2fs_i_links_write(new_inode, false);
+ f2fs_i_links_write(new_inode, false);
up_write(&F2FS_I(new_inode)->i_sem);
- mark_inode_dirty(new_inode);
-
if (!new_inode->i_nlink)
- add_orphan_inode(sbi, new_inode->i_ino);
+ add_orphan_inode(new_inode);
else
release_orphan_inode(sbi);
-
- update_inode_page(old_inode);
- update_inode_page(new_inode);
} else {
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = f2fs_add_link(new_dentry, old_inode);
@@ -674,9 +742,29 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_whiteout;
}
- if (old_dir_entry) {
- inc_nlink(new_dir);
- update_inode_page(new_dir);
+ if (old_dir_entry)
+ f2fs_i_links_write(new_dir, true);
+
+ /*
+ * The old entry and the new entry can reside in the same
+ * inline dentry block. Attaching the new entry may force an
+ * inline dentry conversion, after which old_entry and
+ * old_page would point to a stale address; to avoid this,
+ * re-check and look them up again here.
+ */
+ if (is_old_inline && !f2fs_has_inline_dentry(old_dir)) {
+ f2fs_put_page(old_page, 0);
+ old_page = NULL;
+
+ old_entry = f2fs_find_entry(old_dir,
+ &old_dentry->d_name, &old_page);
+ if (!old_entry) {
+ err = -ENOENT;
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
+ f2fs_unlock_op(sbi);
+ goto out_whiteout;
+ }
}
}
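
Editor's note: the re-lookup just added is an instance of a general invalidation hazard: inserting into a container can relocate its storage, so pointers obtained before the insert must be refreshed afterwards. A userspace miniature using realloc(), where find() plays the role of f2fs_find_entry():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **names;
static size_t count, cap;

static char **find(const char *n)
{
	for (size_t i = 0; i < count; i++)
		if (!strcmp(names[i], n))
			return &names[i];
	return NULL;
}

static void insert(const char *n)
{
	if (count == cap) {	/* growth may move the whole array */
		cap = cap ? cap * 2 : 1;
		names = realloc(names, cap * sizeof(*names));
	}
	names[count++] = strdup(n);
}

int main(void)
{
	insert("old");
	char **old = find("old");
	insert("new");		/* may have invalidated 'old' */
	old = find("old");	/* so look it up again, as above */
	printf("%s\n", *old);
	return 0;
}
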
@@ -686,14 +774,14 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
file_set_enc_name(old_inode);
up_write(&F2FS_I(old_inode)->i_sem);
- old_inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(old_inode);
+ old_inode->i_ctime = current_time(old_inode);
+ f2fs_mark_inode_dirty_sync(old_inode, false);
f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
if (whiteout) {
whiteout->i_state |= I_LINKABLE;
- set_inode_flag(F2FS_I(whiteout), FI_INC_LINK);
+ set_inode_flag(whiteout, FI_INC_LINK);
err = f2fs_add_link(old_dentry, whiteout);
if (err)
goto put_out_dir;
@@ -705,14 +793,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (old_dir != new_dir && !whiteout) {
f2fs_set_link(old_inode, old_dir_entry,
old_dir_page, new_dir);
- update_inode_page(old_inode);
} else {
f2fs_dentry_kunmap(old_inode, old_dir_page);
f2fs_put_page(old_dir_page, 0);
}
- drop_nlink(old_dir);
- mark_inode_dirty(old_dir);
- update_inode_page(old_dir);
+ f2fs_i_links_write(old_dir, false);
}
f2fs_unlock_op(sbi);
@@ -756,39 +841,45 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
int err = -ENOENT;
if ((f2fs_encrypted_inode(old_dir) || f2fs_encrypted_inode(new_dir)) &&
- (old_dir != new_dir) &&
- (!f2fs_is_child_context_consistent_with_parent(new_dir,
- old_inode) ||
- !f2fs_is_child_context_consistent_with_parent(old_dir,
- new_inode)))
+ (old_dir != new_dir) &&
+ (!fscrypt_has_permitted_context(new_dir, old_inode) ||
+ !fscrypt_has_permitted_context(old_dir, new_inode)))
return -EPERM;
- f2fs_balance_fs(sbi);
-
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
- if (!old_entry)
+ if (!old_entry) {
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
goto out;
+ }
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
- if (!new_entry)
+ if (!new_entry) {
+ if (IS_ERR(new_page))
+ err = PTR_ERR(new_page);
goto out_old;
+ }
/* prepare for updating ".." directory entry info later */
if (old_dir != new_dir) {
if (S_ISDIR(old_inode->i_mode)) {
- err = -EIO;
old_dir_entry = f2fs_parent_dir(old_inode,
&old_dir_page);
- if (!old_dir_entry)
+ if (!old_dir_entry) {
+ if (IS_ERR(old_dir_page))
+ err = PTR_ERR(old_dir_page);
goto out_new;
+ }
}
if (S_ISDIR(new_inode->i_mode)) {
- err = -EIO;
new_dir_entry = f2fs_parent_dir(new_inode,
&new_dir_page);
- if (!new_dir_entry)
+ if (!new_dir_entry) {
+ if (IS_ERR(new_dir_page))
+ err = PTR_ERR(new_dir_page);
goto out_old_dir;
+ }
}
}
@@ -807,6 +898,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_new_dir;
}
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = update_dent_inode(old_inode, new_inode, &new_dentry->d_name);
@@ -836,19 +929,13 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
file_lost_pino(old_inode);
up_write(&F2FS_I(old_inode)->i_sem);
- update_inode_page(old_inode);
-
old_dir->i_ctime = CURRENT_TIME;
if (old_nlink) {
down_write(&F2FS_I(old_dir)->i_sem);
- if (old_nlink < 0)
- drop_nlink(old_dir);
- else
- inc_nlink(old_dir);
+ f2fs_i_links_write(old_dir, old_nlink > 0);
up_write(&F2FS_I(old_dir)->i_sem);
}
- mark_inode_dirty(old_dir);
- update_inode_page(old_dir);
+ f2fs_mark_inode_dirty_sync(old_dir, false);
/* update directory entry info of new dir inode */
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
@@ -857,19 +944,13 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
file_lost_pino(new_inode);
up_write(&F2FS_I(new_inode)->i_sem);
- update_inode_page(new_inode);
-
new_dir->i_ctime = CURRENT_TIME;
if (new_nlink) {
down_write(&F2FS_I(new_dir)->i_sem);
- if (new_nlink < 0)
- drop_nlink(new_dir);
- else
- inc_nlink(new_dir);
+ f2fs_i_links_write(new_dir, new_nlink > 0);
up_write(&F2FS_I(new_dir)->i_sem);
}
- mark_inode_dirty(new_dir);
- update_inode_page(new_dir);
+ f2fs_mark_inode_dirty_sync(new_dir, false);
f2fs_unlock_op(sbi);
@@ -922,89 +1003,85 @@ static int f2fs_rename2(struct inode *old_dir, struct dentry *old_dentry,
return f2fs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cookie)
{
struct page *cpage = NULL;
char *caddr, *paddr = NULL;
- struct f2fs_str cstr;
- struct f2fs_str pstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_symlink_data *sd;
struct inode *inode = d_inode(dentry);
- struct f2fs_encrypted_symlink_data *sd;
- loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
u32 max_size = inode->i_sb->s_blocksize;
int res;
- res = f2fs_get_encryption_info(inode);
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ res = fscrypt_get_encryption_info(inode);
if (res)
return ERR_PTR(res);
cpage = read_mapping_page(inode->i_mapping, 0, NULL);
if (IS_ERR(cpage))
return ERR_CAST(cpage);
- caddr = kmap(cpage);
- caddr[size] = 0;
+ caddr = page_address(cpage);
/* Symlink is encrypted */
- sd = (struct f2fs_encrypted_symlink_data *)caddr;
+ sd = (struct fscrypt_symlink_data *)caddr;
+ cstr.name = sd->encrypted_path;
cstr.len = le16_to_cpu(sd->len);
- cstr.name = kmalloc(cstr.len, GFP_NOFS);
- if (!cstr.name) {
- res = -ENOMEM;
- goto errout;
- }
- memcpy(cstr.name, sd->encrypted_path, cstr.len);
/* this is broken symlink case */
- if (cstr.name[0] == 0 && cstr.len == 0) {
+ if (unlikely(cstr.len == 0)) {
res = -ENOENT;
goto errout;
}
- if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
- max_size) {
+ if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
/* Symlink data on the disk is corrupted */
res = -EIO;
goto errout;
}
- res = f2fs_fname_crypto_alloc_buffer(inode, cstr.len, &pstr);
+ res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
if (res)
goto errout;
- res = f2fs_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
- if (res < 0)
+ res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
+ if (res)
goto errout;
- kfree(cstr.name);
+ /* this is broken symlink case */
+ if (unlikely(pstr.name[0] == 0)) {
+ res = -ENOENT;
+ goto errout;
+ }
paddr = pstr.name;
/* Null-terminate the name */
- paddr[res] = '\0';
+ paddr[pstr.len] = '\0';
- kunmap(cpage);
- page_cache_release(cpage);
+ put_page(cpage);
return *cookie = paddr;
errout:
- kfree(cstr.name);
- f2fs_fname_crypto_free_buffer(&pstr);
- kunmap(cpage);
- page_cache_release(cpage);
+ fscrypt_fname_free_buffer(&pstr);
+ put_page(cpage);
return ERR_PTR(res);
}
const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
.readlink = generic_readlink,
- .follow_link = f2fs_encrypted_follow_link,
- .put_link = kfree_put_link,
+ .follow_link = f2fs_encrypted_follow_link,
+ .put_link = kfree_put_link,
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
+#ifdef CONFIG_F2FS_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = f2fs_listxattr,
.removexattr = generic_removexattr,
-};
#endif
+};
const struct inode_operations f2fs_dir_inode_operations = {
.create = f2fs_create,
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 7bcbc6e9c40d..edacbabb92cf 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -45,13 +45,15 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
* give 25%, 25%, 50%, 50%, 50% memory for each components respectively
*/
if (type == FREE_NIDS) {
- mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
- PAGE_CACHE_SHIFT;
+ mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
+ sizeof(struct free_nid)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
} else if (type == NAT_ENTRIES) {
mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+ if (excess_cached_nats(sbi))
+ res = false;
} else if (type == DIRTY_DENTS) {
if (sbi->sb->s_bdi->wb.dirty_exceeded)
return false;
@@ -62,16 +64,17 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
for (i = 0; i <= UPDATE_INO; i++)
mem_size += (sbi->im[i].ino_num *
- sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
+ sizeof(struct ino_entry)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
} else if (type == EXTENT_CACHE) {
- mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) +
+ mem_size = (atomic_read(&sbi->total_ext_tree) *
+ sizeof(struct extent_tree) +
atomic_read(&sbi->total_ext_node) *
- sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
+ sizeof(struct extent_node)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
} else {
- if (sbi->sb->s_bdi->wb.dirty_exceeded)
- return false;
+ if (!sbi->sb->s_bdi->wb.dirty_exceeded)
+ return true;
}
return res;
}
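
Editor's note: available_free_memory() gates each cache on a slice of available RAM: the cache's footprint, expressed in pages, must stay under a percentage of free pages, further divided per component. Worked numbers mirroring the shift arithmetic (all values illustrative):

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	unsigned long avail_ram = 262144;	/* free pages: 1 GiB */
	unsigned long ram_thresh = 10;		/* percent, a tunable */
	unsigned long nr_entries = 200000;	/* cached objects */
	unsigned long entry_size = 40;		/* bytes per object */

	unsigned long mem_size = (nr_entries * entry_size) >> PAGE_SHIFT;
	unsigned long limit = (avail_ram * ram_thresh / 100) >> 2;

	printf("cache=%lu pages, limit=%lu pages -> %s\n",
	       mem_size, limit, mem_size < limit ? "keep caching" : "stop");
	return 0;
}
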
@@ -120,7 +123,7 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+ memcpy(dst_addr, src_addr, PAGE_SIZE);
set_page_dirty(dst_page);
f2fs_put_page(src_page, 1);
@@ -256,18 +259,22 @@ static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
return new;
}
-static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
+static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
struct f2fs_nat_entry *ne)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
- down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (!e) {
e = grab_nat_entry(nm_i, nid);
node_info_from_raw_nat(&e->ni, ne);
+ } else {
+ f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
+ nat_get_blkaddr(e) !=
+ le32_to_cpu(ne->block_addr) ||
+ nat_get_version(e) != ne->version);
}
- up_write(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
@@ -355,7 +362,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
nid_t start_nid = START_NID(nid);
struct f2fs_nat_block *nat_blk;
struct page *page = NULL;
@@ -372,21 +379,20 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
- }
- up_read(&nm_i->nat_tree_lock);
- if (e)
+ up_read(&nm_i->nat_tree_lock);
return;
+ }
memset(&ne, 0, sizeof(struct f2fs_nat_entry));
/* Check current segment summary */
- mutex_lock(&curseg->curseg_mutex);
- i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
+ down_read(&curseg->journal_rwsem);
+ i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
if (i >= 0) {
- ne = nat_in_journal(sum, i);
+ ne = nat_in_journal(journal, i);
node_info_from_raw_nat(ni, &ne);
}
- mutex_unlock(&curseg->curseg_mutex);
+ up_read(&curseg->journal_rwsem);
if (i >= 0)
goto cache;
@@ -397,18 +403,75 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
node_info_from_raw_nat(ni, &ne);
f2fs_put_page(page, 1);
cache:
+ up_read(&nm_i->nat_tree_lock);
/* cache nat entry */
- cache_nat_entry(NM_I(sbi), nid, &ne);
+ down_write(&nm_i->nat_tree_lock);
+ cache_nat_entry(sbi, nid, &ne);
+ up_write(&nm_i->nat_tree_lock);
+}
+
+/*
+ * readahead up to n of the parent's node pages (at most MAX_RA_NODE).
+ */
+static void ra_node_pages(struct page *parent, int start, int n)
+{
+ struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+ struct blk_plug plug;
+ int i, end;
+ nid_t nid;
+
+ blk_start_plug(&plug);
+
+ /* Then, try readahead for siblings of the desired node */
+ end = start + n;
+ end = min(end, NIDS_PER_BLOCK);
+ for (i = start; i < end; i++) {
+ nid = get_nid(parent, i, false);
+ ra_node_page(sbi, nid);
+ }
+
+ blk_finish_plug(&plug);
+}
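
Editor's note: ra_node_pages() batches readahead for a node's siblings under one blk_plug so the block layer can merge the requests before any of them block. There is no userspace blk_plug, but posix_fadvise(POSIX_FADV_WILLNEED) gives the same "hint everything first, read later" shape; this analogue is an assumption, not the kernel API:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);
	char buf[64];
	ssize_t n;

	if (fd < 0)
		return 1;
	/* issue the whole batch of readahead hints up front */
	for (int i = 0; i < 4; i++)
		posix_fadvise(fd, i * 4096, 4096, POSIX_FADV_WILLNEED);
	/* only then do the blocking read */
	n = read(fd, buf, sizeof(buf));
	printf("read %zd bytes after readahead hints\n", n);
	close(fd);
	return 0;
}
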
+
+pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
+{
+ const long direct_index = ADDRS_PER_INODE(dn->inode);
+ const long direct_blks = ADDRS_PER_BLOCK;
+ const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+ unsigned int skipped_unit = ADDRS_PER_BLOCK;
+ int cur_level = dn->cur_level;
+ int max_level = dn->max_level;
+ pgoff_t base = 0;
+
+ if (!dn->max_level)
+ return pgofs + 1;
+
+ while (max_level-- > cur_level)
+ skipped_unit *= NIDS_PER_BLOCK;
+
+ switch (dn->max_level) {
+ case 3:
+ base += 2 * indirect_blks;
+ case 2:
+ base += 2 * direct_blks;
+ case 1:
+ base += direct_index;
+ break;
+ default:
+ f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
+ }
+
+ return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
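
Editor's note: get_next_page_offset() lets a failed lookup skip the entire missing subtree: the unit skipped grows by a factor of NIDS_PER_BLOCK per level between the failure point and the target. A standalone version with illustrative constants (the real values derive from the block size):

#include <stdio.h>

#define NIDS_PER_BLOCK	1018
#define ADDRS_PER_BLOCK	1018
#define DIRECT_INDEX	923	/* block addrs held in the inode itself */

static long next_offset(long pgofs, int cur_level, int max_level)
{
	long skipped = ADDRS_PER_BLOCK, base = 0;
	int lvl = max_level;

	if (!max_level)
		return pgofs + 1;
	while (lvl-- > cur_level)
		skipped *= NIDS_PER_BLOCK;

	switch (max_level) {
	case 3:
		base += 2L * ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
		/* fall through */
	case 2:
		base += 2L * ADDRS_PER_BLOCK;
		/* fall through */
	case 1:
		base += DIRECT_INDEX;
	}
	return ((pgofs - base) / skipped + 1) * skipped + base;
}

int main(void)
{
	/* hole found at level 1: jump past one whole direct block */
	printf("%ld\n", next_offset(DIRECT_INDEX + 100, 1, 1)); /* 1941 */
	return 0;
}
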
/*
* The maximum depth is four.
* Offset[0] will have raw inode offset.
*/
-static int get_node_path(struct f2fs_inode_info *fi, long block,
+static int get_node_path(struct inode *inode, long block,
int offset[4], unsigned int noffset[4])
{
- const long direct_index = ADDRS_PER_INODE(fi);
+ const long direct_index = ADDRS_PER_INODE(inode);
const long direct_blks = ADDRS_PER_BLOCK;
const long dptrs_per_blk = NIDS_PER_BLOCK;
const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
@@ -493,10 +556,10 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
int offset[4];
unsigned int noffset[4];
nid_t nids[4];
- int level, i;
+ int level, i = 0;
int err = 0;
- level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);
+ level = get_node_path(dn->inode, index, offset, noffset);
nids[0] = dn->inode->i_ino;
npage[0] = dn->inode_page;
@@ -583,6 +646,11 @@ release_pages:
release_out:
dn->inode_page = NULL;
dn->node_page = NULL;
+ if (err == -ENOENT) {
+ dn->cur_level = i;
+ dn->max_level = level;
+ dn->ofs_in_node = offset[level];
+ }
return err;
}
@@ -606,8 +674,7 @@ static void truncate_node(struct dnode_of_data *dn)
if (dn->nid == dn->inode->i_ino) {
remove_orphan_inode(sbi, dn->nid);
dec_valid_inode_count(sbi);
- } else {
- sync_inode_page(dn);
+ f2fs_inode_synced(dn->inode);
}
invalidate:
clear_node_page_dirty(dn->node_page);
@@ -666,6 +733,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
return PTR_ERR(page);
}
+ ra_node_pages(page, ofs, NIDS_PER_BLOCK);
+
rn = F2FS_NODE(page);
if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
@@ -676,7 +745,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
ret = truncate_dnode(&rdn);
if (ret < 0)
goto out_err;
- set_nid(page, i, 0, false);
+ if (set_nid(page, i, 0, false))
+ dn->node_changed = true;
}
} else {
child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
@@ -689,7 +759,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
rdn.nid = child_nid;
ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
if (ret == (NIDS_PER_BLOCK + 1)) {
- set_nid(page, i, 0, false);
+ if (set_nid(page, i, 0, false))
+ dn->node_changed = true;
child_nofs += ret;
} else if (ret < 0 && ret != -ENOENT) {
goto out_err;
@@ -741,6 +812,8 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
}
+ ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
+
/* free direct nodes linked to a partial indirect node */
for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
child_nid = get_nid(pages[idx], i, false);
@@ -750,7 +823,8 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
err = truncate_dnode(dn);
if (err < 0)
goto fail;
- set_nid(pages[idx], i, 0, false);
+ if (set_nid(pages[idx], i, 0, false))
+ dn->node_changed = true;
}
if (offset[idx + 1] == 0) {
@@ -787,8 +861,8 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
trace_f2fs_truncate_inode_blocks_enter(inode, from);
- level = get_node_path(F2FS_I(inode), from, offset, noffset);
-restart:
+ level = get_node_path(inode, from, offset, noffset);
+
page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) {
trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
@@ -852,11 +926,8 @@ skip_partial:
if (offset[1] == 0 &&
ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
lock_page(page);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
- f2fs_put_page(page, 1);
- goto restart;
- }
- f2fs_wait_on_page_writeback(page, NODE);
+ BUG_ON(page->mapping != NODE_MAPPING(sbi));
+ f2fs_wait_on_page_writeback(page, NODE, true);
ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
set_page_dirty(page);
unlock_page(page);
@@ -885,7 +956,7 @@ int truncate_xattr_node(struct inode *inode, struct page *page)
if (IS_ERR(npage))
return PTR_ERR(npage);
- F2FS_I(inode)->i_xattr_nid = 0;
+ f2fs_i_xnid_write(inode, 0);
/* need to do checkpoint during fsync */
F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
@@ -951,10 +1022,10 @@ struct page *new_node_page(struct dnode_of_data *dn,
struct page *page;
int err;
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return ERR_PTR(-EPERM);
- page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
+ page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -971,23 +1042,19 @@ struct page *new_node_page(struct dnode_of_data *dn,
new_ni.ino = dn->inode->i_ino;
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
set_cold_node(dn->inode, page);
- SetPageUptodate(page);
- set_page_dirty(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
+ if (set_page_dirty(page))
+ dn->node_changed = true;
if (f2fs_has_xattr_block(ofs))
- F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
+ f2fs_i_xnid_write(dn->inode, dn->nid);
- dn->node_page = page;
- if (ipage)
- update_inode(dn->inode, ipage);
- else
- sync_inode_page(dn);
if (ofs == 0)
inc_valid_inode_count(sbi);
-
return page;
fail:
@@ -1013,6 +1080,9 @@ static int read_node_page(struct page *page, int rw)
.encrypted_page = NULL,
};
+ if (PageUptodate(page))
+ return LOCKED_PAGE;
+
get_node_info(sbi, page->index, &ni);
if (unlikely(ni.blk_addr == NULL_ADDR)) {
@@ -1020,10 +1090,7 @@ static int read_node_page(struct page *page, int rw)
return -ENOENT;
}
- if (PageUptodate(page))
- return LOCKED_PAGE;
-
- fio.blk_addr = ni.blk_addr;
+ fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
return f2fs_submit_page_bio(&fio);
}
@@ -1035,14 +1102,17 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
struct page *apage;
int err;
- apage = find_get_page(NODE_MAPPING(sbi), nid);
- if (apage && PageUptodate(apage)) {
- f2fs_put_page(apage, 0);
+ if (!nid)
+ return;
+ f2fs_bug_on(sbi, check_nid_range(sbi, nid));
+
+ rcu_read_lock();
+ apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
+ rcu_read_unlock();
+ if (apage)
return;
- }
- f2fs_put_page(apage, 0);
- apage = grab_cache_page(NODE_MAPPING(sbi), nid);
+ apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
if (!apage)
return;
@@ -1050,12 +1120,17 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
f2fs_put_page(apage, err ? 1 : 0);
}
-struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
+ struct page *parent, int start)
{
struct page *page;
int err;
+
+ if (!nid)
+ return ERR_PTR(-ENOENT);
+ f2fs_bug_on(sbi, check_nid_range(sbi, nid));
repeat:
- page = grab_cache_page(NODE_MAPPING(sbi), nid);
+ page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -1063,106 +1138,301 @@ repeat:
if (err < 0) {
f2fs_put_page(page, 1);
return ERR_PTR(err);
- } else if (err != LOCKED_PAGE) {
- lock_page(page);
+ } else if (err == LOCKED_PAGE) {
+ goto page_hit;
}
- if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
- ClearPageUptodate(page);
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
- }
+ if (parent)
+ ra_node_pages(parent, start + 1, MAX_RA_NODE);
+
+ lock_page(page);
+
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
f2fs_put_page(page, 1);
goto repeat;
}
+
+ if (unlikely(!PageUptodate(page)))
+ goto out_err;
+page_hit:
+ if (unlikely(nid != nid_of_node(page))) {
+ f2fs_bug_on(sbi, 1);
+ ClearPageUptodate(page);
+out_err:
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
+ }
return page;
}
-/*
- * Return a locked page for the desired node page.
- * And, readahead MAX_RA_NODE number of node pages.
- */
+struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+{
+ return __get_node_page(sbi, nid, NULL, 0);
+}
+
struct page *get_node_page_ra(struct page *parent, int start)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
- struct blk_plug plug;
+ nid_t nid = get_nid(parent, start, false);
+
+ return __get_node_page(sbi, nid, parent, start);
+}
+
+static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ struct inode *inode;
struct page *page;
- int err, i, end;
- nid_t nid;
+ int ret;
- /* First, try getting the desired direct node. */
- nid = get_nid(parent, start, false);
- if (!nid)
- return ERR_PTR(-ENOENT);
-repeat:
- page = grab_cache_page(NODE_MAPPING(sbi), nid);
+ /* should flush inline_data before evict_inode */
+ inode = ilookup(sbi->sb, ino);
+ if (!inode)
+ return;
+
+ page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
if (!page)
- return ERR_PTR(-ENOMEM);
+ goto iput_out;
- err = read_node_page(page, READ_SYNC);
- if (err < 0) {
- f2fs_put_page(page, 1);
- return ERR_PTR(err);
- } else if (err == LOCKED_PAGE) {
- goto page_hit;
- }
+ if (!PageUptodate(page))
+ goto page_out;
- blk_start_plug(&plug);
+ if (!PageDirty(page))
+ goto page_out;
- /* Then, try readahead for siblings of the desired node */
- end = start + MAX_RA_NODE;
- end = min(end, NIDS_PER_BLOCK);
- for (i = start + 1; i < end; i++) {
- nid = get_nid(parent, i, false);
- if (!nid)
- continue;
- ra_node_page(sbi, nid);
- }
+ if (!clear_page_dirty_for_io(page))
+ goto page_out;
- blk_finish_plug(&plug);
+ ret = f2fs_write_inline_data(inode, page);
+ inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ if (ret)
+ set_page_dirty(page);
+page_out:
+ f2fs_put_page(page, 1);
+iput_out:
+ iput(inode);
+}
- lock_page(page);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
- f2fs_put_page(page, 1);
- goto repeat;
- }
-page_hit:
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
+void move_node_page(struct page *node_page, int gc_type)
+{
+ if (gc_type == FG_GC) {
+ struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 1,
+ .for_reclaim = 0,
+ };
+
+ set_page_dirty(node_page);
+ f2fs_wait_on_page_writeback(node_page, NODE, true);
+
+ f2fs_bug_on(sbi, PageWriteback(node_page));
+ if (!clear_page_dirty_for_io(node_page))
+ goto out_page;
+
+ if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
+ unlock_page(node_page);
+ goto release_page;
+ } else {
+ /* set page dirty and write it */
+ if (!PageWriteback(node_page))
+ set_page_dirty(node_page);
+ }
+out_page:
+ unlock_page(node_page);
+release_page:
+ f2fs_put_page(node_page, 0);
+}
+
+static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ pgoff_t index, end;
+ struct pagevec pvec;
+ struct page *last_page = NULL;
+
+ pagevec_init(&pvec, 0);
+ index = 0;
+ end = ULONG_MAX;
+
+ while (index <= end) {
+ int i, nr_pages;
+ nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_put_page(last_page, 0);
+ pagevec_release(&pvec);
+ return ERR_PTR(-EIO);
+ }
+
+ if (!IS_DNODE(page) || !is_cold_node(page))
+ continue;
+ if (ino_of_node(page) != ino)
+ continue;
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+ if (ino_of_node(page) != ino)
+ goto continue_unlock;
+
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ if (last_page)
+ f2fs_put_page(last_page, 0);
+
+ get_page(page);
+ last_page = page;
+ unlock_page(page);
+ }
+ pagevec_release(&pvec);
+ cond_resched();
}
- return page;
+ return last_page;
}
-void sync_inode_page(struct dnode_of_data *dn)
+int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
+ struct writeback_control *wbc, bool atomic)
{
- if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
- update_inode(dn->inode, dn->node_page);
- } else if (dn->inode_page) {
- if (!dn->inode_page_locked)
- lock_page(dn->inode_page);
- update_inode(dn->inode, dn->inode_page);
- if (!dn->inode_page_locked)
- unlock_page(dn->inode_page);
- } else {
- update_inode_page(dn->inode);
+ pgoff_t index, end;
+ struct pagevec pvec;
+ int ret = 0;
+ struct page *last_page = NULL;
+ bool marked = false;
+ nid_t ino = inode->i_ino;
+ int nwritten = 0;
+
+ if (atomic) {
+ last_page = last_fsync_dnode(sbi, ino);
+ if (IS_ERR_OR_NULL(last_page))
+ return PTR_ERR_OR_ZERO(last_page);
+ }
+retry:
+ pagevec_init(&pvec, 0);
+ index = 0;
+ end = ULONG_MAX;
+
+ while (index <= end) {
+ int i, nr_pages;
+ nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_put_page(last_page, 0);
+ pagevec_release(&pvec);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!IS_DNODE(page) || !is_cold_node(page))
+ continue;
+ if (ino_of_node(page) != ino)
+ continue;
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+ if (ino_of_node(page) != ino)
+ goto continue_unlock;
+
+ if (!PageDirty(page) && page != last_page) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ f2fs_wait_on_page_writeback(page, NODE, true);
+ BUG_ON(PageWriteback(page));
+
+ if (!atomic || page == last_page) {
+ set_fsync_mark(page, 1);
+ if (IS_INODE(page)) {
+ if (is_inode_flag_set(inode,
+ FI_DIRTY_INODE))
+ update_inode(inode, page);
+ set_dentry_mark(page,
+ need_dentry_mark(sbi, ino));
+ }
+ /* may have been written by another thread */
+ if (!PageDirty(page))
+ set_page_dirty(page);
+ }
+
+ if (!clear_page_dirty_for_io(page))
+ goto continue_unlock;
+
+ ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
+ if (ret) {
+ unlock_page(page);
+ f2fs_put_page(last_page, 0);
+ break;
+ } else {
+ nwritten++;
+ }
+
+ if (page == last_page) {
+ f2fs_put_page(page, 0);
+ marked = true;
+ break;
+ }
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+
+ if (ret || marked)
+ break;
}
+ if (!ret && atomic && !marked) {
+ f2fs_msg(sbi->sb, KERN_DEBUG,
+ "Retry to write fsync mark: ino=%u, idx=%lx",
+ ino, last_page->index);
+ lock_page(last_page);
+ f2fs_wait_on_page_writeback(last_page, NODE, true);
+ set_page_dirty(last_page);
+ unlock_page(last_page);
+ goto retry;
+ }
+out:
+ if (nwritten)
+ f2fs_submit_merged_bio_cond(sbi, NULL, NULL, ino, NODE, WRITE);
+ return ret ? -EIO : 0;
}
-int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
- struct writeback_control *wbc)
+int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
{
pgoff_t index, end;
struct pagevec pvec;
- int step = ino ? 2 : 0;
- int nwritten = 0, wrote = 0;
+ int step = 0;
+ int nwritten = 0;
+ int ret = 0;
pagevec_init(&pvec, 0);
next_step:
index = 0;
- end = LONG_MAX;
+ end = ULONG_MAX;
while (index <= end) {
int i, nr_pages;
@@ -1175,6 +1445,12 @@ next_step:
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
+ if (unlikely(f2fs_cp_error(sbi))) {
+ pagevec_release(&pvec);
+ ret = -EIO;
+ goto out;
+ }
+
/*
* flushing sequence with step:
* 0. indirect nodes
@@ -1189,14 +1465,8 @@ next_step:
if (step == 2 && (!IS_DNODE(page) ||
!is_cold_node(page)))
continue;
-
- /*
- * If an fsync mode,
- * we should not skip writing node pages.
- */
- if (ino && ino_of_node(page) == ino)
- lock_page(page);
- else if (!trylock_page(page))
+lock_node:
+ if (!trylock_page(page))
continue;
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
@@ -1204,33 +1474,33 @@ continue_unlock:
unlock_page(page);
continue;
}
- if (ino && ino_of_node(page) != ino)
- goto continue_unlock;
if (!PageDirty(page)) {
/* someone wrote it for us */
goto continue_unlock;
}
+ /* flush inline_data */
+ if (is_inline_node(page)) {
+ clear_inline_node(page);
+ unlock_page(page);
+ flush_inline_data(sbi, ino_of_node(page));
+ goto lock_node;
+ }
+
+ f2fs_wait_on_page_writeback(page, NODE, true);
+
+ BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- /* called by fsync() */
- if (ino && IS_DNODE(page)) {
- set_fsync_mark(page, 1);
- if (IS_INODE(page))
- set_dentry_mark(page,
- need_dentry_mark(sbi, ino));
- nwritten++;
- } else {
- set_fsync_mark(page, 0);
- set_dentry_mark(page, 0);
- }
+ set_fsync_mark(page, 0);
+ set_dentry_mark(page, 0);
if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
unlock_page(page);
else
- wrote++;
+ nwritten++;
if (--wbc->nr_to_write == 0)
break;
@@ -1248,15 +1518,15 @@ continue_unlock:
step++;
goto next_step;
}
-
- if (wrote)
+out:
+ if (nwritten)
f2fs_submit_merged_bio(sbi, NODE, WRITE);
- return nwritten;
+ return ret;
}
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
- pgoff_t index = 0, end = LONG_MAX;
+ pgoff_t index = 0, end = ULONG_MAX;
struct pagevec pvec;
int ret2 = 0, ret = 0;
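
Editor's note: several scan loops in this patch change their end sentinel from LONG_MAX to ULONG_MAX because pgoff_t is an unsigned long; the signed cap silently makes half the index space unreachable on 64-bit. A two-line demonstration:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long index = (unsigned long)LONG_MAX + 10;

	printf("LONG_MAX  end: %s\n",
	       index <= (unsigned long)LONG_MAX ? "visited" : "skipped!");
	printf("ULONG_MAX end: %s\n",
	       index <= ULONG_MAX ? "visited" : "skipped!");
	return 0;
}
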
@@ -1278,7 +1548,7 @@ int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
continue;
if (ino && ino_of_node(page) == ino) {
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
if (TestClearPageError(page))
ret = -EIO;
}
@@ -1317,8 +1587,6 @@ static int f2fs_write_node_page(struct page *page,
if (unlikely(f2fs_cp_error(sbi)))
goto redirty_out;
- f2fs_wait_on_page_writeback(page, NODE);
-
/* get old block addr of this node page */
nid = nid_of_node(page);
f2fs_bug_on(sbi, page->index != nid);
@@ -1342,14 +1610,18 @@ static int f2fs_write_node_page(struct page *page,
}
set_page_writeback(page);
- fio.blk_addr = ni.blk_addr;
+ fio.old_blkaddr = ni.blk_addr;
write_node_page(nid, &fio);
- set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
+ set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
dec_page_count(sbi, F2FS_DIRTY_NODES);
up_read(&sbi->node_write);
- unlock_page(page);
if (wbc->for_reclaim)
+ f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE);
+
+ unlock_page(page);
+
+ if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_bio(sbi, NODE, WRITE);
return 0;
@@ -1363,10 +1635,9 @@ static int f2fs_write_node_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct blk_plug plug;
long diff;
- trace_f2fs_writepages(mapping->host, wbc, NODE);
-
/* balancing f2fs's metadata in background */
f2fs_balance_fs_bg(sbi);
@@ -1374,14 +1645,19 @@ static int f2fs_write_node_pages(struct address_space *mapping,
if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
goto skip_write;
+ trace_f2fs_writepages(mapping->host, wbc, NODE);
+
diff = nr_pages_to_write(sbi, NODE, wbc);
wbc->sync_mode = WB_SYNC_NONE;
- sync_node_pages(sbi, 0, wbc);
+ blk_start_plug(&plug);
+ sync_node_pages(sbi, wbc);
+ blk_finish_plug(&plug);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
return 0;
skip_write:
wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
+ trace_f2fs_writepages(mapping->host, wbc, NODE);
return 0;
}
@@ -1389,9 +1665,10 @@ static int f2fs_set_node_page_dirty(struct page *page)
{
trace_f2fs_set_page_dirty(page, NODE);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
SetPagePrivate(page);
f2fs_trace_pid(page);
@@ -1409,6 +1686,9 @@ const struct address_space_operations f2fs_node_aops = {
.set_page_dirty = f2fs_set_node_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
@@ -1417,11 +1697,35 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
return radix_tree_lookup(&nm_i->free_nid_root, n);
}
-static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
- struct free_nid *i)
+static int __insert_nid_to_list(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_list list, bool new)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ if (new) {
+ int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
+ if (err)
+ return err;
+ }
+
+ f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+ i->state != NID_ALLOC);
+ nm_i->nid_cnt[list]++;
+ list_add_tail(&i->list, &nm_i->nid_list[list]);
+ return 0;
+}
+
+static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_list list, bool reuse)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+ i->state != NID_ALLOC);
+ nm_i->nid_cnt[list]--;
list_del(&i->list);
- radix_tree_delete(&nm_i->free_nid_root, i->nid);
+ if (!reuse)
+ radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
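
Editor's note: the helpers above replace a single free-nid list with one list per state (free and allocated), each with its own counter so occupancy checks are O(1). A miniature of the same bookkeeping built on <sys/queue.h> tail queues (the radix-tree index is omitted here):

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

enum nid_list { FREE_LIST, ALLOC_LIST, NR_LISTS };

struct nid { int id; TAILQ_ENTRY(nid) link; };
TAILQ_HEAD(nid_head, nid);

static struct nid_head lists[NR_LISTS];
static int cnt[NR_LISTS];

static void push(struct nid *n, enum nid_list l)
{
	TAILQ_INSERT_TAIL(&lists[l], n, link);
	cnt[l]++;
}

static struct nid *pop(enum nid_list l)
{
	struct nid *n = TAILQ_FIRST(&lists[l]);

	if (n) {
		TAILQ_REMOVE(&lists[l], n, link);
		cnt[l]--;
	}
	return n;
}

int main(void)
{
	TAILQ_INIT(&lists[FREE_LIST]);
	TAILQ_INIT(&lists[ALLOC_LIST]);

	for (int i = 1; i <= 3; i++) {
		struct nid *n = malloc(sizeof(*n));
		n->id = i;
		push(n, FREE_LIST);
	}
	struct nid *n = pop(FREE_LIST);	/* alloc_nid() analogue */
	push(n, ALLOC_LIST);
	printf("free=%d alloc=%d got nid %d\n",
	       cnt[FREE_LIST], cnt[ALLOC_LIST], n->id);
	return 0;
}
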
@@ -1429,10 +1733,7 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
struct nat_entry *ne;
- bool allocated = false;
-
- if (!available_free_memory(sbi, FREE_NIDS))
- return -1;
+ int err;
/* 0 nid should not be used */
if (unlikely(nid == 0))
@@ -1440,14 +1741,9 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
if (build) {
/* do not add allocated nids */
- down_read(&nm_i->nat_tree_lock);
ne = __lookup_nat_cache(nm_i, nid);
- if (ne &&
- (!get_nat_flag(ne, IS_CHECKPOINTED) ||
+ if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
nat_get_blkaddr(ne) != NULL_ADDR))
- allocated = true;
- up_read(&nm_i->nat_tree_lock);
- if (allocated)
return 0;
}
@@ -1460,33 +1756,30 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
return 0;
}
- spin_lock(&nm_i->free_nid_list_lock);
- if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
- spin_unlock(&nm_i->free_nid_list_lock);
- radix_tree_preload_end();
+ spin_lock(&nm_i->nid_list_lock);
+ err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
+ spin_unlock(&nm_i->nid_list_lock);
+ radix_tree_preload_end();
+ if (err) {
kmem_cache_free(free_nid_slab, i);
return 0;
}
- list_add_tail(&i->list, &nm_i->free_nid_list);
- nm_i->fcnt++;
- spin_unlock(&nm_i->free_nid_list_lock);
- radix_tree_preload_end();
return 1;
}
-static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
bool need_free = false;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
if (i && i->state == NID_NEW) {
- __del_from_free_nid_list(nm_i, i);
- nm_i->fcnt--;
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
need_free = true;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
if (need_free)
kmem_cache_free(free_nid_slab, i);
@@ -1509,29 +1802,32 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
- if (blk_addr == NULL_ADDR) {
- if (add_free_nid(sbi, start_nid, true) < 0)
- break;
- }
+ if (blk_addr == NULL_ADDR)
+ add_free_nid(sbi, start_nid, true);
}
}
-static void build_free_nids(struct f2fs_sb_info *sbi)
+static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
int i = 0;
nid_t nid = nm_i->next_scan_nid;
/* Enough entries */
- if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
+ if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
+ return;
+
+ if (!sync && !available_free_memory(sbi, FREE_NIDS))
return;
/* readahead nat pages to be scanned */
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
META_NAT, true);
+ down_read(&nm_i->nat_tree_lock);
+
while (1) {
struct page *page = get_current_nat_page(sbi, nid);
@@ -1550,21 +1846,31 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
nm_i->next_scan_nid = nid;
/* find free nids from current sum_pages */
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < nats_in_cursum(sum); i++) {
- block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
- nid = le32_to_cpu(nid_in_journal(sum, i));
+ down_read(&curseg->journal_rwsem);
+ for (i = 0; i < nats_in_cursum(journal); i++) {
+ block_t addr;
+
+ addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
+ nid = le32_to_cpu(nid_in_journal(journal, i));
if (addr == NULL_ADDR)
add_free_nid(sbi, nid, true);
else
- remove_free_nid(nm_i, nid);
+ remove_free_nid(sbi, nid);
}
- mutex_unlock(&curseg->curseg_mutex);
+ up_read(&curseg->journal_rwsem);
+ up_read(&nm_i->nat_tree_lock);
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
}
+void build_free_nids(struct f2fs_sb_info *sbi, bool sync)
+{
+ mutex_lock(&NM_I(sbi)->build_lock);
+ __build_free_nids(sbi, sync);
+ mutex_unlock(&NM_I(sbi)->build_lock);
+}
+
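
Editor's note: build_free_nids() is now a locked wrapper around the real scan so concurrent callers cannot walk the NAT at the same time. The shape, as a pthreads sketch; the "enough cached" early-out mirrors the nid_cnt check and the numbers are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t build_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_count;

static void __build_free_ids(int sync)
{
	if (!sync && free_count >= 8)
		return;			/* enough cached already */
	free_count += 4;		/* pretend we scanned a block */
}

static void build_free_ids(int sync)
{
	pthread_mutex_lock(&build_lock);	/* serialize all builders */
	__build_free_ids(sync);
	pthread_mutex_unlock(&build_lock);
}

int main(void)
{
	build_free_ids(1);
	printf("free ids cached: %d\n", free_count);
	return 0;
}
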
/*
* If this function returns success, caller can obtain a new nid
* from second parameter of this function.
@@ -1575,40 +1881,35 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL;
retry:
- if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_ALLOC_NID))
return false;
+#endif
+ spin_lock(&nm_i->nid_list_lock);
- spin_lock(&nm_i->free_nid_list_lock);
+ if (unlikely(nm_i->available_nids == 0)) {
+ spin_unlock(&nm_i->nid_list_lock);
+ return false;
+ }
/* We should not use stale free nids created by build_free_nids */
- if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
- struct node_info ni;
-
- f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
- list_for_each_entry(i, &nm_i->free_nid_list, list)
- if (i->state == NID_NEW)
- break;
-
- f2fs_bug_on(sbi, i->state != NID_NEW);
+ if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
+ f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
+ i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
+ struct free_nid, list);
*nid = i->nid;
+
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, true);
i->state = NID_ALLOC;
- nm_i->fcnt--;
- spin_unlock(&nm_i->free_nid_list_lock);
-
- /* check nid is allocated already */
- get_node_info(sbi, *nid, &ni);
- if (ni.blk_addr != NULL_ADDR) {
- alloc_nid_done(sbi, *nid);
- goto retry;
- }
+ __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
+ nm_i->available_nids--;
+ spin_unlock(&nm_i->nid_list_lock);
return true;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
/* Let's scan nat pages and its caches to get free nids */
- mutex_lock(&nm_i->build_lock);
- build_free_nids(sbi);
- mutex_unlock(&nm_i->build_lock);
+ build_free_nids(sbi, true);
goto retry;
}
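
Editor's note: the FAULT_ALLOC_NID test at the top of alloc_nid() belongs to F2FS_FAULT_INJECTION (see the matching FAULT_EVICT_INODE hook earlier in this patch): a configured site periodically reports failure so error paths get exercised. A minimal standalone version of such a hook; the names and the modulo policy here are assumptions, not the kernel implementation:

#include <stdio.h>

enum fault_type { FAULT_ALLOC_ID, FAULT_EVICT, NR_FAULT };

static unsigned interval[NR_FAULT] = { [FAULT_ALLOC_ID] = 3 };
static unsigned counter[NR_FAULT];

static int time_to_inject(enum fault_type t)
{
	if (!interval[t])
		return 0;	/* injection disabled for this site */
	return ++counter[t] % interval[t] == 0;
}

static int alloc_id(int *id)
{
	static int next = 1;

	if (time_to_inject(FAULT_ALLOC_ID))
		return 0;	/* simulated allocation failure */
	*id = next++;
	return 1;
}

int main(void)
{
	int id;

	for (int i = 0; i < 5; i++)
		printf("alloc -> %s\n", alloc_id(&id) ? "ok" : "injected fail");
	return 0;
}
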
@@ -1620,11 +1921,11 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
- __del_from_free_nid_list(nm_i, i);
- spin_unlock(&nm_i->free_nid_list_lock);
+ f2fs_bug_on(sbi, !i);
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
+ spin_unlock(&nm_i->nid_list_lock);
kmem_cache_free(free_nid_slab, i);
}
@@ -1641,17 +1942,22 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
if (!nid)
return;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
+ f2fs_bug_on(sbi, !i);
+
if (!available_free_memory(sbi, FREE_NIDS)) {
- __del_from_free_nid_list(nm_i, i);
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
need_free = true;
} else {
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true);
i->state = NID_NEW;
- nm_i->fcnt++;
+ __insert_nid_to_list(sbi, i, FREE_NID_LIST, false);
}
- spin_unlock(&nm_i->free_nid_list_lock);
+
+ nm_i->available_nids++;
+
+ spin_unlock(&nm_i->nid_list_lock);
if (need_free)
kmem_cache_free(free_nid_slab, i);
@@ -1663,21 +1969,24 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
struct free_nid *i, *next;
int nr = nr_shrink;
+ if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
+ return 0;
+
if (!mutex_trylock(&nm_i->build_lock))
return 0;
- spin_lock(&nm_i->free_nid_list_lock);
- list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
- if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
+ list) {
+ if (nr_shrink <= 0 ||
+ nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
break;
- if (i->state == NID_ALLOC)
- continue;
- __del_from_free_nid_list(nm_i, i);
+
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
kmem_cache_free(free_nid_slab, i);
- nm_i->fcnt--;
nr_shrink--;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
mutex_unlock(&nm_i->build_lock);
return nr - nr_shrink;
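/*
 * Illustrative usage (freed and nr_to_scan are hypothetical): the return
 * value is the number of entries actually freed, and 0 when the list is
 * already at or below MAX_FREE_NIDS or another scanner holds build_lock.
 */
	if (!available_free_memory(sbi, FREE_NIDS))
		freed += try_to_free_nids(sbi, nr_to_scan);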
@@ -1695,7 +2004,7 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
ri = F2FS_INODE(page);
if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
- clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
+ clear_inode_flag(inode, FI_INLINE_XATTR);
goto update_inode;
}
@@ -1703,7 +2012,7 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
src_addr = inline_xattr_addr(page);
inline_size = inline_xattr_size(inode);
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
memcpy(dst_addr, src_addr, inline_size);
update_inode:
update_inode(inode, ipage);
@@ -1733,17 +2042,15 @@ recover_xnid:
if (unlikely(!inc_valid_node_count(sbi, inode)))
f2fs_bug_on(sbi, 1);
- remove_free_nid(NM_I(sbi), new_xnid);
+ remove_free_nid(sbi, new_xnid);
get_node_info(sbi, new_xnid, &ni);
ni.ino = inode->i_ino;
set_node_addr(sbi, &ni, NEW_ADDR, false);
- F2FS_I(inode)->i_xattr_nid = new_xnid;
+ f2fs_i_xnid_write(inode, new_xnid);
/* 3: update xattr blkaddr */
refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
set_node_addr(sbi, &ni, blkaddr, false);
-
- update_inode_page(inode);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
@@ -1757,15 +2064,18 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL;
-
- ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
- if (!ipage)
- return -ENOMEM;
+retry:
+ ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
+ if (!ipage) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
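/*
 * Illustrative note: this retry loop is the recurring f2fs answer to a
 * transient page-allocation failure during recovery -- back off roughly
 * 20ms (HZ/50) behind async writeback congestion and try again rather
 * than failing the whole roll-forward; the same pattern guards
 * get_dnode_of_data() and check_index_in_prev_nodes() in recovery.c.
 */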
/* Should not use this inode from free nid list */
- remove_free_nid(NM_I(sbi), ino);
+ remove_free_nid(sbi, ino);
- SetPageUptodate(ipage);
+ if (!PageUptodate(ipage))
+ SetPageUptodate(ipage);
fill_node_footer(ipage, ino, ino, 0, true);
src = F2FS_INODE(page);
@@ -1796,7 +2106,6 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
struct f2fs_node *rn;
struct f2fs_summary *sum_entry;
block_t addr;
- int bio_blocks = MAX_BIO_BLOCKS(sbi);
int i, idx, last_offset, nrpages;
/* scan the node segment */
@@ -1805,7 +2114,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
sum_entry = &sum->entries[0];
for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
- nrpages = min(last_offset - i, bio_blocks);
+ nrpages = min(last_offset - i, BIO_MAX_PAGES);
/* readahead node pages */
ra_meta_pages(sbi, addr, nrpages, META_POR, true);
@@ -1831,28 +2140,39 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
int i;
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < nats_in_cursum(sum); i++) {
+ down_write(&curseg->journal_rwsem);
+ for (i = 0; i < nats_in_cursum(journal); i++) {
struct nat_entry *ne;
struct f2fs_nat_entry raw_ne;
- nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
+ nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
- raw_ne = nat_in_journal(sum, i);
+ raw_ne = nat_in_journal(journal, i);
- down_write(&nm_i->nat_tree_lock);
ne = __lookup_nat_cache(nm_i, nid);
if (!ne) {
ne = grab_nat_entry(nm_i, nid);
node_info_from_raw_nat(&ne->ni, &raw_ne);
}
+
+ /*
+ * If a free nat in the journal has not been used since the
+ * last checkpoint, remove it from available nids now, since
+ * it will be added back later.
+ */
+ if (!get_nat_flag(ne, IS_DIRTY) &&
+ le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
+ spin_lock(&nm_i->nid_list_lock);
+ nm_i->available_nids--;
+ spin_unlock(&nm_i->nid_list_lock);
+ }
+
__set_nat_cache_dirty(nm_i, ne);
- up_write(&nm_i->nat_tree_lock);
}
- update_nats_in_cursum(sum, -i);
- mutex_unlock(&curseg->curseg_mutex);
+ update_nats_in_cursum(journal, -i);
+ up_write(&curseg->journal_rwsem);
}
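/*
 * Bookkeeping sketch (illustrative): a free nat parked in the journal is
 * subtracted from available_nids here and added back once
 * __flush_nat_entry_set() re-inserts it via add_free_nid(), preserving
 * the invariant established in init_node_manager():
 *
 *	available_nids = max_nid - total_valid_node_count
 *				 - F2FS_RESERVED_NODE_NUM
 */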
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
@@ -1877,24 +2197,23 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
struct nat_entry_set *set)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
bool to_journal = true;
struct f2fs_nat_block *nat_blk;
struct nat_entry *ne, *cur;
struct page *page = NULL;
- struct f2fs_nm_info *nm_i = NM_I(sbi);
/*
* there are two steps to flush nat entries:
* #1, flush nat entries to journal in current hot data summary block.
* #2, flush nat entries to nat page.
*/
- if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
+ if (!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
to_journal = false;
if (to_journal) {
- mutex_lock(&curseg->curseg_mutex);
+ down_write(&curseg->journal_rwsem);
} else {
page = get_next_nat_page(sbi, start_nid);
nat_blk = page_address(page);
@@ -1911,35 +2230,33 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
continue;
if (to_journal) {
- offset = lookup_journal_in_cursum(sum,
+ offset = lookup_journal_in_cursum(journal,
NAT_JOURNAL, nid, 1);
f2fs_bug_on(sbi, offset < 0);
- raw_ne = &nat_in_journal(sum, offset);
- nid_in_journal(sum, offset) = cpu_to_le32(nid);
+ raw_ne = &nat_in_journal(journal, offset);
+ nid_in_journal(journal, offset) = cpu_to_le32(nid);
} else {
raw_ne = &nat_blk->entries[nid - start_nid];
}
raw_nat_from_node_info(raw_ne, &ne->ni);
-
- down_write(&NM_I(sbi)->nat_tree_lock);
nat_reset_flag(ne);
__clear_nat_cache_dirty(NM_I(sbi), ne);
- up_write(&NM_I(sbi)->nat_tree_lock);
-
- if (nat_get_blkaddr(ne) == NULL_ADDR)
+ if (nat_get_blkaddr(ne) == NULL_ADDR) {
add_free_nid(sbi, nid, false);
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ NM_I(sbi)->available_nids++;
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
}
if (to_journal)
- mutex_unlock(&curseg->curseg_mutex);
+ up_write(&curseg->journal_rwsem);
else
f2fs_put_page(page, 1);
f2fs_bug_on(sbi, set->entry_cnt);
- down_write(&nm_i->nat_tree_lock);
radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
- up_write(&nm_i->nat_tree_lock);
kmem_cache_free(nat_entry_set_slab, set);
}
@@ -1950,7 +2267,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
struct nat_entry_set *setvec[SETVEC_SIZE];
struct nat_entry_set *set, *tmp;
unsigned int found;
@@ -1959,29 +2276,32 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
if (!nm_i->dirty_nat_cnt)
return;
+
+ down_write(&nm_i->nat_tree_lock);
+
/*
* if there are no enough space in journal to store dirty nat
* entries, remove all entries from journal and merge them
* into nat entry set.
*/
- if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
+ if (!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
remove_nats_in_journal(sbi);
- down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_set(nm_i,
set_idx, SETVEC_SIZE, setvec))) {
unsigned idx;
set_idx = setvec[found - 1]->set + 1;
for (idx = 0; idx < found; idx++)
__adjust_nat_entry_set(setvec[idx], &sets,
- MAX_NAT_JENTRIES(sum));
+ MAX_NAT_JENTRIES(journal));
}
- up_write(&nm_i->nat_tree_lock);
/* flush dirty nats in nat entry set */
list_for_each_entry_safe(set, tmp, &sets, set_list)
__flush_nat_entry_set(sbi, set);
+ up_write(&nm_i->nat_tree_lock);
+
f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
@@ -2001,20 +2321,24 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
/* not used nids: 0, node, meta, (and root counted as valid node) */
- nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
- nm_i->fcnt = 0;
+ nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
+ F2FS_RESERVED_NODE_NUM;
+ nm_i->nid_cnt[FREE_NID_LIST] = 0;
+ nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
nm_i->nat_cnt = 0;
nm_i->ram_thresh = DEF_RAM_THRESHOLD;
nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
+ nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
- INIT_LIST_HEAD(&nm_i->free_nid_list);
+ INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
+ INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
INIT_LIST_HEAD(&nm_i->nat_entries);
mutex_init(&nm_i->build_lock);
- spin_lock_init(&nm_i->free_nid_list_lock);
+ spin_lock_init(&nm_i->nid_list_lock);
init_rwsem(&nm_i->nat_tree_lock);
nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
@@ -2042,7 +2366,7 @@ int build_node_manager(struct f2fs_sb_info *sbi)
if (err)
return err;
- build_free_nids(sbi);
+ build_free_nids(sbi, true);
return 0;
}
@@ -2059,17 +2383,18 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
return;
/* destroy free nid list */
- spin_lock(&nm_i->free_nid_list_lock);
- list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
- f2fs_bug_on(sbi, i->state == NID_ALLOC);
- __del_from_free_nid_list(nm_i, i);
- nm_i->fcnt--;
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
+ list) {
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+ spin_unlock(&nm_i->nid_list_lock);
kmem_cache_free(free_nid_slab, i);
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
}
- f2fs_bug_on(sbi, nm_i->fcnt);
- spin_unlock(&nm_i->free_nid_list_lock);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
+ f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
+ spin_unlock(&nm_i->nid_list_lock);
/* destroy nat cache */
down_write(&nm_i->nat_tree_lock);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index e4fffd2d98c4..e7997e240366 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -15,15 +15,21 @@
#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
/* # of pages to perform synchronous readahead before building free nids */
-#define FREE_NID_PAGES 4
+#define FREE_NID_PAGES 8
+#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
-#define DEF_RA_NID_PAGES 4 /* # of nid pages to be readaheaded */
+#define DEF_RA_NID_PAGES 0 /* # of nid pages to be readaheaded */
/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE 128
/* control the memory footprint threshold (10MB per 1GB ram) */
-#define DEF_RAM_THRESHOLD 10
+#define DEF_RAM_THRESHOLD 1
+
+/* control dirty nats ratio threshold (default: 10% over max nid count) */
+#define DEF_DIRTY_NAT_RATIO_THRESHOLD 10
+/* control total # of nats */
+#define DEF_NAT_CACHE_THRESHOLD 100000
/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE 64
@@ -117,6 +123,17 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
raw_ne->version = ni->version;
}
+static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
+{
+ return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
+ NM_I(sbi)->dirty_nats_ratio / 100;
+}
+
+static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
+{
+ return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
+}
+
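/*
 * Worked example (with a hypothetical max_nid of 4,000,000): at the
 * default dirty_nats_ratio of 10, excess_dirty_nats() trips once 400,000
 * NAT entries are dirty, while excess_cached_nats() trips at the fixed
 * DEF_NAT_CACHE_THRESHOLD of 100,000 cached entries; both feed the
 * balancing decisions in segment.c.
 */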
enum mem_type {
FREE_NIDS, /* indicates the free nid list */
NAT_ENTRIES, /* indicates the cached nat entry */
@@ -152,14 +169,15 @@ static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *fnid;
- spin_lock(&nm_i->free_nid_list_lock);
- if (nm_i->fcnt <= 0) {
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
+ if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) {
+ spin_unlock(&nm_i->nid_list_lock);
return;
}
- fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
+ fnid = list_entry(nm_i->nid_list[FREE_NID_LIST].next,
+ struct free_nid, list);
*nid = fnid->nid;
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
}
/*
@@ -183,7 +201,7 @@ static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
block_addr = (pgoff_t)(nm_i->nat_blkaddr +
(seg_off << sbi->log_blocks_per_seg << 1) +
- (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
+ (block_off & (sbi->blocks_per_seg - 1)));
if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
block_addr += sbi->blocks_per_seg;
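/*
 * Equivalence note (illustrative): blocks_per_seg == 1 << log_blocks_per_seg,
 * so with the default 512 blocks per segment the mask is 0x1ff either way
 * and (block_off & (sbi->blocks_per_seg - 1)) selects the block offset
 * within the NAT segment.
 */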
@@ -212,6 +230,37 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
f2fs_change_bit(block_off, nm_i->nat_bitmap);
}
+static inline nid_t ino_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.ino);
+}
+
+static inline nid_t nid_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.nid);
+}
+
+static inline unsigned int ofs_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ unsigned flag = le32_to_cpu(rn->footer.flag);
+ return flag >> OFFSET_BIT_SHIFT;
+}
+
+static inline __u64 cpver_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le64_to_cpu(rn->footer.cp_ver);
+}
+
+static inline block_t next_blkaddr_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.next_blkaddr);
+}
+
static inline void fill_node_footer(struct page *page, nid_t nid,
nid_t ino, unsigned int ofs, bool reset)
{
@@ -242,40 +291,30 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
struct f2fs_node *rn = F2FS_NODE(page);
+ size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+ __u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);
- rn->footer.cp_ver = ckpt->checkpoint_ver;
+ if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+ __u64 crc = le32_to_cpu(*((__le32 *)
+ ((unsigned char *)ckpt + crc_offset)));
+ cp_ver |= (crc << 32);
+ }
+ rn->footer.cp_ver = cpu_to_le64(cp_ver);
rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
-static inline nid_t ino_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.ino);
-}
-
-static inline nid_t nid_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.nid);
-}
-
-static inline unsigned int ofs_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- unsigned flag = le32_to_cpu(rn->footer.flag);
- return flag >> OFFSET_BIT_SHIFT;
-}
-
-static inline unsigned long long cpver_of_node(struct page *node_page)
+static inline bool is_recoverable_dnode(struct page *page)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le64_to_cpu(rn->footer.cp_ver);
-}
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+ size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+ __u64 cp_ver = cur_cp_version(ckpt);
-static inline block_t next_blkaddr_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.next_blkaddr);
+ if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+ __u64 crc = le32_to_cpu(*((__le32 *)
+ ((unsigned char *)ckpt + crc_offset)));
+ cp_ver |= (crc << 32);
+ }
+ return cp_ver == cpver_of_node(page);
}
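/*
 * Packing sketch (pack_cp_ver() is a hypothetical helper): with
 * CP_CRC_RECOVERY_FLAG set, the checkpoint CRC rides in the upper 32 bits
 * of the 64-bit version that both fill_node_footer_blkaddr() and
 * is_recoverable_dnode() compute, so a dnode is recoverable only when the
 * packed value in its footer matches the current checkpoint:
 */
static inline __u64 pack_cp_ver(__u64 cp_ver, __u32 crc)
{
	return cp_ver | ((__u64)crc << 32);
}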
/*
@@ -317,17 +356,17 @@ static inline bool IS_DNODE(struct page *node_page)
return true;
}
-static inline void set_nid(struct page *p, int off, nid_t nid, bool i)
+static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
struct f2fs_node *rn = F2FS_NODE(p);
- f2fs_wait_on_page_writeback(p, NODE);
+ f2fs_wait_on_page_writeback(p, NODE, true);
if (i)
rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
else
rn->in.nid[off] = cpu_to_le32(nid);
- set_page_dirty(p);
+ return set_page_dirty(p);
}
static inline nid_t get_nid(struct page *p, int off, bool i)
@@ -370,6 +409,21 @@ static inline int is_node(struct page *page, int type)
#define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT)
+static inline int is_inline_node(struct page *page)
+{
+ return PageChecked(page);
+}
+
+static inline void set_inline_node(struct page *page)
+{
+ SetPageChecked(page);
+}
+
+static inline void clear_inline_node(struct page *page)
+{
+ ClearPageChecked(page);
+}
+
static inline void set_cold_node(struct inode *inode, struct page *page)
{
struct f2fs_node *rn = F2FS_NODE(page);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index cbf74f47cce8..983c35da6bce 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -49,8 +49,9 @@ static struct kmem_cache *fsync_entry_slab;
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
- if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
- > sbi->user_block_count)
+ s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
+
+ if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
return false;
return true;
}
@@ -67,42 +68,71 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
return NULL;
}
-static int recover_dentry(struct inode *inode, struct page *ipage)
+static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
+ struct list_head *head, nid_t ino)
+{
+ struct inode *inode;
+ struct fsync_inode_entry *entry;
+
+ inode = f2fs_iget_retry(sbi->sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
+ entry->inode = inode;
+ list_add_tail(&entry->list, head);
+
+ return entry;
+}
+
+static void del_fsync_inode(struct fsync_inode_entry *entry)
+{
+ iput(entry->inode);
+ list_del(&entry->list);
+ kmem_cache_free(fsync_entry_slab, entry);
+}
+
+static int recover_dentry(struct inode *inode, struct page *ipage,
+ struct list_head *dir_list)
{
struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de;
- struct qstr name;
+ struct fscrypt_name fname;
struct page *page;
struct inode *dir, *einode;
+ struct fsync_inode_entry *entry;
int err = 0;
+ char *name;
- dir = f2fs_iget(inode->i_sb, pino);
- if (IS_ERR(dir)) {
- err = PTR_ERR(dir);
- goto out;
+ entry = get_fsync_inode(dir_list, pino);
+ if (!entry) {
+ entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino);
+ if (IS_ERR(entry)) {
+ dir = ERR_CAST(entry);
+ err = PTR_ERR(entry);
+ goto out;
+ }
}
- if (file_enc_name(inode)) {
- iput(dir);
- return 0;
- }
+ dir = entry->inode;
- name.len = le32_to_cpu(raw_inode->i_namelen);
- name.name = raw_inode->i_name;
+ memset(&fname, 0, sizeof(struct fscrypt_name));
+ fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
+ fname.disk_name.name = raw_inode->i_name;
- if (unlikely(name.len > F2FS_NAME_LEN)) {
+ if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
WARN_ON(1);
err = -ENAMETOOLONG;
- goto out_err;
+ goto out;
}
retry:
- de = f2fs_find_entry(dir, &name, &page);
+ de = __f2fs_find_entry(dir, &fname, &page);
if (de && inode->i_ino == le32_to_cpu(de->ino))
goto out_unmap_put;
if (de) {
- einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
+ einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
if (IS_ERR(einode)) {
WARN_ON(1);
err = PTR_ERR(einode);
@@ -118,29 +148,27 @@ retry:
f2fs_delete_entry(de, page, dir, einode);
iput(einode);
goto retry;
- }
- err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
- if (err)
- goto out_err;
-
- if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
- iput(dir);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
} else {
- add_dirty_dir_inode(dir);
- set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
+ err = __f2fs_do_add_link(dir, &fname, inode,
+ inode->i_ino, inode->i_mode);
}
-
+ if (err == -ENOMEM)
+ goto retry;
goto out;
out_unmap_put:
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
-out_err:
- iput(dir);
out:
+ if (file_enc_name(inode))
+ name = "<encrypted>";
+ else
+ name = raw_inode->i_name;
f2fs_msg(inode->i_sb, KERN_NOTICE,
"%s: ino = %x, name = %s, dir = %lx, err = %d",
- __func__, ino_of_node(ipage), raw_inode->i_name,
+ __func__, ino_of_node(ipage), name,
IS_ERR(dir) ? 0 : dir->i_ino, err);
return err;
}
@@ -151,14 +179,16 @@ static void recover_inode(struct inode *inode, struct page *page)
char *name;
inode->i_mode = le16_to_cpu(raw->i_mode);
- i_size_write(inode, le64_to_cpu(raw->i_size));
- inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
+ f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
+ inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+ inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+ F2FS_I(inode)->i_advise = raw->i_advise;
+
if (file_enc_name(inode))
name = "<encrypted>";
else
@@ -170,7 +200,6 @@ static void recover_inode(struct inode *inode, struct page *page)
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page = NULL;
block_t blkaddr;
@@ -180,8 +209,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
- ra_meta_pages(sbi, blkaddr, 1, META_POR, true);
-
while (1) {
struct fsync_inode_entry *entry;
@@ -190,7 +217,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
page = get_tmp_page(sbi, blkaddr);
- if (cp_ver != cpver_of_node(page))
+ if (!is_recoverable_dnode(page))
break;
if (!is_fsync_dnode(page))
@@ -204,35 +231,24 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
break;
}
- /* add this fsync inode to the list */
- entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
- if (!entry) {
- err = -ENOMEM;
- break;
- }
/*
* CP | dnode(F) | inode(DF)
* For this case, we should not give up now.
*/
- entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
- if (IS_ERR(entry->inode)) {
- err = PTR_ERR(entry->inode);
- kmem_cache_free(fsync_entry_slab, entry);
+ entry = add_fsync_inode(sbi, head, ino_of_node(page));
+ if (IS_ERR(entry)) {
+ err = PTR_ERR(entry);
if (err == -ENOENT) {
err = 0;
goto next;
}
break;
}
- list_add_tail(&entry->list, head);
}
entry->blkaddr = blkaddr;
- if (IS_INODE(page)) {
- entry->last_inode = blkaddr;
- if (is_dent_dnode(page))
- entry->last_dentry = blkaddr;
- }
+ if (IS_INODE(page) && is_dent_dnode(page))
+ entry->last_dentry = blkaddr;
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
@@ -248,11 +264,8 @@ static void destroy_fsync_dnodes(struct list_head *head)
{
struct fsync_inode_entry *entry, *tmp;
- list_for_each_entry_safe(entry, tmp, head, list) {
- iput(entry->inode);
- list_del(&entry->list);
- kmem_cache_free(fsync_entry_slab, entry);
- }
+ list_for_each_entry_safe(entry, tmp, head, list)
+ del_fsync_inode(entry);
}
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
@@ -276,7 +289,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
return 0;
/* Get the previous summary */
- for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
struct curseg_info *curseg = CURSEG_I(sbi, i);
if (curseg->segno == segno) {
sum = curseg->sum_blk->entries[blkoff];
@@ -314,15 +327,14 @@ got_it:
if (ino != dn->inode->i_ino) {
/* Deallocate previous index in the node page */
- inode = f2fs_iget(sbi->sb, ino);
+ inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode))
return PTR_ERR(inode);
} else {
inode = dn->inode;
}
- bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
- le16_to_cpu(sum.ofs_in_node);
+ bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);
/*
* if inode page is locked, unlock temporarily, but its reference
@@ -357,10 +369,9 @@ truncate_out:
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *page, block_t blkaddr)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
- unsigned int start, end;
struct dnode_of_data dn;
struct node_info ni;
+ unsigned int start, end;
int err = 0, recovered = 0;
/* step 1: recover xattr */
@@ -380,16 +391,21 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
goto out;
/* step 3: recover data indices */
- start = start_bidx_of_node(ofs_of_node(page), fi);
- end = start + ADDRS_PER_PAGE(page, fi);
+ start = start_bidx_of_node(ofs_of_node(page), inode);
+ end = start + ADDRS_PER_PAGE(page, inode);
set_new_dnode(&dn, inode, NULL, NULL, 0);
-
+retry_dn:
err = get_dnode_of_data(&dn, start, ALLOC_NODE);
- if (err)
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_dn;
+ }
goto out;
+ }
- f2fs_wait_on_page_writeback(dn.node_page, NODE);
+ f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
get_node_info(sbi, dn.nid, &ni);
f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
@@ -411,14 +427,17 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
continue;
}
+ if (!file_keep_isize(inode) &&
+ (i_size_read(inode) <= (start << PAGE_SHIFT)))
+ f2fs_i_size_write(inode, (start + 1) << PAGE_SHIFT);
+
/*
* dest is reserved block, invalidate src block
* and then reserve one new block in dnode page.
*/
if (dest == NEW_ADDR) {
truncate_data_blocks_range(&dn, 1);
- err = reserve_new_block(&dn);
- f2fs_bug_on(sbi, err);
+ reserve_new_block(&dn);
continue;
}
@@ -427,25 +446,33 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (src == NULL_ADDR) {
err = reserve_new_block(&dn);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ while (err)
+ err = reserve_new_block(&dn);
+#endif
/* We should not get -ENOSPC */
f2fs_bug_on(sbi, err);
+ if (err)
+ goto err;
}
-
+retry_prev:
/* Check the previous node page having this index */
err = check_index_in_prev_nodes(sbi, dest, &dn);
- if (err)
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_prev;
+ }
goto err;
+ }
/* write dummy data page */
f2fs_replace_block(sbi, &dn, src, dest,
- ni.version, false);
+ ni.version, false, false);
recovered++;
}
}
- if (IS_INODE(dn.node_page))
- sync_inode_page(&dn);
-
copy_node_footer(dn.node_page, page);
fill_node_footer(dn.node_page, dn.nid, ni.ino,
ofs_of_node(page), false);
@@ -454,22 +481,23 @@ err:
f2fs_put_dnode(&dn);
out:
f2fs_msg(sbi->sb, KERN_NOTICE,
- "recover_data: ino = %lx, recovered = %d blocks, err = %d",
- inode->i_ino, recovered, err);
+ "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
+ inode->i_ino,
+ file_keep_isize(inode) ? "keep" : "recover",
+ recovered, err);
return err;
}
-static int recover_data(struct f2fs_sb_info *sbi,
- struct list_head *head, int type)
+static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
+ struct list_head *dir_list)
{
- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page = NULL;
int err = 0;
block_t blkaddr;
/* get node pages in the current segment */
- curseg = CURSEG_I(sbi, type);
+ curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
while (1) {
@@ -482,12 +510,12 @@ static int recover_data(struct f2fs_sb_info *sbi,
page = get_tmp_page(sbi, blkaddr);
- if (cp_ver != cpver_of_node(page)) {
+ if (!is_recoverable_dnode(page)) {
f2fs_put_page(page, 1);
break;
}
- entry = get_fsync_inode(head, ino_of_node(page));
+ entry = get_fsync_inode(inode_list, ino_of_node(page));
if (!entry)
goto next;
/*
@@ -495,10 +523,10 @@ static int recover_data(struct f2fs_sb_info *sbi,
* In this case, we can lose the latest inode(x).
* So, call recover_inode for the inode update.
*/
- if (entry->last_inode == blkaddr)
+ if (IS_INODE(page))
recover_inode(entry->inode, page);
if (entry->last_dentry == blkaddr) {
- err = recover_dentry(entry->inode, page);
+ err = recover_dentry(entry->inode, page, dir_list);
if (err) {
f2fs_put_page(page, 1);
break;
@@ -510,11 +538,8 @@ static int recover_data(struct f2fs_sb_info *sbi,
break;
}
- if (entry->blkaddr == blkaddr) {
- iput(entry->inode);
- list_del(&entry->list);
- kmem_cache_free(fsync_entry_slab, entry);
- }
+ if (entry->blkaddr == blkaddr)
+ del_fsync_inode(entry);
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
@@ -525,12 +550,14 @@ next:
return err;
}
-int recover_fsync_data(struct f2fs_sb_info *sbi)
+int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct list_head inode_list;
+ struct list_head dir_list;
block_t blkaddr;
int err;
+ int ret = 0;
bool need_writecp = false;
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
@@ -539,6 +566,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
return -ENOMEM;
INIT_LIST_HEAD(&inode_list);
+ INIT_LIST_HEAD(&dir_list);
/* prevent checkpoint */
mutex_lock(&sbi->cp_mutex);
@@ -547,25 +575,26 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
/* step #1: find fsynced inode numbers */
err = find_fsync_dnodes(sbi, &inode_list);
- if (err)
+ if (err || list_empty(&inode_list))
goto out;
- if (list_empty(&inode_list))
+ if (check_only) {
+ ret = 1;
goto out;
+ }
need_writecp = true;
/* step #2: recover data */
- err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
+ err = recover_data(sbi, &inode_list, &dir_list);
if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
destroy_fsync_dnodes(&inode_list);
- kmem_cache_destroy(fsync_entry_slab);
/* truncate meta pages to be used by the recovery */
truncate_inode_pages_range(META_MAPPING(sbi),
- (loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+ (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
if (err) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
@@ -573,31 +602,20 @@ out:
}
clear_sbi_flag(sbi, SBI_POR_DOING);
- if (err) {
- bool invalidate = false;
-
- if (discard_next_dnode(sbi, blkaddr))
- invalidate = true;
-
- /* Flush all the NAT/SIT pages */
- while (get_pages(sbi, F2FS_DIRTY_META))
- sync_meta_pages(sbi, META, LONG_MAX);
+ if (err)
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ mutex_unlock(&sbi->cp_mutex);
- /* invalidate temporary meta page */
- if (invalidate)
- invalidate_mapping_pages(META_MAPPING(sbi),
- blkaddr, blkaddr);
+ /* let's drop all the directory inodes for clean checkpoint */
+ destroy_fsync_dnodes(&dir_list);
- set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
- mutex_unlock(&sbi->cp_mutex);
- } else if (need_writecp) {
+ if (!err && need_writecp) {
struct cp_control cpc = {
.reason = CP_RECOVERY,
};
- mutex_unlock(&sbi->cp_mutex);
- write_checkpoint(sbi, &cpc);
- } else {
- mutex_unlock(&sbi->cp_mutex);
+ err = write_checkpoint(sbi, &cpc);
}
- return err;
+
+ kmem_cache_destroy(fsync_entry_slab);
+	return ret ? ret : err;
}
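/*
 * Illustrative caller (modeled on the mount path; need_recovery is a
 * hypothetical flag): with check_only set, the function only probes for
 * fsynced dnodes -- returning 1 when a roll-forward is needed, 0 when
 * not, or a negative errno -- without modifying any metadata.
 */
	err = recover_fsync_data(sbi, true);
	if (err > 0)
		need_recovery = true;
	else if (err < 0)
		return err;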
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 7965957dd0e6..a288de069164 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -86,6 +86,7 @@ static inline unsigned long __reverse_ffs(unsigned long word)
/*
* __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
* f2fs_set_bit makes MSB and LSB reversed in a byte.
+ * @size must be an integral multiple of BITS_PER_LONG.
* Example:
* MSB <--> LSB
* f2fs_set_bit(0, bitmap) => 1000 0000
@@ -95,94 +96,73 @@ static unsigned long __find_rev_next_bit(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BIT_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long result = size;
unsigned long tmp;
if (offset >= size)
return size;
- size -= result;
+ size -= (offset & ~(BITS_PER_LONG - 1));
offset %= BITS_PER_LONG;
- if (!offset)
- goto aligned;
-
- tmp = __reverse_ulong((unsigned char *)p);
- tmp &= ~0UL >> offset;
-
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
-
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- p++;
-aligned:
- while (size & ~(BITS_PER_LONG-1)) {
+
+ while (1) {
+ if (*p == 0)
+ goto pass;
+
tmp = __reverse_ulong((unsigned char *)p);
+
+ tmp &= ~0UL >> offset;
+ if (size < BITS_PER_LONG)
+ tmp &= (~0UL << (BITS_PER_LONG - size));
if (tmp)
- goto found_middle;
- result += BITS_PER_LONG;
+ goto found;
+pass:
+ if (size <= BITS_PER_LONG)
+ break;
size -= BITS_PER_LONG;
+ offset = 0;
p++;
}
- if (!size)
- return result;
-
- tmp = __reverse_ulong((unsigned char *)p);
-found_first:
- tmp &= (~0UL << (BITS_PER_LONG - size));
- if (!tmp) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __reverse_ffs(tmp);
+ return result;
+found:
+ return result - size + __reverse_ffs(tmp);
}
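/*
 * Worked example (illustrative): f2fs_set_bit() stores logical bit 0 in
 * the MSB of the first byte, so a byte of 0x20 (0010 0000) means logical
 * bit 2 is set, and __find_rev_next_bit(bitmap, 8, 0) returns 2 once
 * __reverse_ulong() undoes the per-byte reversal; when no bit is set in
 * [offset, size), the function returns size.
 */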
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BIT_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long result = size;
unsigned long tmp;
if (offset >= size)
return size;
- size -= result;
+ size -= (offset & ~(BITS_PER_LONG - 1));
offset %= BITS_PER_LONG;
- if (!offset)
- goto aligned;
-
- tmp = __reverse_ulong((unsigned char *)p);
- tmp |= ~((~0UL << offset) >> offset);
-
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp != ~0UL)
- goto found_middle;
-
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- p++;
-aligned:
- while (size & ~(BITS_PER_LONG - 1)) {
+
+ while (1) {
+ if (*p == ~0UL)
+ goto pass;
+
tmp = __reverse_ulong((unsigned char *)p);
+
+ if (offset)
+ tmp |= ~0UL << (BITS_PER_LONG - offset);
+ if (size < BITS_PER_LONG)
+ tmp |= ~0UL >> size;
if (tmp != ~0UL)
- goto found_middle;
- result += BITS_PER_LONG;
+ goto found;
+pass:
+ if (size <= BITS_PER_LONG)
+ break;
size -= BITS_PER_LONG;
+ offset = 0;
p++;
}
- if (!size)
- return result;
-
- tmp = __reverse_ulong((unsigned char *)p);
-found_first:
- tmp |= ~(~0UL << (BITS_PER_LONG - size));
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + __reverse_ffz(tmp);
+ return result;
+found:
+ return result - size + __reverse_ffz(tmp);
}
void register_inmem_page(struct inode *inode, struct page *page)
@@ -211,69 +191,150 @@ void register_inmem_page(struct inode *inode, struct page *page)
trace_f2fs_register_inmem_page(page, INMEM);
}
-int commit_inmem_pages(struct inode *inode, bool abort)
+static int __revoke_inmem_pages(struct inode *inode,
+ struct list_head *head, bool drop, bool recover)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct inmem_pages *cur, *tmp;
+ int err = 0;
+
+ list_for_each_entry_safe(cur, tmp, head, list) {
+ struct page *page = cur->page;
+
+ if (drop)
+ trace_f2fs_commit_inmem_page(page, INMEM_DROP);
+
+ lock_page(page);
+
+ if (recover) {
+ struct dnode_of_data dn;
+ struct node_info ni;
+
+ trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
+ err = -EAGAIN;
+ goto next;
+ }
+ get_node_info(sbi, dn.nid, &ni);
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+ cur->old_addr, ni.version, true, true);
+ f2fs_put_dnode(&dn);
+ }
+next:
+ /* we don't need to invalidate this in the sccessful status */
+ if (drop || recover)
+ ClearPageUptodate(page);
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+ f2fs_put_page(page, 1);
+
+ list_del(&cur->list);
+ kmem_cache_free(inmem_entry_slab, cur);
+ dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
+ }
+ return err;
+}
+
+void drop_inmem_pages(struct inode *inode)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+
+ mutex_lock(&fi->inmem_lock);
+ __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+ mutex_unlock(&fi->inmem_lock);
+}
+
+static int __commit_inmem_pages(struct inode *inode,
+ struct list_head *revoke_list)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct inmem_pages *cur, *tmp;
- bool submit_bio = false;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = DATA,
.rw = WRITE_SYNC | REQ_PRIO,
.encrypted_page = NULL,
};
+ bool submit_bio = false;
int err = 0;
- /*
- * The abort is true only when f2fs_evict_inode is called.
- * Basically, the f2fs_evict_inode doesn't produce any data writes, so
- * that we don't need to call f2fs_balance_fs.
- * Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this
- * inode becomes free by iget_locked in f2fs_iget.
- */
- if (!abort) {
- f2fs_balance_fs(sbi);
- f2fs_lock_op(sbi);
- }
-
- mutex_lock(&fi->inmem_lock);
list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
- lock_page(cur->page);
- if (!abort) {
- if (cur->page->mapping == inode->i_mapping) {
- set_page_dirty(cur->page);
- f2fs_wait_on_page_writeback(cur->page, DATA);
- if (clear_page_dirty_for_io(cur->page))
- inode_dec_dirty_pages(inode);
- trace_f2fs_commit_inmem_page(cur->page, INMEM);
- fio.page = cur->page;
- err = do_write_data_page(&fio);
- if (err) {
- unlock_page(cur->page);
- break;
- }
- clear_cold_data(cur->page);
- submit_bio = true;
+ struct page *page = cur->page;
+
+ lock_page(page);
+ if (page->mapping == inode->i_mapping) {
+ trace_f2fs_commit_inmem_page(page, INMEM);
+
+ set_page_dirty(page);
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ if (clear_page_dirty_for_io(page)) {
+ inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
}
- } else {
- trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
+
+ fio.page = page;
+ err = do_write_data_page(&fio);
+ if (err) {
+ unlock_page(page);
+ break;
+ }
+
+ /* record old blkaddr for revoking */
+ cur->old_addr = fio.old_blkaddr;
+
+ submit_bio = true;
}
- set_page_private(cur->page, 0);
- ClearPagePrivate(cur->page);
- f2fs_put_page(cur->page, 1);
+ unlock_page(page);
+ list_move_tail(&cur->list, revoke_list);
+ }
- list_del(&cur->list);
- kmem_cache_free(inmem_entry_slab, cur);
- dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
+ if (submit_bio)
+ f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
+
+ if (!err)
+ __revoke_inmem_pages(inode, revoke_list, false, false);
+
+ return err;
+}
+
+int commit_inmem_pages(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct list_head revoke_list;
+ int err;
+
+ INIT_LIST_HEAD(&revoke_list);
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+
+ mutex_lock(&fi->inmem_lock);
+ err = __commit_inmem_pages(inode, &revoke_list);
+ if (err) {
+ int ret;
+ /*
+		 * Try to revoke all the committed pages. This can still fail
+		 * (e.g. out of memory); in that case EAGAIN is returned, which
+		 * means the transaction is no longer intact and the caller
+		 * should use a journal to recover, or rewrite and commit the
+		 * last transaction. For any other error, the revoke was
+		 * completed by the filesystem itself.
+ */
+ ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
+ if (ret)
+ err = ret;
+
+ /* drop all uncommitted pages */
+ __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
}
mutex_unlock(&fi->inmem_lock);
- if (!abort) {
- f2fs_unlock_op(sbi);
- if (submit_bio)
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
- }
+ f2fs_unlock_op(sbi);
return err;
}
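/*
 * Illustrative caller (modeled on the atomic-write commit ioctl): -EAGAIN
 * means even the revoke failed, so the transaction is torn and userspace
 * must replay it; any other error was already rolled back in-kernel.
 */
	err = commit_inmem_pages(inode);
	if (err == -EAGAIN)
		f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
			"atomic commit left partial data: ino=%lu",
			inode->i_ino);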
@@ -281,15 +342,27 @@ int commit_inmem_pages(struct inode *inode, bool abort)
* This function balances dirty node and dentry pages.
* In addition, it controls garbage collection.
*/
-void f2fs_balance_fs(struct f2fs_sb_info *sbi)
+void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_CHECKPOINT))
+ f2fs_stop_checkpoint(sbi, false);
+#endif
+
+ if (!need)
+ return;
+
+	/* balance_fs_bg may be left pending, so handle it here */
+ if (excess_cached_nats(sbi))
+ f2fs_balance_fs_bg(sbi);
+
/*
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
*/
- if (has_not_enough_free_secs(sbi, 0)) {
+ if (has_not_enough_free_secs(sbi, 0, 0)) {
mutex_lock(&sbi->gc_mutex);
- f2fs_gc(sbi, false);
+ f2fs_gc(sbi, false, false);
}
}
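/*
 * Illustrative usage: hot paths pass a cheap "did we dirty anything" hint
 * so the foreground GC check can be skipped when nothing changed, e.g.
 *
 *	f2fs_balance_fs(sbi, dn.node_changed);
 *
 * while periodic work calls f2fs_balance_fs_bg() below unconditionally.
 */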
@@ -304,14 +377,55 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
if (!available_free_memory(sbi, FREE_NIDS))
- try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+ try_to_free_nids(sbi, MAX_FREE_NIDS);
+ else
+ build_free_nids(sbi, false);
+
+ if (!is_idle(sbi))
+ return;
/* checkpoint is the only way to shrink partial cached entries */
if (!available_free_memory(sbi, NAT_ENTRIES) ||
- excess_prefree_segs(sbi) ||
!available_free_memory(sbi, INO_ENTRIES) ||
- jiffies > sbi->cp_expires)
+ excess_prefree_segs(sbi) ||
+ excess_dirty_nats(sbi) ||
+ f2fs_time_over(sbi, CP_TIME)) {
+ if (test_opt(sbi, DATA_FLUSH)) {
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
+ sync_dirty_inodes(sbi, FILE_INODE);
+ blk_finish_plug(&plug);
+ }
f2fs_sync_fs(sbi->sb, true);
+ stat_inc_bg_cp_count(sbi->stat_info);
+ }
+}
+
+static int __submit_flush_wait(struct block_device *bdev)
+{
+ struct bio *bio = f2fs_bio_alloc(0);
+ int ret;
+
+ bio->bi_bdev = bdev;
+ ret = submit_bio_wait(WRITE_FLUSH, bio);
+ bio_put(bio);
+ return ret;
+}
+
+static int submit_flush_wait(struct f2fs_sb_info *sbi)
+{
+ int ret = __submit_flush_wait(sbi->sb->s_bdev);
+ int i;
+
+ if (sbi->s_ndevs && !ret) {
+ for (i = 1; i < sbi->s_ndevs; i++) {
+ ret = __submit_flush_wait(FDEV(i).bdev);
+ if (ret)
+ break;
+ }
+ }
+ return ret;
}
static int issue_flush_thread(void *data)
@@ -324,24 +438,18 @@ repeat:
return 0;
if (!llist_empty(&fcc->issue_list)) {
- struct bio *bio;
struct flush_cmd *cmd, *next;
int ret;
- bio = f2fs_bio_alloc(0);
-
fcc->dispatch_list = llist_del_all(&fcc->issue_list);
fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
- bio->bi_bdev = sbi->sb->s_bdev;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
-
+ ret = submit_flush_wait(sbi);
llist_for_each_entry_safe(cmd, next,
fcc->dispatch_list, llnode) {
cmd->ret = ret;
complete(&cmd->wait);
}
- bio_put(bio);
fcc->dispatch_list = NULL;
}
@@ -361,24 +469,30 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
if (test_opt(sbi, NOBARRIER))
return 0;
- if (!test_opt(sbi, FLUSH_MERGE)) {
- struct bio *bio = f2fs_bio_alloc(0);
+ if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
int ret;
- bio->bi_bdev = sbi->sb->s_bdev;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
- bio_put(bio);
+ atomic_inc(&fcc->submit_flush);
+ ret = submit_flush_wait(sbi);
+ atomic_dec(&fcc->submit_flush);
return ret;
}
init_completion(&cmd.wait);
+ atomic_inc(&fcc->submit_flush);
llist_add(&cmd.llnode, &fcc->issue_list);
if (!fcc->dispatch_list)
wake_up(&fcc->flush_wait_queue);
- wait_for_completion(&cmd.wait);
+ if (fcc->f2fs_issue_flush) {
+ wait_for_completion(&cmd.wait);
+ atomic_dec(&fcc->submit_flush);
+ } else {
+ llist_del_all(&fcc->issue_list);
+ atomic_set(&fcc->submit_flush, 0);
+ }
return cmd.ret;
}
@@ -389,12 +503,19 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
struct flush_cmd_control *fcc;
int err = 0;
+ if (SM_I(sbi)->cmd_control_info) {
+ fcc = SM_I(sbi)->cmd_control_info;
+ goto init_thread;
+ }
+
fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
if (!fcc)
return -ENOMEM;
+ atomic_set(&fcc->submit_flush, 0);
init_waitqueue_head(&fcc->flush_wait_queue);
init_llist_head(&fcc->issue_list);
SM_I(sbi)->cmd_control_info = fcc;
+init_thread:
fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(fcc->f2fs_issue_flush)) {
@@ -407,14 +528,20 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
return err;
}
-void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
+void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
- if (fcc && fcc->f2fs_issue_flush)
- kthread_stop(fcc->f2fs_issue_flush);
- kfree(fcc);
- SM_I(sbi)->cmd_control_info = NULL;
+ if (fcc && fcc->f2fs_issue_flush) {
+ struct task_struct *flush_thread = fcc->f2fs_issue_flush;
+
+ fcc->f2fs_issue_flush = NULL;
+ kthread_stop(flush_thread);
+ }
+ if (free) {
+ kfree(fcc);
+ SM_I(sbi)->cmd_control_info = NULL;
+ }
}
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
@@ -493,46 +620,107 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
mutex_unlock(&dirty_i->seglist_lock);
}
-static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
- block_t blkstart, block_t blklen)
+#ifdef CONFIG_BLK_DEV_ZONED
+static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+ sector_t nr_sects = SECTOR_FROM_BLOCK(blklen);
+ sector_t sector;
+ int devi = 0;
+
+ if (sbi->s_ndevs) {
+ devi = f2fs_target_device_index(sbi, blkstart);
+ blkstart -= FDEV(devi).start_blk;
+ }
+ sector = SECTOR_FROM_BLOCK(blkstart);
+
+ if (sector & (bdev_zone_size(bdev) - 1) ||
+ nr_sects != bdev_zone_size(bdev)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "(%d) %s: Unaligned discard attempted (block %x + %x)",
+			devi, sbi->s_ndevs ? FDEV(devi).path : "",
+ blkstart, blklen);
+ return -EIO;
+ }
+
+ /*
+ * We need to know the type of the zone: for conventional zones,
+ * use regular discard if the drive supports it. For sequential
+ * zones, reset the zone write pointer.
+ */
+ switch (get_blkz_type(sbi, bdev, blkstart)) {
+
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ if (!blk_queue_discard(bdev_get_queue(bdev)))
+ return 0;
+ return blkdev_issue_discard(bdev, sector, nr_sects,
+ GFP_NOFS, 0);
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
+ trace_f2fs_issue_reset_zone(sbi->sb, blkstart);
+ return blkdev_reset_zones(bdev, sector,
+ nr_sects, GFP_NOFS);
+ default:
+	/* Unknown zone type: broken device? */
+ return -EIO;
+ }
+}
+#endif
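/*
 * Worked example (illustrative): on a drive with 256MB zones and 4KB
 * blocks, a valid zone reset covers exactly 65536 blocks (524288 sectors)
 * and starts on a zone boundary; any other range trips the "Unaligned
 * discard attempted" check above and returns -EIO.
 */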
+
+static int __issue_discard_async(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
{
sector_t start = SECTOR_FROM_BLOCK(blkstart);
sector_t len = SECTOR_FROM_BLOCK(blklen);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+ bdev_zoned_model(bdev) != BLK_ZONED_NONE)
+ return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
+#endif
+ return blkdev_issue_discard(bdev, start, len, GFP_NOFS, 0);
+}
+
+static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
+ block_t blkstart, block_t blklen)
+{
+ sector_t start = blkstart, len = 0;
+ struct block_device *bdev;
struct seg_entry *se;
unsigned int offset;
block_t i;
+ int err = 0;
+
+ bdev = f2fs_target_device(sbi, blkstart, NULL);
+
+ for (i = blkstart; i < blkstart + blklen; i++, len++) {
+ if (i != start) {
+ struct block_device *bdev2 =
+ f2fs_target_device(sbi, i, NULL);
+
+ if (bdev2 != bdev) {
+ err = __issue_discard_async(sbi, bdev,
+ start, len);
+ if (err)
+ return err;
+ bdev = bdev2;
+ start = i;
+ len = 0;
+ }
+ }
- for (i = blkstart; i < blkstart + blklen; i++) {
se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
offset = GET_BLKOFF_FROM_SEG0(sbi, i);
if (!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
}
- trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
- return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
-}
-
-bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
-{
- int err = -ENOTSUPP;
-
- if (test_opt(sbi, DISCARD)) {
- struct seg_entry *se = get_seg_entry(sbi,
- GET_SEGNO(sbi, blkaddr));
- unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
- if (f2fs_test_bit(offset, se->discard_map))
- return false;
-
- err = f2fs_issue_discard(sbi, blkaddr, 1);
- }
+ if (len)
+ err = __issue_discard_async(sbi, bdev, start, len);
- if (err) {
- update_meta_page(sbi, NULL, blkaddr);
- return true;
- }
- return false;
+ trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
+ return err;
}
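/*
 * Walk-through (illustrative): for a range that starts on device 0 and
 * crosses into device 1, the loop above issues one async discard up to the
 * boundary, switches bdev, and the trailing "if (len)" issues the rest, so
 * every discard stays within a single block device.
 */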
static void __add_discard_entry(struct f2fs_sb_info *sbi,
@@ -573,7 +761,7 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
bool force = (cpc->reason == CP_DISCARD);
int i;
- if (se->valid_blocks == max_blocks)
+ if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
return;
if (!force) {
@@ -593,6 +781,10 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
break;
end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
+ if (force && start && end != max_blocks
+ && (end - start) < cpc->trim_minlen)
+ continue;
+
__add_discard_entry(sbi, cpc, se, start, end);
}
}
@@ -630,6 +822,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
unsigned int start = 0, end = -1;
+ unsigned int secno, start_segno;
+ bool force = (cpc->reason == CP_DISCARD);
mutex_lock(&dirty_i->seglist_lock);
@@ -646,17 +840,31 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
dirty_i->nr_dirty[PRE] -= end - start;
- if (!test_opt(sbi, DISCARD))
+ if (force || !test_opt(sbi, DISCARD))
continue;
- f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
+ if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
+ f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
(end - start) << sbi->log_blocks_per_seg);
+ continue;
+ }
+next:
+ secno = GET_SECNO(sbi, start);
+ start_segno = secno * sbi->segs_per_sec;
+ if (!IS_CURSEC(sbi, secno) &&
+ !get_valid_blocks(sbi, start, sbi->segs_per_sec))
+ f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
+ sbi->segs_per_sec << sbi->log_blocks_per_seg);
+
+ start = start_segno + sbi->segs_per_sec;
+ if (start < end)
+ goto next;
}
mutex_unlock(&dirty_i->seglist_lock);
/* send small discards */
list_for_each_entry_safe(entry, this, head, list) {
- if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
+ if (force && entry->len < cpc->trim_minlen)
goto skip;
f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
cpc->trimmed += entry->len;
@@ -711,12 +919,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
if (del > 0) {
if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
f2fs_bug_on(sbi, 1);
- if (!f2fs_test_and_set_bit(offset, se->discard_map))
+ if (f2fs_discard_en(sbi) &&
+ !f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
} else {
if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
f2fs_bug_on(sbi, 1);
- if (f2fs_test_and_clear_bit(offset, se->discard_map))
+ if (f2fs_discard_en(sbi) &&
+ f2fs_test_and_clear_bit(offset, se->discard_map))
sbi->discard_blks++;
}
if (!f2fs_test_bit(offset, se->ckpt_valid_map))
@@ -817,12 +1027,12 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
}
}
- sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
+ sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
SUM_FOOTER_SIZE) / SUMMARY_SIZE;
if (valid_sum_count <= sum_in_page)
return 1;
else if ((valid_sum_count - sum_in_page) <=
- (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
+ (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
return 2;
return 3;
}
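/*
 * Worked example (assuming the on-disk constants from f2fs_fs.h:
 * SUMMARY_SIZE 7, SUM_FOOTER_SIZE 5, SUM_JOURNAL_SIZE 507, 4KB pages): a
 * page that must also carry both journals holds (4096 - 2*507 - 5) / 7 =
 * 439 entries, so up to 439 valid summaries flush in one page, up to
 * 439 + (4096 - 5) / 7 = 1023 in two, and anything more takes three.
 */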
@@ -841,9 +1051,9 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
void *dst = page_address(page);
if (src)
- memcpy(dst, src, PAGE_CACHE_SIZE);
+ memcpy(dst, src, PAGE_SIZE);
else
- memset(dst, 0, PAGE_CACHE_SIZE);
+ memset(dst, 0, PAGE_SIZE);
set_page_dirty(page);
f2fs_put_page(page, 1);
}
@@ -854,6 +1064,31 @@ static void write_sum_page(struct f2fs_sb_info *sbi,
update_meta_page(sbi, (void *)sum_blk, blk_addr);
}
+static void write_current_sum_page(struct f2fs_sb_info *sbi,
+ int type, block_t blk_addr)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ struct page *page = grab_meta_page(sbi, blk_addr);
+ struct f2fs_summary_block *src = curseg->sum_blk;
+ struct f2fs_summary_block *dst;
+
+ dst = (struct f2fs_summary_block *)page_address(page);
+
+ mutex_lock(&curseg->curseg_mutex);
+
+ down_read(&curseg->journal_rwsem);
+ memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
+ up_read(&curseg->journal_rwsem);
+
+ memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
+ memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
+
+ mutex_unlock(&curseg->curseg_mutex);
+
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+}
+
static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
@@ -886,9 +1121,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
segno = find_next_zero_bit(free_i->free_segmap,
- MAIN_SEGS(sbi), *newseg + 1);
- if (segno - *newseg < sbi->segs_per_sec -
- (*newseg % sbi->segs_per_sec))
+ (hint + 1) * sbi->segs_per_sec, *newseg + 1);
+ if (segno < (hint + 1) * sbi->segs_per_sec)
goto got_it;
}
find_other_zone:
@@ -1071,7 +1305,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
struct curseg_info *curseg = CURSEG_I(sbi, type);
const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
- if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
+ if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
return v_ops->get_victim(sbi,
&(curseg)->next_segno, BG_GC, type, SSR);
@@ -1106,22 +1340,21 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
stat_inc_seg_type(sbi, curseg);
}
-static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
-{
- struct curseg_info *curseg = CURSEG_I(sbi, type);
- unsigned int old_segno;
-
- old_segno = curseg->segno;
- SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
- locate_dirty_segment(sbi, old_segno);
-}
-
void allocate_new_segments(struct f2fs_sb_info *sbi)
{
+ struct curseg_info *curseg;
+ unsigned int old_segno;
int i;
- for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
- __allocate_new_segments(sbi, i);
+ if (test_opt(sbi, LFS))
+ return;
+
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ curseg = CURSEG_I(sbi, i);
+ old_segno = curseg->segno;
+ SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
+ locate_dirty_segment(sbi, old_segno);
+ }
}
static const struct segment_allocation default_salloc_ops = {
@@ -1134,6 +1367,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
unsigned int start_segno, end_segno;
struct cp_control cpc;
+ int err = 0;
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
@@ -1142,6 +1376,12 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
if (end <= MAIN_BLKADDR(sbi))
goto out;
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Found FS corruption, run fsck to fix.");
+ goto out;
+ }
+
/* start/end segment number in main_area */
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
@@ -1164,12 +1404,16 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
sbi->segs_per_sec) - 1, end_segno);
mutex_lock(&sbi->gc_mutex);
- write_checkpoint(sbi, &cpc);
+ err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
+ if (err)
+ break;
+
+ schedule();
}
out:
range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
- return 0;
+ return err;
}
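
f2fs_trim_fs() now trims in section-aligned batches, writing one checkpoint per batch; the first checkpoint failure breaks the loop and is propagated to the caller, and schedule() yields between batches so a huge FITRIM does not hog the CPU. A hedged userspace analogue of that control flow (do_batch() is a stand-in, not an f2fs function):

    #include <sched.h>

    /* Stand-in for one checkpoint-backed trim batch; returns 0 or -errno. */
    static int do_batch(unsigned int start, unsigned int end)
    {
            (void)start; (void)end;
            return 0;
    }

    static int trim_range(unsigned int start, unsigned int last,
                          unsigned int batch)
    {
            int err = 0;

            for (unsigned int seg = start; seg <= last; seg += batch) {
                    unsigned int end = (last - seg < batch - 1)
                                     ? last : seg + batch - 1;

                    err = do_batch(seg, end);
                    if (err)
                            break;          /* propagate the first failure */
                    sched_yield();          /* yield between batches */
            }
            return err;
    }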
static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
@@ -1244,21 +1488,11 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
struct f2fs_summary *sum, int type)
{
struct sit_info *sit_i = SIT_I(sbi);
- struct curseg_info *curseg;
- bool direct_io = (type == CURSEG_DIRECT_IO);
-
- type = direct_io ? CURSEG_WARM_DATA : type;
-
- curseg = CURSEG_I(sbi, type);
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
mutex_lock(&sit_i->sentry_lock);
- /* direct_io'ed data is aligned to the segment for better performance */
- if (direct_io && curseg->next_blkoff &&
- !has_not_enough_free_secs(sbi, 0))
- __allocate_new_segments(sbi, type);
-
*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
/*
@@ -1292,11 +1526,17 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
int type = __get_segment_type(fio->page, fio->type);
- allocate_data_block(fio->sbi, fio->page, fio->blk_addr,
- &fio->blk_addr, sum, type);
+ if (fio->type == NODE || fio->type == DATA)
+ mutex_lock(&fio->sbi->wio_mutex[fio->type]);
+
+ allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
+ &fio->new_blkaddr, sum, type);
/* writeout dirty page into bdev */
f2fs_submit_page_mbio(fio);
+
+ if (fio->type == NODE || fio->type == DATA)
+ mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
}
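
do_write_page() now wraps block allocation and bio submission in a per-type wio_mutex, so two NODE (or two DATA) writers cannot interleave: each writer's freshly allocated block address is submitted before the next address is handed out, which keeps the merged write bios physically contiguous. The pattern, sketched with pthreads (illustrative names, toy allocator):

    #include <pthread.h>

    enum io_type { IO_DATA, IO_NODE, NR_IO_TYPE };

    static pthread_mutex_t wio_mutex[NR_IO_TYPE] = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    static unsigned long next_blk[NR_IO_TYPE];  /* toy per-type cursor */

    static unsigned long alloc_block(enum io_type t) { return next_blk[t]++; }
    static void submit(unsigned long blk) { (void)blk; /* queue the write */ }

    /* Allocation and submission happen under one lock, so writers of
     * the same type never end up with interleaved block addresses. */
    static void write_one(enum io_type type)
    {
            pthread_mutex_lock(&wio_mutex[type]);
            submit(alloc_block(type));
            pthread_mutex_unlock(&wio_mutex[type]);
    }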
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
@@ -1305,7 +1545,8 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
.sbi = sbi,
.type = META,
.rw = WRITE_SYNC | REQ_META | REQ_PRIO,
- .blk_addr = page->index,
+ .old_blkaddr = page->index,
+ .new_blkaddr = page->index,
.page = page,
.encrypted_page = NULL,
};
@@ -1335,19 +1576,19 @@ void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
do_write_page(&sum, fio);
- dn->data_blkaddr = fio->blk_addr;
+ f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
}
void rewrite_data_page(struct f2fs_io_info *fio)
{
+ fio->new_blkaddr = fio->old_blkaddr;
stat_inc_inplace_blocks(fio->sbi);
f2fs_submit_page_mbio(fio);
}
-static void __f2fs_replace_block(struct f2fs_sb_info *sbi,
- struct f2fs_summary *sum,
+void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
block_t old_blkaddr, block_t new_blkaddr,
- bool recover_curseg)
+ bool recover_curseg, bool recover_newaddr)
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg;
@@ -1390,7 +1631,7 @@ static void __f2fs_replace_block(struct f2fs_sb_info *sbi,
curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
__add_sum_entry(sbi, type, sum);
- if (!recover_curseg)
+ if (!recover_curseg || recover_newaddr)
update_sit_entry(sbi, new_blkaddr, 1);
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
update_sit_entry(sbi, old_blkaddr, -1);
@@ -1414,66 +1655,30 @@ static void __f2fs_replace_block(struct f2fs_sb_info *sbi,
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
block_t old_addr, block_t new_addr,
- unsigned char version, bool recover_curseg)
+ unsigned char version, bool recover_curseg,
+ bool recover_newaddr)
{
struct f2fs_summary sum;
set_summary(&sum, dn->nid, dn->ofs_in_node, version);
- __f2fs_replace_block(sbi, &sum, old_addr, new_addr, recover_curseg);
-
- dn->data_blkaddr = new_addr;
- set_data_blkaddr(dn);
- f2fs_update_extent_cache(dn);
-}
-
-static inline bool is_merged_page(struct f2fs_sb_info *sbi,
- struct page *page, enum page_type type)
-{
- enum page_type btype = PAGE_TYPE_OF_BIO(type);
- struct f2fs_bio_info *io = &sbi->write_io[btype];
- struct bio_vec *bvec;
- struct page *target;
- int i;
-
- down_read(&io->io_rwsem);
- if (!io->bio) {
- up_read(&io->io_rwsem);
- return false;
- }
-
- bio_for_each_segment_all(bvec, io->bio, i) {
-
- if (bvec->bv_page->mapping) {
- target = bvec->bv_page;
- } else {
- struct f2fs_crypto_ctx *ctx;
-
- /* encrypted page */
- ctx = (struct f2fs_crypto_ctx *)page_private(
- bvec->bv_page);
- target = ctx->w.control_page;
- }
-
- if (page == target) {
- up_read(&io->io_rwsem);
- return true;
- }
- }
+ __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
+ recover_curseg, recover_newaddr);
- up_read(&io->io_rwsem);
- return false;
+ f2fs_update_data_blkaddr(dn, new_addr);
}
void f2fs_wait_on_page_writeback(struct page *page,
- enum page_type type)
+ enum page_type type, bool ordered)
{
if (PageWriteback(page)) {
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
- if (is_merged_page(sbi, page, type))
- f2fs_submit_merged_bio(sbi, type, WRITE);
- wait_on_page_writeback(page);
+ f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, type, WRITE);
+ if (ordered)
+ wait_on_page_writeback(page);
+ else
+ wait_for_stable_page(page);
}
}
@@ -1482,14 +1687,12 @@ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
{
struct page *cpage;
- if (blkaddr == NEW_ADDR)
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
return;
- f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
-
cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
if (cpage) {
- f2fs_wait_on_page_writeback(cpage, DATA);
+ f2fs_wait_on_page_writeback(cpage, DATA, true);
f2fs_put_page(cpage, 1);
}
}
@@ -1510,12 +1713,11 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
/* Step 1: restore nat cache */
seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
- memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
+ memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
/* Step 2: restore sit cache */
seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
- memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
- SUM_JOURNAL_SIZE);
+ memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
offset = 2 * SUM_JOURNAL_SIZE;
/* Step 3: restore summary entries */
@@ -1539,7 +1741,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
s = (struct f2fs_summary *)(kaddr + offset);
seg_i->sum_blk->entries[j] = *s;
offset += SUMMARY_SIZE;
- if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+ if (offset + SUMMARY_SIZE <= PAGE_SIZE -
SUM_FOOTER_SIZE)
continue;
@@ -1611,7 +1813,14 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
/* set uncompleted segment to curseg */
curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
- memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
+
+ /* update journal info */
+ down_write(&curseg->journal_rwsem);
+ memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
+ up_write(&curseg->journal_rwsem);
+
+ memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
+ memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
curseg->next_segno = segno;
reset_curseg(sbi, type, 0);
curseg->alloc_type = ckpt->alloc_type[type];
@@ -1623,14 +1832,10 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
- struct f2fs_summary_block *s_sits =
- CURSEG_I(sbi, CURSEG_COLD_DATA)->sum_blk;
- struct f2fs_summary_block *s_nats =
- CURSEG_I(sbi, CURSEG_HOT_DATA)->sum_blk;
int type = CURSEG_HOT_DATA;
int err;
- if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
+ if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
int npages = npages_for_summary_flush(sbi, true);
if (npages >= 2)
@@ -1653,11 +1858,6 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
return err;
}
- /* sanity check for summary blocks */
- if (nats_in_cursum(s_nats) > NAT_JOURNAL_ENTRIES ||
- sits_in_cursum(s_sits) > SIT_JOURNAL_ENTRIES)
- return -EINVAL;
-
return 0;
}
@@ -1675,13 +1875,12 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
/* Step 1: write nat cache */
seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
- memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
+ memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
/* Step 2: write sit cache */
seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
- memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
- SUM_JOURNAL_SIZE);
+ memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
/* Step 3: write summary entries */
@@ -1703,7 +1902,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
*summary = seg_i->sum_blk->entries[j];
written_size += SUMMARY_SIZE;
- if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+ if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
SUM_FOOTER_SIZE)
continue;
@@ -1727,17 +1926,13 @@ static void write_normal_summaries(struct f2fs_sb_info *sbi,
else
end = type + NR_CURSEG_NODE_TYPE;
- for (i = type; i < end; i++) {
- struct curseg_info *sum = CURSEG_I(sbi, i);
- mutex_lock(&sum->curseg_mutex);
- write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
- mutex_unlock(&sum->curseg_mutex);
- }
+ for (i = type; i < end; i++)
+ write_current_sum_page(sbi, i, blkaddr + (i - type));
}
void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
- if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
+ if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
write_compacted_summaries(sbi, start_blk);
else
write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
@@ -1748,24 +1943,24 @@ void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}
-int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
+int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
unsigned int val, int alloc)
{
int i;
if (type == NAT_JOURNAL) {
- for (i = 0; i < nats_in_cursum(sum); i++) {
- if (le32_to_cpu(nid_in_journal(sum, i)) == val)
+ for (i = 0; i < nats_in_cursum(journal); i++) {
+ if (le32_to_cpu(nid_in_journal(journal, i)) == val)
return i;
}
- if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
- return update_nats_in_cursum(sum, 1);
+ if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
+ return update_nats_in_cursum(journal, 1);
} else if (type == SIT_JOURNAL) {
- for (i = 0; i < sits_in_cursum(sum); i++)
- if (le32_to_cpu(segno_in_journal(sum, i)) == val)
+ for (i = 0; i < sits_in_cursum(journal); i++)
+ if (le32_to_cpu(segno_in_journal(journal, i)) == val)
return i;
- if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
- return update_sits_in_cursum(sum, 1);
+ if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
+ return update_sits_in_cursum(journal, 1);
}
return -1;
}
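
lookup_journal_in_cursum() is a lookup-or-allocate over a small fixed-capacity journal: return the slot that already holds val, or, when alloc is set and __has_cursum_space() reports room, bump the entry count and return the fresh slot; -1 sends the caller to the on-disk NAT/SIT block instead. A self-contained sketch of the idiom (capacity and field names invented for illustration):

    #define JNL_CAP 16

    struct jnl {
            int n;                          /* entries in use */
            unsigned int key[JNL_CAP];
    };

    /* Return the index of key, or a newly allocated slot when alloc is
     * set and space remains; -1 means "use the on-disk copy". */
    static int lookup_or_alloc(struct jnl *j, unsigned int key, int alloc)
    {
            int i;

            for (i = 0; i < j->n; i++)
                    if (j->key[i] == key)
                            return i;
            if (alloc && j->n < JNL_CAP) {
                    j->key[j->n] = key;
                    return j->n++;
            }
            return -1;
    }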
@@ -1794,7 +1989,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+ memcpy(dst_addr, src_addr, PAGE_SIZE);
set_page_dirty(dst_page);
f2fs_put_page(src_page, 1);
@@ -1869,20 +2064,22 @@ static void add_sits_in_set(struct f2fs_sb_info *sbi)
static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
int i;
- for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
+ down_write(&curseg->journal_rwsem);
+ for (i = 0; i < sits_in_cursum(journal); i++) {
unsigned int segno;
bool dirtied;
- segno = le32_to_cpu(segno_in_journal(sum, i));
+ segno = le32_to_cpu(segno_in_journal(journal, i));
dirtied = __mark_sit_entry_dirty(sbi, segno);
if (!dirtied)
add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
}
- update_sits_in_cursum(sum, -sits_in_cursum(sum));
+ update_sits_in_cursum(journal, -i);
+ up_write(&curseg->journal_rwsem);
}
/*
@@ -1894,13 +2091,12 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct sit_info *sit_i = SIT_I(sbi);
unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
struct sit_entry_set *ses, *tmp;
struct list_head *head = &SM_I(sbi)->sit_entry_set;
bool to_journal = true;
struct seg_entry *se;
- mutex_lock(&curseg->curseg_mutex);
mutex_lock(&sit_i->sentry_lock);
if (!sit_i->dirty_sentries)
@@ -1917,7 +2113,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* entries, remove all entries from the journal and add and account
* them in the sit entry set.
*/
- if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
+ if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
remove_sits_in_journal(sbi);
/*
@@ -1934,10 +2130,12 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned int segno = start_segno;
if (to_journal &&
- !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
+ !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
to_journal = false;
- if (!to_journal) {
+ if (to_journal) {
+ down_write(&curseg->journal_rwsem);
+ } else {
page = get_next_sit_page(sbi, start_segno);
raw_sit = page_address(page);
}
@@ -1955,13 +2153,13 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
if (to_journal) {
- offset = lookup_journal_in_cursum(sum,
+ offset = lookup_journal_in_cursum(journal,
SIT_JOURNAL, segno, 1);
f2fs_bug_on(sbi, offset < 0);
- segno_in_journal(sum, offset) =
+ segno_in_journal(journal, offset) =
cpu_to_le32(segno);
seg_info_to_raw_sit(se,
- &sit_in_journal(sum, offset));
+ &sit_in_journal(journal, offset));
} else {
sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
seg_info_to_raw_sit(se,
@@ -1973,7 +2171,9 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
ses->entry_cnt--;
}
- if (!to_journal)
+ if (to_journal)
+ up_write(&curseg->journal_rwsem);
+ else
f2fs_put_page(page, 1);
f2fs_bug_on(sbi, ses->entry_cnt);
@@ -1988,7 +2188,6 @@ out:
add_discard_addrs(sbi, cpc);
}
mutex_unlock(&sit_i->sentry_lock);
- mutex_unlock(&curseg->curseg_mutex);
set_prefree_as_free_segments(sbi);
}
@@ -1996,7 +2195,6 @@ out:
static int build_sit_info(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct sit_info *sit_i;
unsigned int sit_segs, start;
char *src_bitmap, *dst_bitmap;
@@ -2024,12 +2222,16 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
sit_i->sentries[start].ckpt_valid_map
= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
- sit_i->sentries[start].discard_map
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
if (!sit_i->sentries[start].cur_valid_map ||
- !sit_i->sentries[start].ckpt_valid_map ||
- !sit_i->sentries[start].discard_map)
+ !sit_i->sentries[start].ckpt_valid_map)
return -ENOMEM;
+
+ if (f2fs_discard_en(sbi)) {
+ sit_i->sentries[start].discard_map
+ = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ if (!sit_i->sentries[start].discard_map)
+ return -ENOMEM;
+ }
}
sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
@@ -2059,7 +2261,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
- sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
+ sit_i->written_valid_blocks = 0;
sit_i->sit_bitmap = dst_bitmap;
sit_i->bitmap_size = bitmap_size;
sit_i->dirty_sentries = 0;
@@ -2117,9 +2319,14 @@ static int build_curseg(struct f2fs_sb_info *sbi)
for (i = 0; i < NR_CURSEG_TYPE; i++) {
mutex_init(&array[i].curseg_mutex);
- array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!array[i].sum_blk)
return -ENOMEM;
+ init_rwsem(&array[i].journal_rwsem);
+ array[i].journal = kzalloc(sizeof(struct f2fs_journal),
+ GFP_KERNEL);
+ if (!array[i].journal)
+ return -ENOMEM;
array[i].segno = NULL_SEGNO;
array[i].next_blkoff = 0;
}
@@ -2130,54 +2337,73 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
+ struct seg_entry *se;
+ struct f2fs_sit_entry sit;
int sit_blk_cnt = SIT_BLK_CNT(sbi);
unsigned int i, start, end;
unsigned int readed, start_blk = 0;
- int nrpages = MAX_BIO_BLOCKS(sbi);
do {
- readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
+ readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
+ META_SIT, true);
start = start_blk * sit_i->sents_per_block;
end = (start_blk + readed) * sit_i->sents_per_block;
for (; start < end && start < MAIN_SEGS(sbi); start++) {
- struct seg_entry *se = &sit_i->sentries[start];
struct f2fs_sit_block *sit_blk;
- struct f2fs_sit_entry sit;
struct page *page;
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < sits_in_cursum(sum); i++) {
- if (le32_to_cpu(segno_in_journal(sum, i))
- == start) {
- sit = sit_in_journal(sum, i);
- mutex_unlock(&curseg->curseg_mutex);
- goto got_it;
- }
- }
- mutex_unlock(&curseg->curseg_mutex);
-
+ se = &sit_i->sentries[start];
page = get_current_sit_page(sbi, start);
sit_blk = (struct f2fs_sit_block *)page_address(page);
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
f2fs_put_page(page, 1);
-got_it:
+
check_block_count(sbi, start, &sit);
seg_info_from_raw_sit(se, &sit);
/* build discard map only one time */
- memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
-
- if (sbi->segs_per_sec > 1) {
- struct sec_entry *e = get_sec_entry(sbi, start);
- e->valid_blocks += se->valid_blocks;
+ if (f2fs_discard_en(sbi)) {
+ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += sbi->blocks_per_seg -
+ se->valid_blocks;
}
+
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks;
}
start_blk += readed;
} while (start_blk < sit_blk_cnt);
+
+ down_read(&curseg->journal_rwsem);
+ for (i = 0; i < sits_in_cursum(journal); i++) {
+ unsigned int old_valid_blocks;
+
+ start = le32_to_cpu(segno_in_journal(journal, i));
+ se = &sit_i->sentries[start];
+ sit = sit_in_journal(journal, i);
+
+ old_valid_blocks = se->valid_blocks;
+
+ check_block_count(sbi, start, &sit);
+ seg_info_from_raw_sit(se, &sit);
+
+ if (f2fs_discard_en(sbi)) {
+ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += old_valid_blocks -
+ se->valid_blocks;
+ }
+
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks - old_valid_blocks;
+ }
+ up_read(&curseg->journal_rwsem);
}
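
build_sit_entries() now loads every SIT entry from disk first and replays the journalled entries afterwards under journal_rwsem. A journalled segment was already counted once in the disk pass, so the replay adjusts section and discard accounting by the delta against old_valid_blocks rather than re-adding the full count. A small model of that base-plus-overlay accounting (sizes and types simplified):

    #define NSEG 8

    struct jent { unsigned int segno, valid; };

    static unsigned int valid[NSEG];        /* per-segment valid blocks */
    static unsigned int sec_valid;          /* section-wide total */

    static void load_base(const unsigned int *disk)
    {
            for (unsigned int s = 0; s < NSEG; s++) {
                    valid[s] = disk[s];
                    sec_valid += disk[s];
            }
    }

    /* Journal entries override the on-disk copy; only the delta is
     * added, since the base pass already counted the stale value.
     * (Unsigned wraparound makes the negative delta come out right.) */
    static void replay_journal(const struct jent *j, unsigned int n)
    {
            for (unsigned int i = 0; i < n; i++) {
                    unsigned int old = valid[j[i].segno];

                    valid[j[i].segno] = j[i].valid;
                    sec_valid += j[i].valid - old;
            }
    }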
static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -2189,6 +2415,9 @@ static void init_free_segmap(struct f2fs_sb_info *sbi)
struct seg_entry *sentry = get_seg_entry(sbi, start);
if (!sentry->valid_blocks)
__set_free(sbi, start);
+ else
+ SIT_I(sbi)->written_valid_blocks +=
+ sentry->valid_blocks;
}
/* mark the current segments as in use */
@@ -2310,7 +2539,11 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
sm_info->rec_prefree_segments = sm_info->main_segments *
DEF_RECLAIM_PREFREE_SEGMENTS / 100;
- sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
+ if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
+ sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
+
+ if (!test_opt(sbi, LFS))
+ sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
@@ -2392,8 +2625,10 @@ static void destroy_curseg(struct f2fs_sb_info *sbi)
if (!array)
return;
SM_I(sbi)->curseg_array = NULL;
- for (i = 0; i < NR_CURSEG_TYPE; i++)
+ for (i = 0; i < NR_CURSEG_TYPE; i++) {
kfree(array[i].sum_blk);
+ kfree(array[i].journal);
+ }
kfree(array);
}
@@ -2440,7 +2675,7 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi)
if (!sm_info)
return;
- destroy_flush_cmd_control(sbi);
+ destroy_flush_cmd_control(sbi, true);
destroy_dirty_segmap(sbi);
destroy_curseg(sbi);
destroy_free_segmap(sbi);
@@ -2459,7 +2694,7 @@ int __init create_segment_manager_caches(void)
sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
sizeof(struct sit_entry_set));
if (!sit_entry_set_slab)
- goto destory_discard_entry;
+ goto destroy_discard_entry;
inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
sizeof(struct inmem_pages));
@@ -2469,7 +2704,7 @@ int __init create_segment_manager_caches(void)
destroy_sit_entry_set:
kmem_cache_destroy(sit_entry_set_slab);
-destory_discard_entry:
+destroy_discard_entry:
kmem_cache_destroy(discard_entry_slab);
fail:
return -ENOMEM;
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index ee44d346ea44..9d44ce83acb2 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -16,6 +16,9 @@
#define NULL_SECNO ((unsigned int)(~0))
#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
+#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* 8GB at maximum */
+
+#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
@@ -101,8 +104,6 @@
(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors) \
(sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
-#define MAX_BIO_BLOCKS(sbi) \
- ((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))
/*
* indicate a block allocation direction: RIGHT and LEFT.
@@ -158,16 +159,17 @@ struct victim_sel_policy {
};
struct seg_entry {
- unsigned short valid_blocks; /* # of valid blocks */
+ unsigned int type:6; /* segment type like CURSEG_XXX_TYPE */
+ unsigned int valid_blocks:10; /* # of valid blocks */
+ unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */
+ unsigned int padding:6; /* padding */
unsigned char *cur_valid_map; /* validity bitmap of blocks */
/*
* # of valid blocks and the validity bitmap stored in the last
* checkpoint pack. This information is used by the SSR mode.
*/
- unsigned short ckpt_valid_blocks;
- unsigned char *ckpt_valid_map;
+ unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */
unsigned char *discard_map;
- unsigned char type; /* segment type like CURSEG_XXX_TYPE */
unsigned long long mtime; /* modification time of the segment */
};
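
The reworked seg_entry packs type (6 bits), valid_blocks (10 bits), ckpt_valid_blocks (10 bits) and 6 padding bits into one 32-bit word; 10 bits comfortably hold the 512 blocks of a default 2MB segment, and with one seg_entry per segment the saving scales with volume size. A standalone check of the packing (GCC/Clang layout assumed):

    #include <stdio.h>

    struct packed_se {
            unsigned int type:6;
            unsigned int valid_blocks:10;
            unsigned int ckpt_valid_blocks:10;
            unsigned int padding:6;
    };

    struct unpacked_se {
            unsigned short valid_blocks;
            unsigned short ckpt_valid_blocks;
            unsigned char type;
    };

    int main(void)
    {
            /* 6 + 10 + 10 + 6 = 32 bits -> one int, versus three
             * fields that pad out to 6 bytes on typical ABIs. */
            printf("packed:   %zu bytes\n", sizeof(struct packed_se));
            printf("unpacked: %zu bytes\n", sizeof(struct unpacked_se));
            return 0;
    }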
@@ -183,7 +185,7 @@ struct segment_allocation {
* this value is set in a page's private data to indicate that
* the page is atomically written, and it is in the inmem_pages list.
*/
-#define ATOMIC_WRITTEN_PAGE 0x0000ffff
+#define ATOMIC_WRITTEN_PAGE ((unsigned long)-1)
#define IS_ATOMIC_WRITTEN_PAGE(page) \
(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
@@ -191,6 +193,7 @@ struct segment_allocation {
struct inmem_pages {
struct list_head list;
struct page *page;
+ block_t old_addr; /* for revoking when commit fails */
};
struct sit_info {
@@ -257,6 +260,8 @@ struct victim_selection {
struct curseg_info {
struct mutex curseg_mutex; /* lock for consistency */
struct f2fs_summary_block *sum_blk; /* cached summary block */
+ struct rw_semaphore journal_rwsem; /* protect journal area */
+ struct f2fs_journal *journal; /* cached journal info */
unsigned char alloc_type; /* current allocation type */
unsigned int segno; /* current segment number */
unsigned short next_blkoff; /* next block offset to write */
@@ -466,20 +471,28 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
- return free_sections(sbi) <= (node_secs + 2 * dent_secs +
+ int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+
+ if (test_opt(sbi, LFS))
+ return false;
+
+ return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
reserved_sections(sbi) + 1);
}
-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+ int freed, int needed)
{
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+ int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
- return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
- reserved_sections(sbi));
+ return (free_sections(sbi) + freed) <=
+ (node_secs + 2 * dent_secs + imeta_secs +
+ reserved_sections(sbi) + needed);
}
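
has_not_enough_free_secs() now also charges dirty inode metadata (imeta) and takes a needed argument so callers can reserve extra headroom: space is short when the free plus about-to-be-freed sections no longer cover the dirty node, dentry and imeta sections plus the reserve and the request. A worked instance of the inequality (all numbers invented):

    #include <stdbool.h>
    #include <stdio.h>

    static bool short_of_secs(int free_secs, int freed, int node_secs,
                              int dent_secs, int imeta_secs,
                              int reserved, int needed)
    {
            return (free_secs + freed) <=
                   (node_secs + 2 * dent_secs + imeta_secs +
                    reserved + needed);
    }

    int main(void)
    {
            /* 10 free + 1 freed vs 3 + 2*2 + 1 + 2 + 1 = 11 -> short */
            printf("%d\n", short_of_secs(10, 1, 3, 2, 1, 2, 1));
            return 0;
    }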
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
@@ -527,6 +540,9 @@ static inline bool need_inplace_update(struct inode *inode)
if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
return false;
+ if (test_opt(sbi, LFS))
+ return false;
+
if (policy & (0x1 << F2FS_IPU_FORCE))
return true;
if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
@@ -540,7 +556,7 @@ static inline bool need_inplace_update(struct inode *inode)
/* this is only set during fdatasync */
if (policy & (0x1 << F2FS_IPU_FSYNC) &&
- is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
+ is_inode_flag_set(inode, FI_NEED_IPU))
return true;
return false;
@@ -573,8 +589,8 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
- f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
- || blk_addr >= MAX_BLKADDR(sbi));
+ BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
+ || blk_addr >= MAX_BLKADDR(sbi));
}
/*
@@ -680,13 +696,6 @@ static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
return false;
}
-static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
-{
- struct block_device *bdev = sbi->sb->s_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- return SECTOR_TO_BLOCK(queue_max_sectors(q));
-}
-
/*
* It is very important to gather dirty pages and write at once, so that we can
* submit a big bio without interfering with other data writes.
@@ -702,9 +711,9 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
if (type == DATA)
return sbi->blocks_per_seg;
else if (type == NODE)
- return 3 * sbi->blocks_per_seg;
+ return 8 * sbi->blocks_per_seg;
else if (type == META)
- return MAX_BIO_BLOCKS(sbi);
+ return 8 * BIO_MAX_PAGES;
else
return 0;
}
@@ -721,13 +730,9 @@ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
return 0;
nr_to_write = wbc->nr_to_write;
-
- if (type == DATA)
- desired = 4096;
- else if (type == NODE)
- desired = 3 * max_hw_blocks(sbi);
- else
- desired = MAX_BIO_BLOCKS(sbi);
+ desired = BIO_MAX_PAGES;
+ if (type == NODE)
+ desired <<= 1;
wbc->nr_to_write = desired;
return desired - nr_to_write;
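
With BIO_MAX_PAGES at 256, the new nr_pages_to_write() quota is 1MB of 4KB pages per pass for data and meta, and 2MB for nodes, replacing the old max_hw_blocks()-derived sizing. The return value is the amount the quota was inflated by: if wbc->nr_to_write arrives as 64 for NODE, the function sets it to 2 * 256 = 512 and returns 512 - 64 = 448, the surplus the caller charges back once writeback completes.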
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index da0d8e0b55a5..5c60fc28ec75 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -13,6 +13,7 @@
#include <linux/f2fs_fs.h>
#include "f2fs.h"
+#include "node.h"
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
@@ -20,19 +21,22 @@ static unsigned int shrinker_run_no;
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
- return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+ long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+
+ return count > 0 ? count : 0;
}
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
- if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
- return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
- return 0;
+ long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS;
+
+ return count > 0 ? count : 0;
}
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
- return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
+ return atomic_read(&sbi->total_zombie_tree) +
+ atomic_read(&sbi->total_ext_node);
}
unsigned long f2fs_shrink_count(struct shrinker *shrink,
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 5d3e745d33ae..e6d8d011786c 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -39,6 +39,35 @@ static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+
+char *fault_name[FAULT_MAX] = {
+ [FAULT_KMALLOC] = "kmalloc",
+ [FAULT_PAGE_ALLOC] = "page alloc",
+ [FAULT_ALLOC_NID] = "alloc nid",
+ [FAULT_ORPHAN] = "orphan",
+ [FAULT_BLOCK] = "no more block",
+ [FAULT_DIR_DEPTH] = "too big dir depth",
+ [FAULT_EVICT_INODE] = "evict_inode fail",
+ [FAULT_IO] = "IO error",
+ [FAULT_CHECKPOINT] = "checkpoint error",
+};
+
+static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
+ unsigned int rate)
+{
+ struct f2fs_fault_info *ffi = &sbi->fault_info;
+
+ if (rate) {
+ atomic_set(&ffi->inject_ops, 0);
+ ffi->inject_rate = rate;
+ ffi->inject_type = (1 << FAULT_MAX) - 1;
+ } else {
+ memset(ffi, 0, sizeof(struct f2fs_fault_info));
+ }
+}
+#endif
+
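
f2fs_build_fault_attr() arms fault injection: a non-zero rate requests roughly one failure per rate instrumented operations, and inject_type = (1 << FAULT_MAX) - 1 sets every bit so all the classes listed in fault_name[] are eligible; rate 0 clears the whole structure. A minimal userspace model of the rate counter (the kernel side is more elaborate; this only shows the idea):

    #include <stdbool.h>

    struct fault_info {
            unsigned int inject_rate;       /* fail ~1 in inject_rate ops */
            unsigned int inject_type;       /* bitmask of enabled faults */
            unsigned int ops;               /* operations seen so far */
    };

    static bool should_fail(struct fault_info *fi, unsigned int fault_bit)
    {
            if (!fi->inject_rate || !(fi->inject_type & (1u << fault_bit)))
                    return false;
            if (++fi->ops < fi->inject_rate)
                    return false;
            fi->ops = 0;
            return true;                    /* every inject_rate-th op fails */
    }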
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
.scan_objects = f2fs_shrink_scan,
@@ -51,6 +80,7 @@ enum {
Opt_disable_roll_forward,
Opt_norecovery,
Opt_discard,
+ Opt_nodiscard,
Opt_noheap,
Opt_user_xattr,
Opt_nouser_xattr,
@@ -61,12 +91,19 @@ enum {
Opt_inline_xattr,
Opt_inline_data,
Opt_inline_dentry,
+ Opt_noinline_dentry,
Opt_flush_merge,
+ Opt_noflush_merge,
Opt_nobarrier,
Opt_fastboot,
Opt_extent_cache,
Opt_noextent_cache,
Opt_noinline_data,
+ Opt_data_flush,
+ Opt_mode,
+ Opt_fault_injection,
+ Opt_lazytime,
+ Opt_nolazytime,
Opt_err,
};
@@ -75,6 +112,7 @@ static match_table_t f2fs_tokens = {
{Opt_disable_roll_forward, "disable_roll_forward"},
{Opt_norecovery, "norecovery"},
{Opt_discard, "discard"},
+ {Opt_nodiscard, "nodiscard"},
{Opt_noheap, "no_heap"},
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
@@ -85,12 +123,19 @@ static match_table_t f2fs_tokens = {
{Opt_inline_xattr, "inline_xattr"},
{Opt_inline_data, "inline_data"},
{Opt_inline_dentry, "inline_dentry"},
+ {Opt_noinline_dentry, "noinline_dentry"},
{Opt_flush_merge, "flush_merge"},
+ {Opt_noflush_merge, "noflush_merge"},
{Opt_nobarrier, "nobarrier"},
{Opt_fastboot, "fastboot"},
{Opt_extent_cache, "extent_cache"},
{Opt_noextent_cache, "noextent_cache"},
{Opt_noinline_data, "noinline_data"},
+ {Opt_data_flush, "data_flush"},
+ {Opt_mode, "mode=%s"},
+ {Opt_fault_injection, "fault_injection=%u"},
+ {Opt_lazytime, "lazytime"},
+ {Opt_nolazytime, "nolazytime"},
{Opt_err, NULL},
};
@@ -100,6 +145,10 @@ enum {
SM_INFO, /* struct f2fs_sm_info */
NM_INFO, /* struct f2fs_nm_info */
F2FS_SBI, /* struct f2fs_sb_info */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ FAULT_INFO_RATE, /* struct f2fs_fault_info */
+ FAULT_INFO_TYPE, /* struct f2fs_fault_info */
+#endif
};
struct f2fs_attr {
@@ -121,9 +170,27 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
return (unsigned char *)NM_I(sbi);
else if (struct_type == F2FS_SBI)
return (unsigned char *)sbi;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ else if (struct_type == FAULT_INFO_RATE ||
+ struct_type == FAULT_INFO_TYPE)
+ return (unsigned char *)&sbi->fault_info;
+#endif
return NULL;
}
+static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ struct super_block *sb = sbi->sb;
+
+ if (!sb->s_bdev->bd_part)
+ return snprintf(buf, PAGE_SIZE, "0\n");
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)(sbi->kbytes_written +
+ BD_PART_WRITTEN(sbi)));
+}
+
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -157,6 +224,10 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
ret = kstrtoul(skip_spaces(buf), 0, &t);
if (ret < 0)
return ret;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
+ return -EINVAL;
+#endif
*ui = t;
return count;
}
@@ -202,6 +273,9 @@ static struct f2fs_attr f2fs_attr_##_name = { \
f2fs_sbi_show, f2fs_sbi_store, \
offsetof(struct struct_name, elname))
+#define F2FS_GENERAL_RO_ATTR(name) \
+static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
+
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
@@ -214,9 +288,16 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, cp_interval);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
+F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
+#endif
+F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -234,7 +315,14 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(dir_level),
ATTR_LIST(ram_thresh),
ATTR_LIST(ra_nid_pages),
+ ATTR_LIST(dirty_nats_ratio),
ATTR_LIST(cp_interval),
+ ATTR_LIST(idle_interval),
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ ATTR_LIST(inject_rate),
+ ATTR_LIST(inject_type),
+#endif
+ ATTR_LIST(lifetime_write_kbytes),
NULL,
};
@@ -324,12 +412,20 @@ static int parse_options(struct super_block *sb, char *options)
q = bdev_get_queue(sb->s_bdev);
if (blk_queue_discard(q)) {
set_opt(sbi, DISCARD);
- } else {
+ } else if (!f2fs_sb_mounted_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"mounting with \"discard\" option, but "
"the device does not support discard");
}
break;
+ case Opt_nodiscard:
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "discard is required for zoned block devices");
+ return -EINVAL;
+ }
+ clear_opt(sbi, DISCARD);
+ break;
case Opt_noheap:
set_opt(sbi, NOHEAP);
break;
@@ -388,9 +484,15 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_inline_dentry:
set_opt(sbi, INLINE_DENTRY);
break;
+ case Opt_noinline_dentry:
+ clear_opt(sbi, INLINE_DENTRY);
+ break;
case Opt_flush_merge:
set_opt(sbi, FLUSH_MERGE);
break;
+ case Opt_noflush_merge:
+ clear_opt(sbi, FLUSH_MERGE);
+ break;
case Opt_nobarrier:
set_opt(sbi, NOBARRIER);
break;
@@ -406,6 +508,49 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_noinline_data:
clear_opt(sbi, INLINE_DATA);
break;
+ case Opt_data_flush:
+ set_opt(sbi, DATA_FLUSH);
+ break;
+ case Opt_mode:
+ name = match_strdup(&args[0]);
+
+ if (!name)
+ return -ENOMEM;
+ if (strlen(name) == 8 &&
+ !strncmp(name, "adaptive", 8)) {
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "adaptive mode is not allowed with "
+ "zoned block device feature");
+ kfree(name);
+ return -EINVAL;
+ }
+ set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+ } else if (strlen(name) == 3 &&
+ !strncmp(name, "lfs", 3)) {
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
+ case Opt_fault_injection:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ f2fs_build_fault_attr(sbi, arg);
+#else
+ f2fs_msg(sb, KERN_INFO,
+ "FAULT_INJECTION was not selected");
+#endif
+ break;
+ case Opt_lazytime:
+ sb->s_flags |= MS_LAZYTIME;
+ break;
+ case Opt_nolazytime:
+ sb->s_flags &= ~MS_LAZYTIME;
+ break;
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
@@ -432,20 +577,15 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
fi->i_current_depth = 1;
fi->i_advise = 0;
init_rwsem(&fi->i_sem);
+ INIT_LIST_HEAD(&fi->dirty_list);
+ INIT_LIST_HEAD(&fi->gdirty_list);
INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock);
-
- set_inode_flag(fi, FI_NEW_INODE);
-
- if (test_opt(F2FS_SB(sb), INLINE_XATTR))
- set_inode_flag(fi, FI_INLINE_XATTR);
+ init_rwsem(&fi->dio_rwsem[READ]);
+ init_rwsem(&fi->dio_rwsem[WRITE]);
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
-
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- fi->i_crypt_info = NULL;
-#endif
return &fi->vfs_inode;
}
@@ -458,7 +598,7 @@ static int f2fs_drop_inode(struct inode *inode)
* - f2fs_gc -> iput -> evict
* - inode_wait_for_writeback(inode)
*/
- if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
+ if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
if (!inode->i_nlink && !is_bad_inode(inode)) {
/* to avoid evict_inode call simultaneously */
atomic_inc(&inode->i_count);
@@ -466,32 +606,69 @@ static int f2fs_drop_inode(struct inode *inode)
/* any remaining atomic pages should be discarded */
if (f2fs_is_atomic_file(inode))
- commit_inmem_pages(inode, true);
+ drop_inmem_pages(inode);
/* should remain fi->extent_tree for writepage */
f2fs_destroy_extent_node(inode);
sb_start_intwrite(inode->i_sb);
- i_size_write(inode, 0);
+ f2fs_i_size_write(inode, 0);
if (F2FS_HAS_BLOCKS(inode))
- f2fs_truncate(inode, true);
+ f2fs_truncate(inode);
sb_end_intwrite(inode->i_sb);
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- if (F2FS_I(inode)->i_crypt_info)
- f2fs_free_encryption_info(inode,
- F2FS_I(inode)->i_crypt_info);
-#endif
+ fscrypt_put_encryption_info(inode, NULL);
spin_lock(&inode->i_lock);
atomic_dec(&inode->i_count);
}
return 0;
}
+
return generic_drop_inode(inode);
}
+int f2fs_inode_dirtied(struct inode *inode, bool sync)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int ret = 0;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ ret = 1;
+ } else {
+ set_inode_flag(inode, FI_DIRTY_INODE);
+ stat_inc_dirty_inode(sbi, DIRTY_META);
+ }
+ if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
+ list_add_tail(&F2FS_I(inode)->gdirty_list,
+ &sbi->inode_list[DIRTY_META]);
+ inc_page_count(sbi, F2FS_DIRTY_IMETA);
+ }
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return ret;
+}
+
+void f2fs_inode_synced(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return;
+ }
+ if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
+ list_del_init(&F2FS_I(inode)->gdirty_list);
+ dec_page_count(sbi, F2FS_DIRTY_IMETA);
+ }
+ clear_inode_flag(inode, FI_DIRTY_INODE);
+ clear_inode_flag(inode, FI_AUTO_RECOVER);
+ stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+}
+
/*
* f2fs_dirty_inode() is called from __mark_inode_dirty()
*
@@ -499,7 +676,19 @@ static int f2fs_drop_inode(struct inode *inode)
*/
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
- set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (inode->i_ino == F2FS_NODE_INO(sbi) ||
+ inode->i_ino == F2FS_META_INO(sbi))
+ return;
+
+ if (flags == I_DIRTY_TIME)
+ return;
+
+ if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
+ clear_inode_flag(inode, FI_AUTO_RECOVER);
+
+ f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
@@ -513,12 +702,32 @@ static void f2fs_destroy_inode(struct inode *inode)
call_rcu(&inode->i_rcu, f2fs_i_callback);
}
+static void destroy_percpu_info(struct f2fs_sb_info *sbi)
+{
+ percpu_counter_destroy(&sbi->alloc_valid_block_count);
+ percpu_counter_destroy(&sbi->total_valid_inode_count);
+}
+
+static void destroy_device_list(struct f2fs_sb_info *sbi)
+{
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ blkdev_put(FDEV(i).bdev, FMODE_EXCL);
+#ifdef CONFIG_BLK_DEV_ZONED
+ kfree(FDEV(i).blkz_type);
+#endif
+ }
+ kfree(sbi->devs);
+}
+
static void f2fs_put_super(struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (sbi->s_proc) {
remove_proc_entry("segment_info", sbi->s_proc);
+ remove_proc_entry("segment_bits", sbi->s_proc);
remove_proc_entry(sb->s_id, f2fs_proc_root);
}
kobject_del(&sbi->s_kobj);
@@ -534,7 +743,7 @@ static void f2fs_put_super(struct super_block *sb)
* clean checkpoint again.
*/
if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
- !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
@@ -548,12 +757,14 @@ static void f2fs_put_super(struct super_block *sb)
* normally superblock is clean, so we need to release this.
* In addition, EIO will skip the checkpoint, so we need this as well.
*/
- release_dirty_inode(sbi);
- release_discard_addrs(sbi);
+ release_ino_entry(sbi, true);
f2fs_leave_shrinker(sbi);
mutex_unlock(&sbi->umount_mutex);
+ /* in the cp_error case, we can wait for any writeback page */
+ f2fs_flush_merged_bios(sbi);
+
iput(sbi->node_inode);
iput(sbi->meta_inode);
@@ -566,13 +777,20 @@ static void f2fs_put_super(struct super_block *sb)
wait_for_completion(&sbi->s_kobj_unregister);
sb->s_fs_info = NULL;
- brelse(sbi->raw_super_buf);
+ if (sbi->s_chksum_driver)
+ crypto_free_shash(sbi->s_chksum_driver);
+ kfree(sbi->raw_super);
+
+ destroy_device_list(sbi);
+
+ destroy_percpu_info(sbi);
kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int err = 0;
trace_f2fs_sync_fs(sb, sync);
@@ -582,25 +800,27 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
cpc.reason = __get_cp_reason(sbi);
mutex_lock(&sbi->gc_mutex);
- write_checkpoint(sbi, &cpc);
+ err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
- } else {
- f2fs_balance_fs(sbi);
}
f2fs_trace_ios(NULL, 1);
- return 0;
+ return err;
}
static int f2fs_freeze(struct super_block *sb)
{
- int err;
-
if (f2fs_readonly(sb))
return 0;
- err = f2fs_sync_fs(sb, 1);
- return err;
+ /* IO error happened before */
+ if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
+ return -EIO;
+
+ /* must be clean, since sync_filesystem() was already called */
+ if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
+ return -EINVAL;
+ return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
@@ -623,11 +843,12 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
- buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
+ buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
buf->f_bavail = user_block_count - valid_user_blocks(sbi);
buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
- buf->f_ffree = buf->f_files - valid_inode_count(sbi);
+ buf->f_ffree = min(buf->f_files - valid_node_count(sbi),
+ buf->f_bavail);
buf->f_namelen = F2FS_NAME_LEN;
buf->f_fsid.val[0] = (u32)id;
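
The statfs fixups report f_bfree with the overprovision area included and clamp f_ffree to f_bavail, since f2fs carves inodes out of the same block pool: with no free blocks left, no inode can be created no matter how many node IDs remain. A sketch of the clamped computation (field names are illustrative):

    /* f_ffree is capped by free blocks: every new inode consumes at
     * least one block, so report min(free inode slots, f_bavail). */
    static unsigned long long clamp_ffree(unsigned long long f_files,
                                          unsigned long long used_nodes,
                                          unsigned long long f_bavail)
    {
            unsigned long long slots = f_files - used_nodes;

            return slots < f_bavail ? slots : f_bavail;
    }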
@@ -676,6 +897,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",noinline_data");
if (test_opt(sbi, INLINE_DENTRY))
seq_puts(seq, ",inline_dentry");
+ else
+ seq_puts(seq, ",noinline_dentry");
if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
seq_puts(seq, ",flush_merge");
if (test_opt(sbi, NOBARRIER))
@@ -686,6 +909,14 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",extent_cache");
else
seq_puts(seq, ",noextent_cache");
+ if (test_opt(sbi, DATA_FLUSH))
+ seq_puts(seq, ",data_flush");
+
+ seq_puts(seq, ",mode=");
+ if (test_opt(sbi, ADAPTIVE))
+ seq_puts(seq, "adaptive");
+ else if (test_opt(sbi, LFS))
+ seq_puts(seq, "lfs");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
return 0;
@@ -718,19 +949,47 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
return 0;
}
-static int segment_info_open_fs(struct inode *inode, struct file *file)
+static int segment_bits_seq_show(struct seq_file *seq, void *offset)
{
- return single_open(file, segment_info_seq_show, PDE_DATA(inode));
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ unsigned int total_segs =
+ le32_to_cpu(sbi->raw_super->segment_count_main);
+ int i, j;
+
+ seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
+ "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
+
+ for (i = 0; i < total_segs; i++) {
+ struct seg_entry *se = get_seg_entry(sbi, i);
+
+ seq_printf(seq, "%-10d", i);
+ seq_printf(seq, "%d|%-3u|", se->type,
+ get_valid_blocks(sbi, i, 1));
+ for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
+ seq_printf(seq, " %.2x", se->cur_valid_map[j]);
+ seq_putc(seq, '\n');
+ }
+ return 0;
}
-static const struct file_operations f2fs_seq_segment_info_fops = {
- .owner = THIS_MODULE,
- .open = segment_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+#define F2FS_PROC_FILE_DEF(_name) \
+static int _name##_open_fs(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, _name##_seq_show, PDE_DATA(inode)); \
+} \
+ \
+static const struct file_operations f2fs_seq_##_name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = _name##_open_fs, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
};
+F2FS_PROC_FILE_DEF(segment_info);
+F2FS_PROC_FILE_DEF(segment_bits);
+
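
F2FS_PROC_FILE_DEF() stamps out the single_open() boilerplate once per proc file, so segment_bits lands next to segment_info with one extra invocation instead of a hand-copied open function and file_operations table. The token-pasting trick in standalone form (## splices the name into each generated identifier):

    #include <stdio.h>

    #define DEFINE_SHOW(name)                               \
    static void name##_show(void)                           \
    {                                                       \
            printf("%s\n", #name);                          \
    }

    DEFINE_SHOW(segment_info)
    DEFINE_SHOW(segment_bits)

    int main(void)
    {
            segment_info_show();    /* prints "segment_info" */
            segment_bits_show();    /* prints "segment_bits" */
            return 0;
    }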
static void default_options(struct f2fs_sb_info *sbi)
{
/* init some FS parameters */
@@ -738,7 +997,16 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_DATA);
+ set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
+ sbi->sb->s_flags |= MS_LAZYTIME;
+ set_opt(sbi, FLUSH_MERGE);
+ if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ set_opt(sbi, DISCARD);
+ } else {
+ set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+ }
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
@@ -746,6 +1014,10 @@ static void default_options(struct f2fs_sb_info *sbi)
#ifdef CONFIG_F2FS_FS_POSIX_ACL
set_opt(sbi, POSIX_ACL);
#endif
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ f2fs_build_fault_attr(sbi, 0);
+#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
@@ -756,8 +1028,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
bool need_restart_gc = false;
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
-
- sync_filesystem(sb);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct f2fs_fault_info ffi = sbi->fault_info;
+#endif
/*
* Save the old mount options in case we
@@ -766,6 +1039,15 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
org_mount_opt = sbi->mount_opt;
active_logs = sbi->active_logs;
+ /* recover superblocks we couldn't write due to previous RO mount */
+ if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+ err = f2fs_commit_super(sbi, false);
+ f2fs_msg(sb, KERN_INFO,
+ "Try to recover all the superblocks, ret: %d", err);
+ if (!err)
+ clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ }
+
sbi->mount_opt.opt = 0;
default_options(sbi);
@@ -797,7 +1079,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
if (sbi->gc_thread) {
stop_gc_thread(sbi);
- f2fs_sync_fs(sb, 1);
need_restart_gc = true;
}
} else if (!sbi->gc_thread) {
@@ -807,21 +1088,33 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
need_stop_gc = true;
}
+ if (*flags & MS_RDONLY) {
+ writeback_inodes_sb(sb, WB_REASON_SYNC);
+ sync_inodes_sb(sb);
+
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+ set_sbi_flag(sbi, SBI_IS_CLOSE);
+ f2fs_sync_fs(sb, 1);
+ clear_sbi_flag(sbi, SBI_IS_CLOSE);
+ }
+
/*
* We stop the issue-flush thread if the FS is mounted read-only
* or if flush_merge is not passed as a mount option.
*/
if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
- destroy_flush_cmd_control(sbi);
- } else if (!SM_I(sbi)->cmd_control_info) {
+ clear_opt(sbi, FLUSH_MERGE);
+ destroy_flush_cmd_control(sbi, false);
+ } else {
err = create_flush_cmd_control(sbi);
if (err)
goto restore_gc;
}
skip:
/* Update the POSIXACL Flag */
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+
return 0;
restore_gc:
if (need_restart_gc) {
@@ -834,6 +1127,9 @@ restore_gc:
restore_opts:
sbi->mount_opt = org_mount_opt;
sbi->active_logs = active_logs;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ sbi->fault_info = ffi;
+#endif
return err;
}
@@ -853,6 +1149,48 @@ static struct super_operations f2fs_sops = {
.remount_fs = f2fs_remount,
};
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
+{
+ return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
+ F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, NULL);
+}
+
+static int f2fs_key_prefix(struct inode *inode, u8 **key)
+{
+ *key = F2FS_I_SB(inode)->key_prefix;
+ return F2FS_I_SB(inode)->key_prefix_size;
+}
+
+static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
+ void *fs_data)
+{
+ return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
+ F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, fs_data, XATTR_CREATE);
+}
+
+static unsigned f2fs_max_namelen(struct inode *inode)
+{
+ return S_ISLNK(inode->i_mode) ?
+ inode->i_sb->s_blocksize : F2FS_NAME_LEN;
+}
+
+static struct fscrypt_operations f2fs_cryptops = {
+ .get_context = f2fs_get_context,
+ .key_prefix = f2fs_key_prefix,
+ .set_context = f2fs_set_context,
+ .is_encrypted = f2fs_encrypted_inode,
+ .empty_dir = f2fs_empty_dir,
+ .max_namelen = f2fs_max_namelen,
+};
+#else
+static struct fscrypt_operations f2fs_cryptops = {
+ .is_encrypted = f2fs_encrypted_inode,
+};
+#endif
+
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
{
@@ -898,7 +1236,7 @@ static const struct export_operations f2fs_export_ops = {
.get_parent = f2fs_get_parent,
};
-loff_t max_file_size(unsigned bits)
+static loff_t max_file_blocks(void)
{
loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
loff_t leaf_count = ADDRS_PER_BLOCK;
@@ -914,13 +1252,29 @@ loff_t max_file_size(unsigned bits)
leaf_count *= NIDS_PER_BLOCK;
result += leaf_count;
- result <<= bits;
return result;
}
-static inline bool sanity_check_area_boundary(struct super_block *sb,
- struct f2fs_super_block *raw_super)
+static int __f2fs_commit_super(struct buffer_head *bh,
+ struct f2fs_super_block *super)
{
+ lock_buffer(bh);
+ if (super)
+ memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+ set_buffer_uptodate(bh);
+ set_buffer_dirty(bh);
+ unlock_buffer(bh);
+
+ /* it's a rare case, so we can do FUA all the time */
+ return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+}
+
+static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
+ struct buffer_head *bh)
+{
+ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+ (bh->b_data + F2FS_SUPER_OFFSET);
+ struct super_block *sb = sbi->sb;
u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
@@ -934,6 +1288,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
u32 segment_count = le32_to_cpu(raw_super->segment_count);
u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ u64 main_end_blkaddr = main_blkaddr +
+ (segment_count_main << log_blocks_per_seg);
+ u64 seg_end_blkaddr = segment0_blkaddr +
+ (segment_count << log_blocks_per_seg);
if (segment0_blkaddr != cp_blkaddr) {
f2fs_msg(sb, KERN_INFO,
@@ -978,22 +1336,47 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
return true;
}
- if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
- segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
+ if (main_end_blkaddr > seg_end_blkaddr) {
f2fs_msg(sb, KERN_INFO,
- "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
+ "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
main_blkaddr,
- segment0_blkaddr + (segment_count << log_blocks_per_seg),
+ segment0_blkaddr +
+ (segment_count << log_blocks_per_seg),
segment_count_main << log_blocks_per_seg);
return true;
- }
+ } else if (main_end_blkaddr < seg_end_blkaddr) {
+ int err = 0;
+ char *res;
+
+ /* fix in-memory information all the time */
+ raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
+ segment0_blkaddr) >> log_blocks_per_seg);
+ if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
+ set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ res = "internally";
+ } else {
+ err = __f2fs_commit_super(bh, NULL);
+ res = err ? "failed" : "done";
+ }
+ f2fs_msg(sb, KERN_INFO,
+ "Fix alignment : %s, start(%u) end(%u) block(%u)",
+ res, main_blkaddr,
+ segment0_blkaddr +
+ (segment_count << log_blocks_per_seg),
+ segment_count_main << log_blocks_per_seg);
+ if (err)
+ return true;
+ }
return false;
}
-static int sanity_check_raw_super(struct super_block *sb,
- struct f2fs_super_block *raw_super)
+static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
+ struct buffer_head *bh)
{
+ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+ (bh->b_data + F2FS_SUPER_OFFSET);
+ struct super_block *sb = sbi->sb;
unsigned int blocksize;
if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
@@ -1004,10 +1387,10 @@ static int sanity_check_raw_super(struct super_block *sb,
}
/* Currently, we support only a 4KB page cache size */
- if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+ if (F2FS_BLKSIZE != PAGE_SIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid page_cache_size (%lu), supports only 4KB\n",
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
return 1;
}
@@ -1059,27 +1442,19 @@ static int sanity_check_raw_super(struct super_block *sb,
return 1;
}
- if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
- f2fs_msg(sb, KERN_INFO,
- "Invalid segment count (%u)",
- le32_to_cpu(raw_super->segment_count));
- return 1;
- }
-
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
- if (sanity_check_area_boundary(sb, raw_super))
+ if (sanity_check_area_boundary(sbi, bh))
return 1;
return 0;
}
-static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
unsigned int total, fsmeta;
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- unsigned int main_segs, blocks_per_seg;
- int i;
+ unsigned int ovp_segments, reserved_segments;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -1091,20 +1466,14 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
if (unlikely(fsmeta >= total))
return 1;
- main_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
- blocks_per_seg = sbi->blocks_per_seg;
+ ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
+ reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
- for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
- if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
- le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) {
- return 1;
- }
- }
- for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
- if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
- le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) {
- return 1;
- }
+ if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
+ ovp_segments == 0 || reserved_segments == 0)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong layout: check mkfs.f2fs version");
+ return 1;
}
if (unlikely(f2fs_cp_error(sbi))) {
@@ -1137,129 +1506,282 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->cur_victim_sec = NULL_SECNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
- for (i = 0; i < NR_COUNT_TYPE; i++)
- atomic_set(&sbi->nr_pages[i], 0);
-
sbi->dir_level = DEF_DIR_LEVEL;
- sbi->cp_interval = DEF_CP_INTERVAL;
+ sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
+ sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
clear_sbi_flag(sbi, SBI_NEED_FSCK);
+ for (i = 0; i < NR_COUNT_TYPE; i++)
+ atomic_set(&sbi->nr_pages[i], 0);
+
INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
+ mutex_init(&sbi->wio_mutex[NODE]);
+ mutex_init(&sbi->wio_mutex[DATA]);
+ spin_lock_init(&sbi->cp_lock);
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
+ F2FS_KEY_DESC_PREFIX_SIZE);
+ sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
+#endif
}
+static int init_percpu_info(struct f2fs_sb_info *sbi)
+{
+ int err;
+
+ err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
+ if (err)
+ return err;
+
+ return percpu_counter_init(&sbi->total_valid_inode_count, 0,
+ GFP_KERNEL);
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
+{
+ struct block_device *bdev = FDEV(devi).bdev;
+ sector_t nr_sectors = bdev->bd_part->nr_sects;
+ sector_t sector = 0;
+ struct blk_zone *zones;
+ unsigned int i, nr_zones;
+ unsigned int n = 0;
+ int err = -EIO;
+
+ if (!f2fs_sb_mounted_blkzoned(sbi->sb))
+ return 0;
+
+ if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
+ SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+ return -EINVAL;
+ sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+ if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
+ __ilog2_u32(sbi->blocks_per_blkz))
+ return -EINVAL;
+ sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
+ FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
+ sbi->log_blocks_per_blkz;
+ if (nr_sectors & (bdev_zone_size(bdev) - 1))
+ FDEV(devi).nr_blkz++;
+
+ FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
+ if (!FDEV(devi).blkz_type)
+ return -ENOMEM;
+
+#define F2FS_REPORT_NR_ZONES 4096
+
+ zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
+ GFP_KERNEL);
+ if (!zones)
+ return -ENOMEM;
+
+ /* Get block zones type */
+ while (zones && sector < nr_sectors) {
+
+ nr_zones = F2FS_REPORT_NR_ZONES;
+ err = blkdev_report_zones(bdev, sector,
+ zones, &nr_zones,
+ GFP_KERNEL);
+ if (err)
+ break;
+ if (!nr_zones) {
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < nr_zones; i++) {
+ FDEV(devi).blkz_type[n] = zones[i].type;
+ sector += zones[i].len;
+ n++;
+ }
+ }
+
+ kfree(zones);
+
+ return err;
+}
+#endif
+
/*
* Read f2fs raw super block.
- * Because we have two copies of super block, so read the first one at first,
- * if the first one is invalid, move to read the second one.
+ * Because we have two copies of the super block, read both of them
+ * to get the first valid one. If either copy is broken, we pass a
+ * recovery flag back to the caller.
*/
-static int read_raw_super_block(struct super_block *sb,
+static int read_raw_super_block(struct f2fs_sb_info *sbi,
struct f2fs_super_block **raw_super,
- struct buffer_head **raw_super_buf,
- int *recovery)
+ int *valid_super_block, int *recovery)
{
- int block = 0;
- struct buffer_head *buffer;
+ struct super_block *sb = sbi->sb;
+ int block;
+ struct buffer_head *bh;
struct f2fs_super_block *super;
int err = 0;
-retry:
- buffer = sb_bread(sb, block);
- if (!buffer) {
- *recovery = 1;
- f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
+ super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
+ if (!super)
+ return -ENOMEM;
+
+ for (block = 0; block < 2; block++) {
+ bh = sb_bread(sb, block);
+ if (!bh) {
+ f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
block + 1);
- if (block == 0) {
- block++;
- goto retry;
- } else {
err = -EIO;
- goto out;
+ continue;
}
- }
-
- super = (struct f2fs_super_block *)
- ((char *)(buffer)->b_data + F2FS_SUPER_OFFSET);
- /* sanity checking of raw super */
- if (sanity_check_raw_super(sb, super)) {
- brelse(buffer);
- *recovery = 1;
- f2fs_msg(sb, KERN_ERR,
- "Can't find valid F2FS filesystem in %dth superblock",
- block + 1);
- if (block == 0) {
- block++;
- goto retry;
- } else {
+ /* sanity checking of raw super */
+ if (sanity_check_raw_super(sbi, bh)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Can't find valid F2FS filesystem in %dth superblock",
+ block + 1);
err = -EINVAL;
- goto out;
+ brelse(bh);
+ continue;
}
- }
- if (!*raw_super) {
- *raw_super_buf = buffer;
- *raw_super = super;
- } else {
- /* already have a valid superblock */
- brelse(buffer);
+ if (!*raw_super) {
+ memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+ sizeof(*super));
+ *valid_super_block = block;
+ *raw_super = super;
+ }
+ brelse(bh);
}
- /* check the validity of the second superblock */
- if (block == 0) {
- block++;
- goto retry;
- }
+ /* Failed to read any of the superblocks */
+ if (err < 0)
+ *recovery = 1;
-out:
/* No valid superblock */
if (!*raw_super)
- return err;
+ kfree(super);
+ else
+ err = 0;
- return 0;
+ return err;
}
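The loop above reads both fixed superblock locations, keeps the first copy that passes the sanity checks, and raises the recovery flag if anything failed so f2fs_fill_super() can rewrite the damaged copy later. The same pattern in a freestanding sketch (validate() and the layout are placeholders):

#include <errno.h>
#include <string.h>

#define SB_COPIES 2

struct demo_sb { unsigned int magic; };

/* Placeholder validity check standing in for sanity_check_raw_super(). */
static int validate(const struct demo_sb *sb)
{
	return sb->magic == 0xF2F5;
}

static int read_super(const struct demo_sb copies[SB_COPIES],
		      struct demo_sb *out, int *recovery)
{
	int i, found = 0, bad = 0;

	for (i = 0; i < SB_COPIES; i++) {
		if (!validate(&copies[i])) {
			bad = 1;	/* remember the broken copy */
			continue;
		}
		if (!found) {		/* first valid copy wins */
			memcpy(out, &copies[i], sizeof(*out));
			found = 1;
		}
	}
	if (bad)
		*recovery = 1;		/* ask the caller to rewrite it */
	return found ? 0 : -EINVAL;
}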
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
- struct buffer_head *sbh = sbi->raw_super_buf;
- sector_t block = sbh->b_blocknr;
+ struct buffer_head *bh;
int err;
- /* write back-up superblock first */
- sbh->b_blocknr = block ? 0 : 1;
- mark_buffer_dirty(sbh);
- err = sync_dirty_buffer(sbh);
+ if ((recover && f2fs_readonly(sbi->sb)) ||
+ bdev_read_only(sbi->sb->s_bdev)) {
+ set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ return -EROFS;
+ }
- sbh->b_blocknr = block;
+ /* write back-up superblock first */
+ bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
+ if (!bh)
+ return -EIO;
+ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+ brelse(bh);
/* if we are in recovery path, skip writing valid superblock */
if (recover || err)
- goto out;
+ return err;
/* write current valid superblock */
- mark_buffer_dirty(sbh);
- err = sync_dirty_buffer(sbh);
-out:
- clear_buffer_write_io_error(sbh);
- set_buffer_uptodate(sbh);
+ bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+ if (!bh)
+ return -EIO;
+ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+ brelse(bh);
return err;
}
+static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ int i;
+
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (!RDEV(i).path[0])
+ return 0;
+
+ if (i == 0) {
+ sbi->devs = kzalloc(sizeof(struct f2fs_dev_info) *
+ MAX_DEVICES, GFP_KERNEL);
+ if (!sbi->devs)
+ return -ENOMEM;
+ }
+
+ memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
+ FDEV(i).total_segments = le32_to_cpu(RDEV(i).total_segments);
+ if (i == 0) {
+ FDEV(i).start_blk = 0;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1 +
+ le32_to_cpu(raw_super->segment0_blkaddr);
+ } else {
+ FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1;
+ }
+
+ FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+ sbi->sb->s_mode, sbi->sb->s_type);
+ if (IS_ERR(FDEV(i).bdev))
+ return PTR_ERR(FDEV(i).bdev);
+
+ /* to release errored devices */
+ sbi->s_ndevs = i + 1;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
+ !f2fs_sb_mounted_blkzoned(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Zoned block device feature not enabled\n");
+ return -EINVAL;
+ }
+ if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
+ if (init_blkz_info(sbi, i)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Failed to initialize F2FS blkzone information");
+ return -EINVAL;
+ }
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk,
+ bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
+ "Host-aware" : "Host-managed");
+ continue;
+ }
+#endif
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Mount Device [%2d]: %20s, %8u, %8x - %8x",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk);
+ }
+ return 0;
+}
+
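The start/end arithmetic in f2fs_scan_devices() strings the devices together into one contiguous logical block space, with device 0 also absorbing the segment0_blkaddr metadata offset. A worked example with made-up sizes (log_blocks_per_seg = 11, i.e. 2048 blocks per segment):

#include <stdio.h>

int main(void)
{
	unsigned int log_blocks_per_seg = 11;	/* 2048 blocks/segment */
	unsigned int segment0_blkaddr = 512;	/* hypothetical offset */
	unsigned int segs[2] = { 100, 50 };	/* hypothetical devices */
	unsigned int start = 0, end, i;

	for (i = 0; i < 2; i++) {
		end = start + (segs[i] << log_blocks_per_seg) - 1 +
		      (i == 0 ? segment0_blkaddr : 0);
		printf("dev %u: blocks %u - %u\n", i, start, end);
		start = end + 1;	/* next device continues the space */
	}
	return 0;	/* prints 0-205311, then 205312-307711 */
}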
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
struct f2fs_sb_info *sbi;
struct f2fs_super_block *raw_super;
- struct buffer_head *raw_super_buf;
struct inode *root;
- long err;
+ int err;
bool retry = true, need_fsck = false;
char *options = NULL;
- int recovery, i;
+ int recovery, i, valid_super_block;
+ struct curseg_info *seg_i;
try_onemore:
err = -EINVAL;
raw_super = NULL;
- raw_super_buf = NULL;
+ valid_super_block = -1;
recovery = 0;
/* allocate memory for f2fs-specific super block info */
@@ -1267,17 +1789,43 @@ try_onemore:
if (!sbi)
return -ENOMEM;
+ sbi->sb = sb;
+
+ /* Load the checksum driver */
+ sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
+ if (IS_ERR(sbi->s_chksum_driver)) {
+ f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
+ err = PTR_ERR(sbi->s_chksum_driver);
+ sbi->s_chksum_driver = NULL;
+ goto free_sbi;
+ }
+
/* set a block size */
if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
goto free_sbi;
}
- err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
+ err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
+ &recovery);
if (err)
goto free_sbi;
sb->s_fs_info = sbi;
+ sbi->raw_super = raw_super;
+
+ /*
+ * The BLKZONED feature indicates that the drive was formatted with
+ * zone alignment optimization. This is optional for host-aware
+ * devices, but mandatory for host-managed zoned block devices.
+ */
+#ifndef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Zoned block device support is not enabled\n");
+ goto free_sb_buf;
+ }
+#endif
default_options(sbi);
/* parse mount options */
options = kstrdup((const char *)data, GFP_KERNEL);
@@ -1290,11 +1838,14 @@ try_onemore:
if (err)
goto free_options;
- sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
+ sbi->max_file_blocks = max_file_blocks();
+ sb->s_maxbytes = sbi->max_file_blocks <<
+ le32_to_cpu(raw_super->log_blocksize);
sb->s_max_links = F2FS_LINK_MAX;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
sb->s_op = &f2fs_sops;
+ sb->s_cop = &f2fs_cryptops;
sb->s_xattr = f2fs_xattr_handlers;
sb->s_export_op = &f2fs_export_ops;
sb->s_magic = F2FS_SUPER_MAGIC;
@@ -1304,11 +1855,8 @@ try_onemore:
memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
/* init f2fs-specific super block info */
- sbi->sb = sb;
- sbi->raw_super = raw_super;
- sbi->raw_super_buf = raw_super_buf;
+ sbi->valid_super_block = valid_super_block;
mutex_init(&sbi->gc_mutex);
- mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
@@ -1329,6 +1877,10 @@ try_onemore:
init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi);
+ err = init_percpu_info(sbi);
+ if (err)
+ goto free_options;
+
/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
if (IS_ERR(sbi->meta_inode)) {
@@ -1343,24 +1895,26 @@ try_onemore:
goto free_meta_inode;
}
- /* sanity checking of checkpoint */
- err = -EINVAL;
- if (sanity_check_ckpt(sbi)) {
- f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
- goto free_cp;
+ /* Initialize device list */
+ err = f2fs_scan_devices(sbi);
+ if (err) {
+ f2fs_msg(sb, KERN_ERR, "Failed to find devices");
+ goto free_devices;
}
sbi->total_valid_node_count =
le32_to_cpu(sbi->ckpt->valid_node_count);
- sbi->total_valid_inode_count =
- le32_to_cpu(sbi->ckpt->valid_inode_count);
+ percpu_counter_set(&sbi->total_valid_inode_count,
+ le32_to_cpu(sbi->ckpt->valid_inode_count));
sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
sbi->total_valid_block_count =
le64_to_cpu(sbi->ckpt->valid_block_count);
sbi->last_valid_block_count = sbi->total_valid_block_count;
- sbi->alloc_valid_block_count = 0;
- INIT_LIST_HEAD(&sbi->dir_inode_list);
- spin_lock_init(&sbi->dir_inode_lock);
+
+ for (i = 0; i < NR_INODE_TYPE; i++) {
+ INIT_LIST_HEAD(&sbi->inode_list[i]);
+ spin_lock_init(&sbi->inode_lock[i]);
+ }
init_extent_cache_info(sbi);
@@ -1380,6 +1934,17 @@ try_onemore:
goto free_nm;
}
+ /* For write statistics */
+ if (sb->s_bdev->bd_part)
+ sbi->sectors_written_start =
+ (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+
+ /* Read accumulated write IO statistics if exists */
+ seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+ if (__exist_node_summaries(sbi))
+ sbi->kbytes_written =
+ le64_to_cpu(seg_i->journal->info.kbytes_written);
+
build_gc_manager(sbi);
/* get an inode for node space */
@@ -1423,9 +1988,12 @@ try_onemore:
if (f2fs_proc_root)
sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
- if (sbi->s_proc)
+ if (sbi->s_proc) {
proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
&f2fs_seq_segment_info_fops, sb);
+ proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
+ &f2fs_seq_segment_bits_fops, sb);
+ }
sbi->s_kobj.kset = f2fs_kset;
init_completion(&sbi->s_kobj_unregister);
@@ -1441,7 +2009,7 @@ try_onemore:
* previous checkpoint was not done by clean system shutdown.
*/
if (bdev_read_only(sb->s_bdev) &&
- !is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
err = -EROFS;
goto free_kobj;
}
@@ -1449,14 +2017,27 @@ try_onemore:
if (need_fsck)
set_sbi_flag(sbi, SBI_NEED_FSCK);
- err = recover_fsync_data(sbi);
- if (err) {
+ if (!retry)
+ goto skip_recovery;
+
+ err = recover_fsync_data(sbi, false);
+ if (err < 0) {
need_fsck = true;
f2fs_msg(sb, KERN_ERR,
- "Cannot recover all fsync data errno=%ld", err);
+ "Cannot recover all fsync data errno=%d", err);
+ goto free_kobj;
+ }
+ } else {
+ err = recover_fsync_data(sbi, true);
+
+ if (!f2fs_readonly(sb) && err > 0) {
+ err = -EINVAL;
+ f2fs_msg(sb, KERN_ERR,
+ "Need to recover fsync data");
goto free_kobj;
}
}
+skip_recovery:
/* recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
@@ -1473,20 +2054,26 @@ try_onemore:
kfree(options);
/* recover broken superblock */
- if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
- f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
- f2fs_commit_super(sbi, true);
+ if (recovery) {
+ err = f2fs_commit_super(sbi, true);
+ f2fs_msg(sb, KERN_INFO,
+ "Try to recover %dth superblock, ret: %d",
+ sbi->valid_super_block ? 1 : 2, err);
}
- sbi->cp_expires = round_jiffies_up(jiffies);
-
+ f2fs_update_time(sbi, CP_TIME);
+ f2fs_update_time(sbi, REQ_TIME);
return 0;
free_kobj:
+ f2fs_sync_inode_meta(sbi);
kobject_del(&sbi->s_kobj);
+ kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
if (sbi->s_proc) {
remove_proc_entry("segment_info", sbi->s_proc);
+ remove_proc_entry("segment_bits", sbi->s_proc);
remove_proc_entry(sb->s_id, f2fs_proc_root);
}
f2fs_destroy_stats(sbi);
@@ -1494,24 +2081,37 @@ free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
free_node_inode:
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
mutex_lock(&sbi->umount_mutex);
+ release_ino_entry(sbi, true);
f2fs_leave_shrinker(sbi);
+ /*
+ * Some dirty meta pages can be left behind when recover_orphan_inodes()
+ * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
+ * followed by write_checkpoint() through f2fs_write_node_pages(), which
+ * falls into an infinite loop in sync_meta_pages().
+ */
+ truncate_inode_pages_final(META_MAPPING(sbi));
iput(sbi->node_inode);
mutex_unlock(&sbi->umount_mutex);
free_nm:
destroy_node_manager(sbi);
free_sm:
destroy_segment_manager(sbi);
-free_cp:
+free_devices:
+ destroy_device_list(sbi);
kfree(sbi->ckpt);
free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
free_options:
+ destroy_percpu_info(sbi);
kfree(options);
free_sb_buf:
- brelse(raw_super_buf);
+ kfree(raw_super);
free_sbi:
+ if (sbi->s_chksum_driver)
+ crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi);
/* give only one another chance */
@@ -1547,8 +2147,9 @@ MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
- f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
- sizeof(struct f2fs_inode_info));
+ f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
+ sizeof(struct f2fs_inode_info), 0,
+ SLAB_RECLAIM_ACCOUNT, NULL);
if (!f2fs_inode_cachep)
return -ENOMEM;
return 0;
@@ -1590,25 +2191,23 @@ static int __init init_f2fs_fs(void)
err = -ENOMEM;
goto free_extent_cache;
}
- err = f2fs_init_crypto();
- if (err)
- goto free_kset;
-
err = register_shrinker(&f2fs_shrinker_info);
if (err)
- goto free_crypto;
+ goto free_kset;
err = register_filesystem(&f2fs_fs_type);
if (err)
goto free_shrinker;
- f2fs_create_root_stats();
+ err = f2fs_create_root_stats();
+ if (err)
+ goto free_filesystem;
f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
return 0;
+free_filesystem:
+ unregister_filesystem(&f2fs_fs_type);
free_shrinker:
unregister_shrinker(&f2fs_shrinker_info);
-free_crypto:
- f2fs_exit_crypto();
free_kset:
kset_unregister(f2fs_kset);
free_extent_cache:
@@ -1629,15 +2228,14 @@ static void __exit exit_f2fs_fs(void)
{
remove_proc_entry("fs/f2fs", NULL);
f2fs_destroy_root_stats();
- unregister_shrinker(&f2fs_shrinker_info);
unregister_filesystem(&f2fs_fs_type);
- f2fs_exit_crypto();
+ unregister_shrinker(&f2fs_shrinker_info);
+ kset_unregister(f2fs_kset);
destroy_extent_cache();
destroy_checkpoint_caches();
destroy_segment_manager_caches();
destroy_node_manager_caches();
destroy_inodecache();
- kset_unregister(f2fs_kset);
f2fs_destroy_trace_ios();
}
@@ -1647,3 +2245,4 @@ module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
+
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index 145fb659ad44..562ce0821559 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -29,7 +29,8 @@ static inline void __print_last_io(void)
last_io.major, last_io.minor,
last_io.pid, "----------------",
last_io.type,
- last_io.fio.rw, last_io.fio.blk_addr,
+ last_io.fio.rw,
+ last_io.fio.new_blkaddr,
last_io.len);
memset(&last_io, 0, sizeof(last_io));
}
@@ -101,7 +102,8 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
last_io.pid == pid &&
last_io.type == __file_type(inode, pid) &&
last_io.fio.rw == fio->rw &&
- last_io.fio.blk_addr + last_io.len == fio->blk_addr) {
+ last_io.fio.new_blkaddr + last_io.len ==
+ fio->new_blkaddr) {
last_io.len++;
return;
}
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 862368a32e53..1c4d5e39586c 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -151,7 +151,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
return -EINVAL;
F2FS_I(inode)->i_advise |= *(char *)value;
- mark_inode_dirty(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
return 0;
}
@@ -264,18 +264,20 @@ static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
return entry;
}
-static void *read_all_xattrs(struct inode *inode, struct page *ipage)
+static int read_all_xattrs(struct inode *inode, struct page *ipage,
+ void **base_addr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_header *header;
size_t size = PAGE_SIZE, inline_size = 0;
void *txattr_addr;
+ int err;
inline_size = inline_xattr_size(inode);
txattr_addr = kzalloc(inline_size + size, GFP_F2FS_ZERO);
if (!txattr_addr)
- return NULL;
+ return -ENOMEM;
/* read from inline xattr */
if (inline_size) {
@@ -286,8 +288,10 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
inline_addr = inline_xattr_addr(ipage);
} else {
page = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
goto fail;
+ }
inline_addr = inline_xattr_addr(page);
}
memcpy(txattr_addr, inline_addr, inline_size);
@@ -301,8 +305,10 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
/* The inode already has an extended attribute block. */
xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
- if (IS_ERR(xpage))
+ if (IS_ERR(xpage)) {
+ err = PTR_ERR(xpage);
goto fail;
+ }
xattr_addr = page_address(xpage);
memcpy(txattr_addr + inline_size, xattr_addr, PAGE_SIZE);
@@ -316,10 +322,11 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
header->h_refcount = cpu_to_le32(1);
}
- return txattr_addr;
+ *base_addr = txattr_addr;
+ return 0;
fail:
kzfree(txattr_addr);
- return NULL;
+ return err;
}
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
@@ -345,7 +352,8 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
if (ipage) {
inline_addr = inline_xattr_addr(ipage);
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
+ set_page_dirty(ipage);
} else {
page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) {
@@ -353,7 +361,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
return PTR_ERR(page);
}
inline_addr = inline_xattr_addr(page);
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
}
memcpy(inline_addr, txattr_addr, inline_size);
f2fs_put_page(page, 1);
@@ -374,7 +382,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
return PTR_ERR(xpage);
}
f2fs_bug_on(sbi, new_nid);
- f2fs_wait_on_page_writeback(xpage, NODE);
+ f2fs_wait_on_page_writeback(xpage, NODE, true);
} else {
struct dnode_of_data dn;
set_new_dnode(&dn, inode, NULL, NULL, new_nid);
@@ -412,9 +420,9 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (len > F2FS_NAME_LEN)
return -ERANGE;
- base_addr = read_all_xattrs(inode, ipage);
- if (!base_addr)
- return -ENOMEM;
+ error = read_all_xattrs(inode, ipage, &base_addr);
+ if (error)
+ return error;
entry = __find_xattr(base_addr, index, len, name);
if (IS_XATTR_LAST_ENTRY(entry)) {
@@ -448,9 +456,9 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
int error = 0;
size_t rest = buffer_size;
- base_addr = read_all_xattrs(inode, NULL);
- if (!base_addr)
- return -ENOMEM;
+ error = read_all_xattrs(inode, NULL, &base_addr);
+ if (error)
+ return error;
list_for_each_xattr(entry, base_addr) {
const struct xattr_handler *handler =
@@ -481,13 +489,12 @@ static int __f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
struct page *ipage, int flags)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_xattr_entry *here, *last;
void *base_addr;
int found, newsize;
size_t len;
__u32 new_hsize;
- int error = -ENOMEM;
+ int error = 0;
if (name == NULL)
return -EINVAL;
@@ -503,9 +510,9 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (size > MAX_VALUE_LEN(inode))
return -E2BIG;
- base_addr = read_all_xattrs(inode, ipage);
- if (!base_addr)
- goto exit;
+ error = read_all_xattrs(inode, ipage, &base_addr);
+ if (error)
+ return error;
/* find entry with wanted name. */
here = __find_xattr(base_addr, index, len, name);
@@ -538,7 +545,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
free = free + ENTRY_SIZE(here);
if (unlikely(free < newsize)) {
- error = -ENOSPC;
+ error = -E2BIG;
goto exit;
}
}
@@ -566,7 +573,6 @@ static int __f2fs_setxattr(struct inode *inode, int index,
* Before we come here, old entry is removed.
* We just write new entry.
*/
- memset(last, 0, newsize);
last->e_name_index = index;
last->e_name_len = len;
memcpy(last->e_name, name, len);
@@ -580,19 +586,17 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (error)
goto exit;
- if (is_inode_flag_set(fi, FI_ACL_MODE)) {
- inode->i_mode = fi->i_acl_mode;
- inode->i_ctime = CURRENT_TIME;
- clear_inode_flag(fi, FI_ACL_MODE);
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ inode->i_ctime = current_time(inode);
+ clear_inode_flag(inode, FI_ACL_MODE);
}
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
-
- if (ipage)
- update_inode(inode, ipage);
- else
- update_inode_page(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ if (!error && S_ISDIR(inode->i_mode))
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
exit:
kzfree(base_addr);
return error;
@@ -609,7 +613,7 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
if (ipage)
return __f2fs_setxattr(inode, index, name, value,
size, ipage, flags);
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
/* protect xattr_ver */
@@ -618,5 +622,6 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
up_write(&F2FS_I(inode)->i_sem);
f2fs_unlock_op(sbi);
+ f2fs_update_time(sbi, REQ_TIME);
return err;
}
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 71a7100d5492..d2fd0387a3c7 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -126,7 +126,8 @@ extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
#define f2fs_xattr_handlers NULL
static inline int f2fs_setxattr(struct inode *inode, int index,
- const char *name, const void *value, size_t size, int flags)
+ const char *name, const void *value, size_t size,
+ struct page *page, int flags)
{
return -EOPNOTSUPP;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index f32450c3e72c..adbe44dda88f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -227,6 +227,7 @@ static struct mount *alloc_vfsmnt(const char *name)
mnt->mnt_count = 1;
mnt->mnt_writers = 0;
#endif
+ mnt->mnt.data = NULL;
INIT_HLIST_NODE(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
@@ -976,7 +977,6 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
if (!mnt)
return ERR_PTR(-ENOMEM);
- mnt->mnt.data = NULL;
if (type->alloc_mnt_data) {
mnt->mnt.data = type->alloc_mnt_data();
if (!mnt->mnt.data) {
@@ -990,7 +990,6 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
root = mount_fs(type, flags, name, &mnt->mnt, data);
if (IS_ERR(root)) {
- kfree(mnt->mnt.data);
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_CAST(root);
@@ -1094,7 +1093,6 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
return mnt;
out_free:
- kfree(mnt->mnt.data);
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_PTR(err);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 9dea85f7f918..578350fd96e1 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -243,7 +243,6 @@ int nfs_iocounter_wait(struct nfs_io_counter *c);
extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
-void nfs_pgio_data_destroy(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 452a011ba0d8..8ebfdd00044b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -528,16 +528,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
-/*
- * nfs_pgio_header_free - Free a read or write header
- * @hdr: The header to free
- */
-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
-{
- hdr->rw_ops->rw_free_header(hdr);
-}
-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
-
/**
* nfs_pgio_data_destroy - make @hdr suitable for reuse
*
@@ -546,14 +536,24 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
*
* @hdr: A header that has had nfs_generic_pgio called
*/
-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
if (hdr->args.context)
put_nfs_open_context(hdr->args.context);
if (hdr->page_array.pagevec != hdr->page_array.page_array)
kfree(hdr->page_array.pagevec);
}
-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
+
+/*
+ * nfs_pgio_header_free - Free a read or write header
+ * @hdr: The header to free
+ */
+void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+{
+ nfs_pgio_data_destroy(hdr);
+ hdr->rw_ops->rw_free_header(hdr);
+}
+EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
/**
* nfs_pgio_rpcsetup - Set up arguments for a pageio call
@@ -671,7 +671,6 @@ static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
u32 midx;
set_bit(NFS_IOHDR_REDO, &hdr->flags);
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
/* TODO: Make sure it's right to clean up all mirrors here
* and not just hdr->pgio_mirror_idx */
@@ -689,7 +688,6 @@ static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
static void nfs_pgio_release(void *calldata)
{
struct nfs_pgio_header *hdr = calldata;
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 3cae0726c1b1..7af7bedd7c02 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1943,7 +1943,6 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_write_mds(desc);
mirror->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}
@@ -2059,7 +2058,6 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_read_mds(desc);
mirror->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c7f1ce41442a..9e5a6842346e 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1145,9 +1145,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
-
- lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
@@ -1156,12 +1154,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+ struct nfs4_client *clp = stp->st_stid.sc_client;
bool unhashed;
- spin_lock(&oo->oo_owner.so_client->cl_lock);
+ spin_lock(&clp->cl_lock);
unhashed = unhash_lock_stateid(stp);
- spin_unlock(&oo->oo_owner.so_client->cl_lock);
+ spin_unlock(&clp->cl_lock);
if (unhashed)
nfs4_put_stid(&stp->st_stid);
}
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 85a60fb5ff39..e0b74dc77c5f 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -176,6 +176,9 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
gid_t gid = sbi->options.fs_low_gid;
struct iattr newattrs;
+ if (!sbi->options.gid_derivation)
+ return;
+
info = SDCARDFS_I(d_inode(dentry));
info_d = info->data;
perm = info_d->perm;
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 103dc45a131f..85eb8ebb5372 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -34,10 +34,14 @@ const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
if (!cred)
return NULL;
- if (data->under_obb)
- uid = AID_MEDIA_OBB;
- else
- uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
+ if (sbi->options.gid_derivation) {
+ if (data->under_obb)
+ uid = AID_MEDIA_OBB;
+ else
+ uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
+ } else {
+ uid = sbi->options.fs_low_uid;
+ }
cred->fsuid = make_kuid(&init_user_ns, uid);
cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 80825b287836..0a2b5167e9a2 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -32,6 +32,7 @@ enum {
Opt_multiuser,
Opt_userid,
Opt_reserved_mb,
+ Opt_gid_derivation,
Opt_err,
};
@@ -43,6 +44,7 @@ static const match_table_t sdcardfs_tokens = {
{Opt_mask, "mask=%u"},
{Opt_userid, "userid=%d"},
{Opt_multiuser, "multiuser"},
+ {Opt_gid_derivation, "derive_gid"},
{Opt_reserved_mb, "reserved_mb=%u"},
{Opt_err, NULL}
};
@@ -64,6 +66,8 @@ static int parse_options(struct super_block *sb, char *options, int silent,
vfsopts->gid = 0;
/* by default, 0MB is reserved */
opts->reserved_mb = 0;
+ /* by default, gid derivation is off */
+ opts->gid_derivation = false;
*debug = 0;
@@ -115,6 +119,9 @@ static int parse_options(struct super_block *sb, char *options, int silent,
return 0;
opts->reserved_mb = option;
break;
+ case Opt_gid_derivation:
+ opts->gid_derivation = true;
+ break;
/* unknown option */
default:
if (!silent)
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index b8624fd8b817..88b92b2f1872 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -219,6 +219,7 @@ struct sdcardfs_mount_options {
gid_t fs_low_gid;
userid_t fs_user_id;
bool multiuser;
+ bool gid_derivation;
unsigned int reserved_mb;
};
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index 7f4539b4b249..b89947d878e3 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -302,6 +302,8 @@ static int sdcardfs_show_options(struct vfsmount *mnt, struct seq_file *m,
seq_printf(m, ",mask=%u", vfsopts->mask);
if (opts->fs_user_id)
seq_printf(m, ",userid=%u", opts->fs_user_id);
+ if (opts->gid_derivation)
+ seq_puts(m, ",derive_gid");
if (opts->reserved_mb != 0)
seq_printf(m, ",reserved=%uMB", opts->reserved_mb);
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ec0e239a0fa9..201aae0b2662 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -369,7 +369,14 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
#endif /* DEBUG */
#ifdef CONFIG_XFS_RT
-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+
+/*
+ * make sure we ignore the inode flag if the filesystem doesn't have a
+ * configured realtime device.
+ */
+#define XFS_IS_REALTIME_INODE(ip) \
+ (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \
+ (ip)->i_mount->m_rtdev_targp)
#else
#define XFS_IS_REALTIME_INODE(ip) (0)
#endif
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index fc824e2828f3..5d2add1a6c96 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -48,7 +48,11 @@
#define parent_node(node) ((void)(node),0)
#endif
#ifndef cpumask_of_node
-#define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
+ #else
+ #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #endif
#endif
#ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 5daa9e78584c..a998cf205cdc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -119,7 +119,15 @@ struct cpufreq_policy {
bool fast_switch_possible;
bool fast_switch_enabled;
- /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
+ /*
+ * Preferred average time interval between consecutive invocations of
+ * the driver to set the frequency for this policy. To be set by the
+ * scaling driver (0, which is the default, means no preference).
+ */
+ unsigned int up_transition_delay_us;
+ unsigned int down_transition_delay_us;
+
+ /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
int cached_resolved_idx;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 01448e01b40d..c066f6b56e58 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -229,6 +229,7 @@ struct dentry_operations {
#define DCACHE_MAY_FREE 0x00800000
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
+#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
#define DCACHE_OP_REAL 0x08000000
extern seqlock_t rename_lock;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 3d6e6ce44c5c..3e5972ef5019 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -21,7 +21,7 @@
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
+#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
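The F2FS_BLK_ALIGN change swaps the division for a shift; because F2FS_BLKSIZE is 4096 = 1 << F2FS_BLKSIZE_BITS, both forms compute the same ceiling. A quick standalone check:

#include <assert.h>

#define BLKSIZE		4096
#define BLKSIZE_BITS	12

int main(void)
{
	unsigned long x;

	/* ceil(x / 4096) by division and by shift agree for every x */
	for (x = 0; x < 3 * BLKSIZE; x++)
		assert((x + BLKSIZE - 1) / BLKSIZE ==
		       (x + BLKSIZE - 1) >> BLKSIZE_BITS);
	return 0;
}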
@@ -51,10 +51,18 @@
#define MAX_ACTIVE_DATA_LOGS 8
#define VERSION_LEN 256
+#define MAX_VOLUME_NAME 512
+#define MAX_PATH_LEN 64
+#define MAX_DEVICES 8
/*
* For superblock
*/
+struct f2fs_device {
+ __u8 path[MAX_PATH_LEN];
+ __le32 total_segments;
+} __packed;
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
@@ -84,7 +92,7 @@ struct f2fs_super_block {
__le32 node_ino; /* node inode number */
__le32 meta_ino; /* meta inode number */
__u8 uuid[16]; /* 128-bit uuid for volume */
- __le16 volume_name[512]; /* volume name */
+ __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */
__le32 extension_count; /* # of extensions below */
__u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
__le32 cp_payload;
@@ -93,12 +101,14 @@ struct f2fs_super_block {
__le32 feature; /* defined features */
__u8 encryption_level; /* versioning level for encryption */
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
- __u8 reserved[871]; /* valid reserved region */
+ struct f2fs_device devs[MAX_DEVICES]; /* device list */
+ __u8 reserved[327]; /* valid reserved region */
} __packed;
/*
* For checkpoint
*/
+#define CP_CRC_RECOVERY_FLAG 0x00000040
#define CP_FASTBOOT_FLAG 0x00000020
#define CP_FSCK_FLAG 0x00000010
#define CP_ERROR_FLAG 0x00000008
@@ -169,12 +179,12 @@ struct f2fs_extent {
#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
+#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
-#define ADDRS_PER_PAGE(page, fi) \
- (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK)
+#define ADDRS_PER_PAGE(page, inode) \
+ (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
@@ -261,7 +271,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -281,7 +291,7 @@ struct f2fs_nat_block {
* Not allow to change this.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
/*
* F2FS uses 4 bytes to represent block address. As a result, supported size of
@@ -350,7 +360,7 @@ struct f2fs_summary {
struct summary_footer {
unsigned char entry_type; /* SUM_TYPE_XXX */
- __u32 check_sum; /* summary checksum */
+ __le32 check_sum; /* summary checksum */
} __packed;
#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
@@ -363,6 +373,12 @@ struct summary_footer {
sizeof(struct sit_journal_entry))
#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
sizeof(struct sit_journal_entry))
+
+/* Reserved area should make the size of f2fs_extra_info equal to
+ * that of nat_journal and sit_journal.
+ */
+#define EXTRA_INFO_RESERVED (SUM_JOURNAL_SIZE - 2 - 8)
+
/*
* frequently updated NAT/SIT entries can be stored in the spare area in
* summary blocks
@@ -392,18 +408,28 @@ struct sit_journal {
__u8 reserved[SIT_JOURNAL_RESERVED];
} __packed;
-/* 4KB-sized summary block structure */
-struct f2fs_summary_block {
- struct f2fs_summary entries[ENTRIES_IN_SUM];
+struct f2fs_extra_info {
+ __le64 kbytes_written;
+ __u8 reserved[EXTRA_INFO_RESERVED];
+} __packed;
+
+struct f2fs_journal {
union {
__le16 n_nats;
__le16 n_sits;
};
- /* spare area is used by NAT or SIT journals */
+ /* spare area is used by NAT or SIT journals or extra info */
union {
struct nat_journal nat_j;
struct sit_journal sit_j;
+ struct f2fs_extra_info info;
};
+} __packed;
+
+/* 4KB-sized summary block structure */
+struct f2fs_summary_block {
+ struct f2fs_summary entries[ENTRIES_IN_SUM];
+ struct f2fs_journal journal;
struct summary_footer footer;
} __packed;
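Because f2fs_journal overlays nat_j, sit_j, and info in one union, EXTRA_INFO_RESERVED is sized so that all three arms come out at SUM_JOURNAL_SIZE - 2 bytes, leaving the 4KB summary block layout unchanged. That intent could be pinned down with a compile-time check (illustrative only; the kernel sources do not carry this assert):

_Static_assert(sizeof(struct f2fs_extra_info) == sizeof(struct nat_journal) &&
	       sizeof(struct f2fs_extra_info) == sizeof(struct sit_journal),
	       "journal union arms must all be SUM_JOURNAL_SIZE - 2 bytes");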
@@ -497,4 +523,6 @@ enum {
F2FS_FT_MAX
};
+#define S_SHIFT 12
+
#endif /* _LINUX_F2FS_FS_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index df1171bada01..c6de40668e08 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -52,6 +52,8 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
+struct fscrypt_info;
+struct fscrypt_operations;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -686,6 +688,9 @@ struct inode {
struct hlist_head i_fsnotify_marks;
#endif
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ struct fscrypt_info *i_crypt_info;
+#endif
void *i_private; /* fs or device private pointer */
};
@@ -1350,6 +1355,8 @@ struct super_block {
#endif
const struct xattr_handler **s_xattr;
+ const struct fscrypt_operations *s_cop;
+
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
new file mode 100644
index 000000000000..ff8b11b26f31
--- /dev/null
+++ b/include/linux/fscrypto.h
@@ -0,0 +1,411 @@
+/*
+ * General per-file encryption definition
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#ifndef _LINUX_FSCRYPTO_H
+#define _LINUX_FSCRYPTO_H
+
+#include <linux/key.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <crypto/skcipher.h>
+#include <uapi/linux/fs.h>
+
+#define FS_KEY_DERIVATION_NONCE_SIZE 16
+#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
+
+#define FS_POLICY_FLAGS_PAD_4 0x00
+#define FS_POLICY_FLAGS_PAD_8 0x01
+#define FS_POLICY_FLAGS_PAD_16 0x02
+#define FS_POLICY_FLAGS_PAD_32 0x03
+#define FS_POLICY_FLAGS_PAD_MASK 0x03
+#define FS_POLICY_FLAGS_VALID 0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID 0
+#define FS_ENCRYPTION_MODE_AES_256_XTS 1
+#define FS_ENCRYPTION_MODE_AES_256_GCM 2
+#define FS_ENCRYPTION_MODE_AES_256_CBC 3
+#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ * 1 byte: Protector format (1 = this version)
+ * 1 byte: File contents encryption mode
+ * 1 byte: File names encryption mode
+ * 1 byte: Flags
+ * 8 bytes: Master Key descriptor
+ * 16 bytes: Encryption Key derivation nonce
+ */
+struct fscrypt_context {
+ u8 format;
+ u8 contents_encryption_mode;
+ u8 filenames_encryption_mode;
+ u8 flags;
+ u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+} __packed;
+
+/* Encryption parameters */
+#define FS_XTS_TWEAK_SIZE 16
+#define FS_AES_128_ECB_KEY_SIZE 16
+#define FS_AES_256_GCM_KEY_SIZE 32
+#define FS_AES_256_CBC_KEY_SIZE 32
+#define FS_AES_256_CTS_KEY_SIZE 32
+#define FS_AES_256_XTS_KEY_SIZE 64
+#define FS_MAX_KEY_SIZE 64
+
+#define FS_KEY_DESC_PREFIX "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE 8
+
+/* This is passed in from userspace into the kernel keyring */
+struct fscrypt_key {
+ u32 mode;
+ u8 raw[FS_MAX_KEY_SIZE];
+ u32 size;
+} __packed;
+
+struct fscrypt_info {
+ u8 ci_data_mode;
+ u8 ci_filename_mode;
+ u8 ci_flags;
+ struct crypto_skcipher *ci_ctfm;
+ struct key *ci_keyring_key;
+ u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+};
+
+#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
+#define FS_WRITE_PATH_FL 0x00000002
+
+struct fscrypt_ctx {
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio;
+ struct work_struct work;
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ u8 flags; /* Flags */
+ u8 mode; /* Encryption mode for tfm */
+};
+
+struct fscrypt_completion_result {
+ struct completion completion;
+ int res;
+};
+
+#define DECLARE_FS_COMPLETION_RESULT(ecr) \
+ struct fscrypt_completion_result ecr = { \
+ COMPLETION_INITIALIZER((ecr).completion), 0 }
+
+#define FS_FNAME_NUM_SCATTER_ENTRIES 4
+#define FS_CRYPTO_BLOCK_SIZE 16
+#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct fscrypt_symlink_data {
+ __le16 len;
+ char encrypted_path[1];
+} __packed;
+
+/**
+ * Calculate the disk space required to store a filename of
+ * length l in encrypted symlink format.
+ */
+static inline u32 fscrypt_symlink_data_len(u32 l)
+{
+ if (l < FS_CRYPTO_BLOCK_SIZE)
+ l = FS_CRYPTO_BLOCK_SIZE;
+ return (l + sizeof(struct fscrypt_symlink_data) - 1);
+}
+
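For instance, with FS_CRYPTO_BLOCK_SIZE = 16 and the 2-byte length header, a 5-character name pads up to one crypto block. A userspace re-statement of the helper (the packed struct and names are stand-ins, not kernel code):

#include <assert.h>
#include <stdint.h>

#define CRYPTO_BLOCK 16

struct symlink_data {
	uint16_t len;
	char path[1];
} __attribute__((packed));	/* matches the __packed on-disk layout */

static uint32_t symlink_data_len(uint32_t l)
{
	if (l < CRYPTO_BLOCK)
		l = CRYPTO_BLOCK;
	return l + sizeof(struct symlink_data) - 1;
}

int main(void)
{
	assert(symlink_data_len(5) == 18);	/* padded to 16, + 2 header */
	assert(symlink_data_len(20) == 22);	/* no padding: 20 + 2 */
	return 0;
}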
+struct fscrypt_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct fscrypt_name {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ u32 hash;
+ u32 minor_hash;
+ struct fscrypt_str crypto_buf;
+};
+
+#define FSTR_INIT(n, l) { .name = n, .len = l }
+#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
+#define fname_name(p) ((p)->disk_name.name)
+#define fname_len(p) ((p)->disk_name.len)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+ int (*get_context)(struct inode *, void *, size_t);
+ int (*key_prefix)(struct inode *, u8 **);
+ int (*prepare_context)(struct inode *);
+ int (*set_context)(struct inode *, const void *, size_t, void *);
+ int (*dummy_context)(struct inode *);
+ bool (*is_encrypted)(struct inode *);
+ bool (*empty_dir)(struct inode *);
+ unsigned (*max_namelen)(struct inode *);
+};
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ if (inode->i_sb->s_cop->dummy_context &&
+ inode->i_sb->s_cop->dummy_context(inode))
+ return true;
+ return false;
+}
+
+static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
+{
+ return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
+}
+
+static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
+{
+ return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
+}
+
+static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+{
+ if (str->len == 1 && str->name[0] == '.')
+ return true;
+
+ if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+ return true;
+
+ return false;
+}
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+#else
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+#endif
+}
+
+static inline int fscrypt_has_encryption_key(struct inode *inode)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return (inode->i_crypt_info != NULL);
+#else
+ return 0;
+#endif
+}
+
+static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+extern const struct dentry_operations fscrypt_d_ops;
+#endif
+
+static inline void fscrypt_set_d_op(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ d_set_d_op(dentry, &fscrypt_d_ops);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+/* crypto.c */
+extern struct kmem_cache *fscrypt_info_cachep;
+int fscrypt_initialize(void);
+
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
+extern void fscrypt_release_ctx(struct fscrypt_ctx *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
+extern int fscrypt_decrypt_page(struct page *);
+extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern void fscrypt_restore_control_page(struct page *);
+extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
+ unsigned int);
+/* policy.c */
+extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
+extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
+extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
+extern int fscrypt_inherit_context(struct inode *, struct inode *,
+ void *, bool);
+/* keyinfo.c */
+extern int get_crypt_info(struct inode *);
+extern int fscrypt_get_encryption_info(struct inode *);
+extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+
+/* fname.c */
+extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
+ int lookup, struct fscrypt_name *);
+extern void fscrypt_free_filename(struct fscrypt_name *);
+extern u32 fscrypt_fname_encrypted_size(struct inode *, u32);
+extern int fscrypt_fname_alloc_buffer(struct inode *, u32,
+ struct fscrypt_str *);
+extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
+extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
+ const struct fscrypt_str *, struct fscrypt_str *);
+extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
+ struct fscrypt_str *);
+#endif
+
+/* crypto.c */
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+ gfp_t f)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
+{
+ return;
+}
+
+static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
+ struct page *p, gfp_t f)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_notsupp_decrypt_page(struct page *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c,
+ struct bio *b)
+{
+ return;
+}
+
+static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b)
+{
+ return;
+}
+
+static inline void fscrypt_notsupp_restore_control_page(struct page *p)
+{
+ return;
+}
+
+static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
+ sector_t s, unsigned int f)
+{
+ return -EOPNOTSUPP;
+}
+
+/* policy.c */
+static inline int fscrypt_notsupp_process_policy(struct file *f,
+ const struct fscrypt_policy *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_get_policy(struct inode *i,
+ struct fscrypt_policy *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_has_permitted_context(struct inode *p,
+ struct inode *i)
+{
+ return 0;
+}
+
+static inline int fscrypt_notsupp_inherit_context(struct inode *p,
+ struct inode *i, void *v, bool b)
+{
+ return -EOPNOTSUPP;
+}
+
+/* keyinfo.c */
+static inline int fscrypt_notsupp_get_encryption_info(struct inode *i)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_put_encryption_info(struct inode *i,
+ struct fscrypt_info *f)
+{
+ return;
+}
+
+/* fname.c */
+static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
+ const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ if (dir->i_sb->s_cop->is_encrypted(dir))
+ return -EOPNOTSUPP;
+
+ memset(fname, 0, sizeof(struct fscrypt_name));
+ fname->usr_fname = iname;
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+}
+
+static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname)
+{
+ return;
+}
+
+static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s)
+{
+ /* never happens */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode,
+ u32 ilen, struct fscrypt_str *crypto_str)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c)
+{
+ return;
+}
+
+static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode,
+ const struct qstr *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* _LINUX_FSCRYPTO_H */
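A filesystem opts in by filling an fscrypt_operations table and pointing sb->s_cop at it during fill_super (as the f2fs hunk earlier does with f2fs_cryptops); when encryption is compiled out, the fscrypt_notsupp_* stubs above stand in for the real entry points. A schematic of the wiring (callback bodies are placeholders):

/* Hypothetical filesystem glue: a real implementation would load and
 * store the context in its own metadata, e.g. an xattr. */
static int demofs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return -ENODATA;	/* placeholder: no context stored yet */
}

static bool demofs_is_encrypted(struct inode *inode)
{
	return false;		/* placeholder: consult on-disk flags */
}

static const struct fscrypt_operations demofs_cryptops = {
	.get_context	= demofs_get_context,
	.is_encrypted	= demofs_is_encrypted,
};

/* in demofs_fill_super(): sb->s_cop = &demofs_cryptops; */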
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 782d4e814e21..4bc4b1b13193 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -310,6 +310,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
{
struct ppa_addr l;
+ l.ppa = 0;
/*
* (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
*/
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 33316a1ae98f..fe0a5de1eda5 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
+#include <linux/workqueue.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -531,6 +532,7 @@ struct mm_struct {
int app_setting;
#endif
+ struct work_struct async_put_work;
};
static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 37f05cb1dfd6..1af616138d1d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -573,6 +573,7 @@
#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
+#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092
#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2716faadc618..7299540fe1ec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2969,6 +2969,11 @@ static inline void mmdrop(struct mm_struct * mm)
/* mmput gets rid of the mappings and all user-space */
extern int mmput(struct mm_struct *);
+/* same as above but performs the slow path from the async context. Can
+ * be called from atomic context as well.
+ */
+extern void mmput_async(struct mm_struct *);
+
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index c28dd523f96e..d43837f2ce3a 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port,
unsigned char **chars, size_t size);
extern void tty_flip_buffer_push(struct tty_port *port);
void tty_schedule_flip(struct tty_port *port);
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
static inline int tty_insert_flip_char(struct tty_port *port,
unsigned char ch, char flag)
@@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port,
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
- return tty_insert_flip_string_flags(port, &ch, &flag, 1);
+ return __tty_insert_flip_char(port, ch, flag);
}
static inline int tty_insert_flip_string(struct tty_port *port,
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 2e04fa5a5b58..4e4aee64f559 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -311,7 +311,7 @@ enum {
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
- __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
+ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index ac42bbb37b2d..c26a6e4dc306 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,14 +1,9 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
-#include <linux/percpu_counter.h>
-
struct netns_frags {
- /* The percpu_counter "mem" need to be cacheline aligned.
- * mem.count must not share cacheline with other writers
- */
- struct percpu_counter mem ____cacheline_aligned_in_smp;
-
+ /* Keep atomic mem on separate cachelines in structs that include it */
+ atomic_t mem ____cacheline_aligned_in_smp;
/* sysctls */
int timeout;
int high_thresh;
@@ -108,15 +103,10 @@ struct inet_frags {
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
-static inline int inet_frags_init_net(struct netns_frags *nf)
-{
- return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
-}
-static inline void inet_frags_uninit_net(struct netns_frags *nf)
+static inline void inet_frags_init_net(struct netns_frags *nf)
{
- percpu_counter_destroy(&nf->mem);
+ atomic_set(&nf->mem, 0);
}
-
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
@@ -140,37 +130,24 @@ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
/* Memory Tracking Functions. */
-/* The default percpu_counter batch size is not big enough to scale to
- * fragmentation mem acct sizes.
- * The mem size of a 64K fragment is approx:
- * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
- */
-static unsigned int frag_percpu_counter_batch = 130000;
-
static inline int frag_mem_limit(struct netns_frags *nf)
{
- return percpu_counter_read(&nf->mem);
+ return atomic_read(&nf->mem);
}
static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
- __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+ atomic_sub(i, &nf->mem);
}
static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
- __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+ atomic_add(i, &nf->mem);
}
-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
+static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
- unsigned int res;
-
- local_bh_disable();
- res = percpu_counter_sum_positive(&nf->mem);
- local_bh_enable();
-
- return res;
+ return atomic_read(&nf->mem);
}
/* RFC 3168 support :
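The switch from a percpu_counter to a plain atomic_t trades batched per-CPU accounting for one exact, cheap-to-read counter, which is what the eviction path actually wants. A minimal standalone sketch of the resulting pattern, using C11 atomics — the names only mirror the kernel helpers above and are not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>

struct frags_mem {
	atomic_int mem;
	int high_thresh;
};

static void frags_init(struct frags_mem *nf)
{
	atomic_store(&nf->mem, 0);	/* mirrors the new inet_frags_init_net() */
}

static void add_mem(struct frags_mem *nf, int i) { atomic_fetch_add(&nf->mem, i); }
static void sub_mem(struct frags_mem *nf, int i) { atomic_fetch_sub(&nf->mem, i); }

/* Over-limit check: a single atomic read replaces a percpu sum. */
static bool over_limit(struct frags_mem *nf)
{
	return atomic_load(&nf->mem) > nf->high_thresh;
}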
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index fb961a576abe..fa5e703a14ed 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -68,6 +68,7 @@ struct fib6_node {
__u16 fn_flags;
int fn_sernum;
struct rt6_info *rr_ptr;
+ struct rcu_head rcu;
};
#ifndef CONFIG_IPV6_SUBTREES
@@ -102,7 +103,7 @@ struct rt6_info {
* the same cache line.
*/
struct fib6_table *rt6i_table;
- struct fib6_node *rt6i_node;
+ struct fib6_node __rcu *rt6i_node;
struct in6_addr rt6i_gateway;
@@ -165,13 +166,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
rt0->rt6i_flags |= RTF_EXPIRES;
}
+/* Safely read fn->sernum for the passed-in rt and store the result
+ * in the passed-in cookie.
+ * Returns true if the cookie could be read safely, false otherwise.
+ */
+static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
+ u32 *cookie)
+{
+ struct fib6_node *fn;
+ bool status = false;
+
+ rcu_read_lock();
+ fn = rcu_dereference(rt->rt6i_node);
+
+ if (fn) {
+ *cookie = fn->fn_sernum;
+ status = true;
+ }
+
+ rcu_read_unlock();
+ return status;
+}
+
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
+ u32 cookie = 0;
+
if (rt->rt6i_flags & RTF_PCPU ||
(unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
rt = (struct rt6_info *)(rt->dst.from);
- return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ rt6_get_cookie_safe(rt, &cookie);
+
+ return cookie;
}
static inline void ip6_rt_put(struct rt6_info *rt)
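With rt6i_node now marked __rcu, direct dereferences outside the table lock are invalid; rt6_get_cookie_safe() is the sanctioned accessor. An illustrative caller — the helper name and the cached-cookie scheme are hypothetical, only the accessor comes from this patch:

/* Hypothetical: revalidate a cached route against its fib6_node serial. */
static bool my_rt6_still_valid(const struct rt6_info *rt, u32 cached_cookie)
{
	u32 cookie;

	/* Returns false if the node was already unlinked from the tree. */
	if (!rt6_get_cookie_safe(rt, &cookie))
		return false;

	return cookie == cached_cookie;
}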
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 21b30bf7c74c..6078ef2e24de 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -9349,6 +9349,73 @@ struct asm_aptx_dec_fmt_blk_v2 {
*/
} __packed;
+/* Q6Core Specific */
+#define AVCS_CMD_GET_FWK_VERSION (0x0001292C)
+#define AVCS_CMDRSP_GET_FWK_VERSION (0x0001292D)
+
+#define AVCS_SERVICE_ID_ALL (0xFFFFFFFF)
+
+struct avcs_get_fwk_version {
+ /*
+ * Indicates the major version of the AVS build.
+ * This value is incremented on chipset family boundaries.
+ */
+ uint32_t build_major_version;
+
+ /*
+ * Minor version of the AVS build.
+ * This value represents the mainline to which the AVS build belongs.
+ */
+ uint32_t build_minor_version;
+
+ /* Indicates the AVS branch version to which the image belongs. */
+ uint32_t build_branch_version;
+
+ /* Indicates the AVS sub-branch or customer product line information. */
+ uint32_t build_subbranch_version;
+
+ /* Number of supported AVS services in the current build. */
+ uint32_t num_services;
+};
+
+struct avs_svc_api_info {
+ /*
+ * APRV2 service IDs for the individual static services.
+ *
+ * @values
+ * - APRV2_IDS_SERVICE_ID_ADSP_CORE_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_AFE_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_ASM_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_ADM_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_MVM_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_CVS_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_CVP_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_LSM_V
+ */
+ uint32_t service_id;
+
+ /*
+ * Indicates the API version of the service.
+ *
+ * Each new API update that warrants a change on the HLOS side triggers
+ * an increment in the version.
+ */
+ uint32_t api_version;
+
+ /*
+ * Indicates the API increments on a sub-branch (not on the mainline).
+ *
+ * API branch version numbers can increment independently on different
+ * sub-branches.
+ */
+ uint32_t api_branch_version;
+};
+
+struct avcs_fwk_ver_info {
+ struct avcs_get_fwk_version avcs_build;
+ struct avs_svc_api_info services[0];
+};
+
/* LSM Specific */
#define VW_FEAT_DIM (39)
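Since avcs_fwk_ver_info ends in a zero-length services[] array, any caller has to size the allocation from num_services before copying the payload in. A hedged sketch — the helper is hypothetical; kzalloc()/GFP_KERNEL are the usual kernel allocation API:

/* Hypothetical helper: room for the header plus one entry per service. */
static struct avcs_fwk_ver_info *alloc_ver_info(uint32_t num_services)
{
	size_t size = sizeof(struct avcs_fwk_ver_info) +
		      num_services * sizeof(struct avs_svc_api_info);

	return kzalloc(size, GFP_KERNEL);
}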
diff --git a/include/sound/q6core.h b/include/sound/q6core.h
index 4f55880d410f..0ce171973cb5 100644
--- a/include/sound/q6core.h
+++ b/include/sound/q6core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, 2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,7 @@
#ifndef __Q6CORE_H__
#define __Q6CORE_H__
#include <linux/qdsp6v2/apr.h>
+#include <sound/apr_audio-v2.h>
@@ -23,6 +24,9 @@ bool q6core_is_adsp_ready(void);
int q6core_add_remove_pool_pages(phys_addr_t buf_add, uint32_t bufsz,
uint32_t mempool_id, bool add_pages);
+int q6core_get_avcs_fwk_ver_info(uint32_t service_id,
+ struct avcs_fwk_ver_info *ver_info);
+
#define ADSP_CMD_SET_DTS_EAGLE_DATA_ID 0x00012919
#define DTS_EAGLE_LICENSE_ID 0x00028346
struct adsp_dts_eagle {
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 00b4a6308249..90d6ad49a9c5 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -52,6 +52,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ META_FLUSH, "META_FLUSH" }, \
{ INMEM, "INMEM" }, \
{ INMEM_DROP, "INMEM_DROP" }, \
+ { INMEM_REVOKE, "INMEM_REVOKE" }, \
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
@@ -693,28 +694,32 @@ TRACE_EVENT(f2fs_direct_IO_exit,
__entry->ret)
);
-TRACE_EVENT(f2fs_reserve_new_block,
+TRACE_EVENT(f2fs_reserve_new_blocks,
- TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node),
+ TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
+ blkcnt_t count),
- TP_ARGS(inode, nid, ofs_in_node),
+ TP_ARGS(inode, nid, ofs_in_node, count),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(nid_t, nid)
__field(unsigned int, ofs_in_node)
+ __field(blkcnt_t, count)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->nid = nid;
__entry->ofs_in_node = ofs_in_node;
+ __entry->count = count;
),
- TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u",
+ TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu",
show_dev(__entry),
(unsigned int)__entry->nid,
- __entry->ofs_in_node)
+ __entry->ofs_in_node,
+ (unsigned long long)__entry->count)
);
DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
@@ -727,7 +732,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__field(dev_t, dev)
__field(ino_t, ino)
__field(pgoff_t, index)
- __field(block_t, blkaddr)
+ __field(block_t, old_blkaddr)
+ __field(block_t, new_blkaddr)
__field(int, rw)
__field(int, type)
),
@@ -736,16 +742,18 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__entry->dev = page->mapping->host->i_sb->s_dev;
__entry->ino = page->mapping->host->i_ino;
__entry->index = page->index;
- __entry->blkaddr = fio->blk_addr;
+ __entry->old_blkaddr = fio->old_blkaddr;
+ __entry->new_blkaddr = fio->new_blkaddr;
__entry->rw = fio->rw;
__entry->type = fio->type;
),
TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
- "blkaddr = 0x%llx, rw = %s%s, type = %s",
+ "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%s, type = %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
- (unsigned long long)__entry->blkaddr,
+ (unsigned long long)__entry->old_blkaddr,
+ (unsigned long long)__entry->new_blkaddr,
show_bio_type(__entry->rw),
show_block_type(__entry->type))
);
@@ -1102,6 +1110,27 @@ TRACE_EVENT(f2fs_issue_discard,
(unsigned long long)__entry->blklen)
);
+TRACE_EVENT(f2fs_issue_reset_zone,
+
+ TP_PROTO(struct super_block *sb, block_t blkstart),
+
+ TP_ARGS(sb, blkstart),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(block_t, blkstart)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->blkstart = blkstart;
+ ),
+
+ TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+ show_dev(__entry),
+ (unsigned long long)__entry->blkstart)
+);
+
TRACE_EVENT(f2fs_issue_flush,
TP_PROTO(struct super_block *sb, unsigned int nobarrier,
@@ -1265,6 +1294,44 @@ TRACE_EVENT(f2fs_destroy_extent_tree,
__entry->node_cnt)
);
+DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
+
+ TP_PROTO(struct super_block *sb, int type, s64 count),
+
+ TP_ARGS(sb, type, count),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(s64, count)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->type = type;
+ __entry->count = count;
+ ),
+
+ TP_printk("dev = (%d,%d), %s, dirty count = %lld",
+ show_dev(__entry),
+ show_file_type(__entry->type),
+ __entry->count)
+);
+
+DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_enter,
+
+ TP_PROTO(struct super_block *sb, int type, s64 count),
+
+ TP_ARGS(sb, type, count)
+);
+
+DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit,
+
+ TP_PROTO(struct super_block *sb, int type, s64 count),
+
+ TP_ARGS(sb, type, count)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index cd0c1a1a9ccf..15048097910f 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -173,6 +173,24 @@ struct inodes_stat_t {
#define FS_IOC32_SETVERSION _IOW('v', 2, int)
/*
+ * File system encryption support
+ */
+/* Policy provided via an ioctl on the topmost directory */
+#define FS_KEY_DESCRIPTOR_SIZE 8
+
+struct fscrypt_policy {
+ __u8 version;
+ __u8 contents_encryption_mode;
+ __u8 filenames_encryption_mode;
+ __u8 flags;
+ __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+} __packed;
+
+#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
+
+/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
*/
#define FS_SECRM_FL 0x00000001 /* Secure deletion */
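User space drives these ioctls on a file descriptor for the directory being protected. A hedged sketch of setting a policy — the encryption mode values are deliberately left unset because they are filesystem-specific and not part of this hunk:

#include <sys/ioctl.h>
#include <string.h>

static int set_policy(int dirfd, const __u8 key_desc[FS_KEY_DESCRIPTOR_SIZE])
{
	struct fscrypt_policy pol;

	memset(&pol, 0, sizeof(pol));
	pol.version = 0;
	/* contents/filenames encryption modes must be filled in with
	 * values the target filesystem supports (not shown here). */
	memcpy(pol.master_key_descriptor, key_desc, FS_KEY_DESCRIPTOR_SIZE);

	return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &pol);
}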
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1656a48d5bee..f92ec9a71af3 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1916,6 +1916,7 @@ static struct cftype files[] = {
{
.name = "memory_pressure",
.read_u64 = cpuset_read_u64,
+ .private = FILE_MEMORY_PRESSURE,
},
{
diff --git a/kernel/fork.c b/kernel/fork.c
index 07cd0d68ee02..1d168ba55118 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -178,13 +178,13 @@ static inline void free_thread_stack(unsigned long *stack)
# else
static struct kmem_cache *thread_stack_cache;
-static struct thread_info *alloc_thread_stack_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}
-static void free_stack(unsigned long *stack)
+static void free_thread_stack(unsigned long *stack)
{
kmem_cache_free(thread_stack_cache, stack);
}
@@ -697,6 +697,26 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
+static inline void __mmput(struct mm_struct *mm)
+{
+ VM_BUG_ON(atomic_read(&mm->mm_users));
+
+ uprobe_clear_state(mm);
+ exit_aio(mm);
+ ksm_exit(mm);
+ khugepaged_exit(mm); /* must run before exit_mmap */
+ exit_mmap(mm);
+ set_mm_exe_file(mm, NULL);
+ if (!list_empty(&mm->mmlist)) {
+ spin_lock(&mmlist_lock);
+ list_del(&mm->mmlist);
+ spin_unlock(&mmlist_lock);
+ }
+ if (mm->binfmt)
+ module_put(mm->binfmt->module);
+ mmdrop(mm);
+}
+
/*
* Decrement the use count and release all resources for an mm.
*/
@@ -706,26 +726,27 @@ int mmput(struct mm_struct *mm)
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
- uprobe_clear_state(mm);
- exit_aio(mm);
- ksm_exit(mm);
- khugepaged_exit(mm); /* must run before exit_mmap */
- exit_mmap(mm);
- set_mm_exe_file(mm, NULL);
- if (!list_empty(&mm->mmlist)) {
- spin_lock(&mmlist_lock);
- list_del(&mm->mmlist);
- spin_unlock(&mmlist_lock);
- }
- if (mm->binfmt)
- module_put(mm->binfmt->module);
- mmdrop(mm);
+ __mmput(mm);
mm_freed = 1;
}
return mm_freed;
}
EXPORT_SYMBOL_GPL(mmput);
+static void mmput_async_fn(struct work_struct *work)
+{
+ struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+ __mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+ if (atomic_dec_and_test(&mm->mm_users)) {
+ INIT_WORK(&mm->async_put_work, mmput_async_fn);
+ schedule_work(&mm->async_put_work);
+ }
+}
+
/**
* set_mm_exe_file - change a reference to the mm's executable file
*
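Factoring the teardown into __mmput() is what makes mmput_async() possible: __mmput() can sleep (exit_mmap(), exit_aio()), so a caller in atomic context defers it to a workqueue instead of dropping the reference inline. Illustrative caller, helper name hypothetical:

/* Hypothetical: drop a possibly-final mm reference from atomic context. */
static void release_mm_ref_atomic(struct mm_struct *mm)
{
	/*
	 * mmput() may sleep inside __mmput(); mmput_async() pushes the
	 * heavyweight teardown to schedule_work() instead.
	 */
	mmput_async(mm);
}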
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index 7080ae1eb6c1..f850e906564b 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -98,6 +98,12 @@ void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
}
EXPORT_SYMBOL(__gcov_merge_icall_topn);
+void __gcov_exit(void)
+{
+ /* Unused. */
+}
+EXPORT_SYMBOL(__gcov_exit);
+
/**
* gcov_enable_events - enable event reporting through gcov_event()
*
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index e25e92fb44fa..46a18e72bce6 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
#include <linux/vmalloc.h>
#include "gcov.h"
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ >= 7)
+#define GCOV_COUNTERS 9
+#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
#define GCOV_COUNTERS 9
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 8ef1919d63b2..d580b7d6ee6d 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -776,6 +776,8 @@ static void lock_torture_cleanup(void)
else
lock_torture_print_module_parms(cxt.cur_ops,
"End of test: SUCCESS");
+ kfree(cxt.lwsa);
+ kfree(cxt.lrsa);
torture_cleanup_end();
}
@@ -917,6 +919,8 @@ static int __init lock_torture_init(void)
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
+ kfree(writer_tasks);
+ writer_tasks = NULL;
firsterr = -ENOMEM;
goto unwind;
}
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 99b8d991126f..0befa20ce96e 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -106,6 +106,19 @@ bool osq_lock(struct optimistic_spin_queue *lock)
prev = decode_cpu(old);
node->prev = prev;
+
+ /*
+ * osq_lock() unqueue
+ *
+ * node->prev = prev osq_wait_next()
+ * WMB MB
+ * prev->next = node next->prev = prev // unqueue-C
+ *
+ * Here 'node->prev' and 'next->prev' are the same variable and we need
+ * to ensure these stores happen in-order to avoid corrupting the list.
+ */
+ smp_wmb();
+
WRITE_ONCE(prev->next, node);
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2dbe599d34d5..0271e8e2c020 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3176,20 +3176,20 @@ static void sched_freq_tick_pelt(int cpu)
#ifdef CONFIG_SCHED_WALT
static void sched_freq_tick_walt(int cpu)
{
- unsigned long cpu_utilization = cpu_util(cpu);
+ unsigned long cpu_utilization = cpu_util_freq(cpu);
unsigned long capacity_curr = capacity_curr_of(cpu);
if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
return sched_freq_tick_pelt(cpu);
/*
- * Add a margin to the WALT utilization.
+ * Add a margin to the WALT utilization to check if we will need to
+ * increase frequency.
* NOTE: WALT tracks a single CPU signal for all the scheduling
* classes, thus this margin is going to be added to the DL class as
* well, which is something we do not do in sched_freq_tick_pelt case.
*/
- cpu_utilization = add_capacity_margin(cpu_utilization);
- if (cpu_utilization <= capacity_curr)
+ if (add_capacity_margin(cpu_utilization) <= capacity_curr)
return;
/*
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
index f10d9f7d6d07..6ffb23adbcef 100644
--- a/kernel/sched/cpufreq_sched.c
+++ b/kernel/sched/cpufreq_sched.c
@@ -235,6 +235,18 @@ out:
cpufreq_cpu_put(policy);
}
+#ifdef CONFIG_SCHED_WALT
+static inline unsigned long
+requested_capacity(struct sched_capacity_reqs *scr)
+{
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ return scr->cfs;
+ return scr->cfs + scr->rt;
+}
+#else
+#define requested_capacity(scr) (scr->cfs + scr->rt)
+#endif
+
void update_cpu_capacity_request(int cpu, bool request)
{
unsigned long new_capacity;
@@ -245,7 +257,7 @@ void update_cpu_capacity_request(int cpu, bool request)
scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
- new_capacity = scr->cfs + scr->rt;
+ new_capacity = requested_capacity(scr);
new_capacity = new_capacity * capacity_margin
/ SCHED_CAPACITY_SCALE;
new_capacity += scr->dl;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index e12309c1b07b..b90f7434e13b 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -64,8 +64,9 @@ struct sugov_cpu {
struct update_util_data update_util;
struct sugov_policy *sg_policy;
- unsigned long iowait_boost;
- unsigned long iowait_boost_max;
+ bool iowait_boost_pending;
+ unsigned int iowait_boost;
+ unsigned int iowait_boost_max;
u64 last_update;
/* The fields below are only needed when sharing a policy. */
@@ -224,30 +225,54 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
unsigned int flags)
{
if (flags & SCHED_CPUFREQ_IOWAIT) {
- sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ if (sg_cpu->iowait_boost_pending)
+ return;
+
+ sg_cpu->iowait_boost_pending = true;
+
+ if (sg_cpu->iowait_boost) {
+ sg_cpu->iowait_boost <<= 1;
+ if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
+ sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ } else {
+ sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+ }
} else if (sg_cpu->iowait_boost) {
s64 delta_ns = time - sg_cpu->last_update;
/* Clear iowait_boost if the CPU appears to have been idle. */
- if (delta_ns > TICK_NSEC)
+ if (delta_ns > TICK_NSEC) {
sg_cpu->iowait_boost = 0;
+ sg_cpu->iowait_boost_pending = false;
+ }
}
}
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
unsigned long *max)
{
- unsigned long boost_util = sg_cpu->iowait_boost;
- unsigned long boost_max = sg_cpu->iowait_boost_max;
+ unsigned int boost_util, boost_max;
- if (!boost_util)
+ if (!sg_cpu->iowait_boost)
return;
+ if (sg_cpu->iowait_boost_pending) {
+ sg_cpu->iowait_boost_pending = false;
+ } else {
+ sg_cpu->iowait_boost >>= 1;
+ if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+ sg_cpu->iowait_boost = 0;
+ return;
+ }
+ }
+
+ boost_util = sg_cpu->iowait_boost;
+ boost_max = sg_cpu->iowait_boost_max;
+
if (*util * boost_max < *max * boost_util) {
*util = boost_util;
*max = boost_max;
}
- sg_cpu->iowait_boost >>= 1;
}
#ifdef CONFIG_NO_HZ_COMMON
@@ -320,6 +345,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
delta_ns = last_freq_update_time - j_sg_cpu->last_update;
if (delta_ns > TICK_NSEC) {
j_sg_cpu->iowait_boost = 0;
+ j_sg_cpu->iowait_boost_pending = false;
continue;
}
if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
@@ -589,7 +615,6 @@ static int sugov_init(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy;
struct sugov_tunables *tunables;
- unsigned int lat;
int ret = 0;
/* State should be equivalent to EXIT */
@@ -628,12 +653,19 @@ static int sugov_init(struct cpufreq_policy *policy)
goto stop_kthread;
}
- tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
- tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
- lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- if (lat) {
- tunables->up_rate_limit_us *= lat;
- tunables->down_rate_limit_us *= lat;
+ if (policy->up_transition_delay_us && policy->down_transition_delay_us) {
+ tunables->up_rate_limit_us = policy->up_transition_delay_us;
+ tunables->down_rate_limit_us = policy->down_transition_delay_us;
+ } else {
+ unsigned int lat;
+
+ tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
+ tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
+ lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (lat) {
+ tunables->up_rate_limit_us *= lat;
+ tunables->down_rate_limit_us *= lat;
+ }
}
policy->governor_data = sg_policy;
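The boost now ramps instead of jumping straight to max: it starts at policy->min, doubles on each refreshed IOWAIT tick (capped at iowait_boost_max), and halves once no new IOWAIT wakeup arrives, dropping to zero below policy->min. A standalone model of that ramp with illustrative values:

/* Standalone model of the doubling/halving boost (values illustrative). */
static unsigned int boost, boost_max = 1800000, policy_min = 300000;

static void iowait_tick(bool iowait_wakeup)
{
	if (iowait_wakeup) {
		boost = boost ? boost << 1 : policy_min;
		if (boost > boost_max)
			boost = boost_max;
	} else if (boost) {
		boost >>= 1;	/* decay when the boost is not refreshed */
		if (boost < policy_min)
			boost = 0;
	}
}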
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a105e97ab6bf..905d8cb5a6eb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -18,6 +18,8 @@
#include <linux/slab.h>
+#include "walt.h"
+
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 853064319b0d..aa016919eab8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5809,10 +5809,11 @@ static inline void hrtick_update(struct rq *rq)
#endif
#ifdef CONFIG_SMP
+static bool __cpu_overutilized(int cpu, int delta);
static bool cpu_overutilized(int cpu);
unsigned long boosted_cpu_util(int cpu);
#else
-#define boosted_cpu_util(cpu) cpu_util(cpu)
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
@@ -6626,10 +6627,8 @@ end:
*/
static int sched_group_energy(struct energy_env *eenv)
{
- struct sched_domain *sd;
- int cpu, total_energy = 0;
struct cpumask visit_cpus;
- struct sched_group *sg;
+ u64 total_energy = 0;
WARN_ON(!eenv->sg_top->sge);
@@ -6637,8 +6636,8 @@ static int sched_group_energy(struct energy_env *eenv)
while (!cpumask_empty(&visit_cpus)) {
struct sched_group *sg_shared_cap = NULL;
-
- cpu = cpumask_first(&visit_cpus);
+ int cpu = cpumask_first(&visit_cpus);
+ struct sched_domain *sd;
/*
* Is the group utilization affected by cpus outside this
@@ -6650,7 +6649,7 @@ static int sched_group_energy(struct energy_env *eenv)
sg_shared_cap = sd->parent->groups;
for_each_domain(cpu, sd) {
- sg = sd->groups;
+ struct sched_group *sg = sd->groups;
/* Has this sched_domain already been visited? */
if (sd->child && group_first_cpu(sg) != cpu)
@@ -6686,11 +6685,9 @@ static int sched_group_energy(struct energy_env *eenv)
idle_idx = group_idle_state(eenv, sg);
group_util = group_norm_util(eenv, sg);
- sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
- >> SCHED_CAPACITY_SHIFT;
+ sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
sg_idle_energy = ((SCHED_LOAD_SCALE-group_util)
- * sg->sge->idle_states[idle_idx].power)
- >> SCHED_CAPACITY_SHIFT;
+ * sg->sge->idle_states[idle_idx].power);
total_energy += sg_busy_energy + sg_idle_energy;
@@ -6715,7 +6712,7 @@ next_cpu:
continue;
}
- eenv->energy = total_energy;
+ eenv->energy = total_energy >> SCHED_CAPACITY_SHIFT;
return 0;
}
@@ -7004,9 +7001,14 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
return __task_fits(p, cpu, 0);
}
+static bool __cpu_overutilized(int cpu, int delta)
+{
+ return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+}
+
static bool cpu_overutilized(int cpu)
{
- return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+ return __cpu_overutilized(cpu, 0);
}
#ifdef CONFIG_SCHED_TUNE
@@ -7085,7 +7087,7 @@ schedtune_task_margin(struct task_struct *task)
unsigned long
boosted_cpu_util(int cpu)
{
- unsigned long util = cpu_util(cpu);
+ unsigned long util = cpu_util_freq(cpu);
long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);
@@ -7729,6 +7731,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
}
if (target_cpu != prev_cpu) {
+ int delta = 0;
struct energy_env eenv = {
.util_delta = task_util(p),
.src_cpu = prev_cpu,
@@ -7736,8 +7739,13 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
.task = p,
};
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ delta = task_util(p);
+#endif
/* Not enough spare capacity on previous cpu */
- if (cpu_overutilized(prev_cpu)) {
+ if (__cpu_overutilized(prev_cpu, delta)) {
schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
goto unlock;
@@ -11348,8 +11356,8 @@ static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
return true;
/* Do idle load balance if there is a misfit task */
- if (energy_aware() && rq->misfit_task)
- return 1;
+ if (energy_aware())
+ return rq->misfit_task;
return (rq->nr_running >= 2);
}
@@ -11391,7 +11399,7 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
#ifndef CONFIG_SCHED_HMP
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_busy, cpu));
- if (sd && !energy_aware()) {
+ if (sd) {
sgc = sd->groups->sgc;
nr_busy = atomic_read(&sgc->nr_busy_cpus);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 33bf0c07e757..c53970b5a8f0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2363,6 +2363,12 @@ static inline unsigned long __cpu_util(int cpu, int delta)
unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
unsigned long capacity = capacity_orig_of(cpu);
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ util = div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
+ walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+
delta += util;
if (delta < 0)
return 0;
@@ -2375,6 +2381,19 @@ static inline unsigned long cpu_util(int cpu)
return __cpu_util(cpu, 0);
}
+static inline unsigned long cpu_util_freq(int cpu)
+{
+ unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+ unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ util = div64_u64(cpu_rq(cpu)->prev_runnable_sum,
+ walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+ return (util >= capacity) ? capacity : util;
+}
+
#endif
#ifdef CONFIG_CPU_FREQ_GOV_SCHED
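cpu_util_freq() differs from cpu_util() only in the WALT signal it samples: prev_runnable_sum (the last complete window) rather than cumulative_runnable_avg. The arithmetic, restated as a self-contained helper using the same formula as above:

/* Restates the WALT branch of cpu_util_freq(); parameters stand in for
 * the globals used in the real code. */
static unsigned long walt_freq_util(u64 prev_runnable_sum, u64 ravg_window,
				    unsigned long capacity,
				    unsigned int load_shift)
{
	unsigned long util = div64_u64(prev_runnable_sum,
				       ravg_window >> load_shift);

	return util >= capacity ? capacity : util;
}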
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 92c3aae8e056..28e999554463 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -111,8 +111,10 @@ walt_dec_cumulative_runnable_avg(struct rq *rq,
static void
fixup_cumulative_runnable_avg(struct rq *rq,
- struct task_struct *p, s64 task_load_delta)
+ struct task_struct *p, u64 new_task_load)
{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+
rq->cumulative_runnable_avg += task_load_delta;
if ((s64)rq->cumulative_runnable_avg < 0)
panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
@@ -802,11 +804,11 @@ void walt_set_window_start(struct rq *rq)
int cpu = cpu_of(rq);
struct rq *sync_rq = cpu_rq(sync_cpu);
- if (rq->window_start)
+ if (likely(rq->window_start))
return;
if (cpu == sync_cpu) {
- rq->window_start = walt_ktime_clock();
+ rq->window_start = 1;
} else {
raw_spin_unlock(&rq->lock);
double_rq_lock(rq, sync_rq);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eba904bae48c..38d73a6e2857 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2667,13 +2667,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
if (!command || !ftrace_enabled) {
/*
- * If these are control ops, they still need their
- * per_cpu field freed. Since, function tracing is
+ * If these are dynamic or control ops, they still
+ * need their data freed. Since function tracing is
* not currently active, we can just free them
* without synchronizing all CPUs.
*/
- if (ops->flags & FTRACE_OPS_FL_CONTROL)
- control_ops_free(ops);
+ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL))
+ goto free_ops;
+
return 0;
}
@@ -2728,6 +2729,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
schedule_on_each_cpu(ftrace_sync);
+ free_ops:
arch_ftrace_trampoline_free(ops);
if (ops->flags & FTRACE_OPS_FL_CONTROL)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a579a874045b..35613a2a5164 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5397,7 +5397,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
+ if (tr->max_buffer.buffer)
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
tracing_reset_online_cpus(&tr->max_buffer);
#endif
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index b0f86ea77881..ca70d11b8aa7 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -272,7 +272,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
goto out_free;
if (cnt > 1) {
if (trace_selftest_test_global_cnt == 0)
- goto out;
+ goto out_free;
}
if (trace_selftest_test_dyn_cnt == 0)
goto out_free;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index bd5937dc6fe9..f703d462b7f8 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -57,7 +57,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
void *data);
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
@@ -1462,7 +1462,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -2966,12 +2966,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
return len;
}
-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
struct l2cap_conf_opt *opt = *ptr;
BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+ if (size < L2CAP_CONF_OPT_SIZE + len)
+ return;
+
opt->type = type;
opt->len = len;
@@ -2996,7 +2999,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
struct l2cap_conf_efs efs;
@@ -3024,7 +3027,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
}
l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, size);
}
static void l2cap_ack_timeout(struct work_struct *work)
@@ -3170,11 +3173,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
chan->ack_win = chan->tx_win;
}
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
struct l2cap_conf_req *req = data;
struct l2cap_conf_rfc rfc = { .mode = chan->mode };
void *ptr = req->data;
+ void *endptr = data + data_size;
u16 size;
BT_DBG("chan %pK", chan);
@@ -3199,7 +3203,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
done:
if (chan->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
switch (chan->mode) {
case L2CAP_MODE_BASIC:
@@ -3218,7 +3222,7 @@ done:
rfc.max_pdu_size = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_MODE_ERTM:
@@ -3238,21 +3242,21 @@ done:
L2CAP_DEFAULT_TX_WINDOW);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win, endptr - ptr);
if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
- chan->fcs);
+ chan->fcs, endptr - ptr);
}
break;
@@ -3270,17 +3274,17 @@ done:
rfc.max_pdu_size = cpu_to_le16(size);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
- chan->fcs);
+ chan->fcs, endptr - ptr);
}
break;
}
@@ -3291,10 +3295,11 @@ done:
return ptr - data;
}
-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
+ void *endptr = data + data_size;
void *req = chan->conf_req;
int len = chan->conf_len;
int type, hint, olen;
@@ -3396,7 +3401,7 @@ done:
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
}
if (result == L2CAP_CONF_SUCCESS) {
@@ -3409,7 +3414,7 @@ done:
chan->omtu = mtu;
set_bit(CONF_MTU_DONE, &chan->conf_state);
}
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
if (remote_efs) {
if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
@@ -3423,7 +3428,7 @@ done:
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
} else {
/* Send PENDING Conf Rsp */
result = L2CAP_CONF_PENDING;
@@ -3456,7 +3461,7 @@ done:
set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
chan->remote_id = efs.id;
@@ -3470,7 +3475,7 @@ done:
le32_to_cpu(efs.sdu_itime);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
}
break;
@@ -3484,7 +3489,7 @@ done:
set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
break;
@@ -3506,10 +3511,11 @@ done:
}
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- void *data, u16 *result)
+ void *data, size_t size, u16 *result)
{
struct l2cap_conf_req *req = data;
void *ptr = req->data;
+ void *endptr = data + size;
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
@@ -3527,13 +3533,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
chan->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
chan->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
break;
case L2CAP_CONF_FLUSH_TO:
chan->flush_to = val;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, chan->flush_to);
+ 2, chan->flush_to, endptr - ptr);
break;
case L2CAP_CONF_RFC:
@@ -3547,13 +3553,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
chan->fcs = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_CONF_EWS:
chan->ack_win = min_t(u16, val, chan->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win, endptr - ptr);
break;
case L2CAP_CONF_EFS:
@@ -3566,7 +3572,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
break;
case L2CAP_CONF_FCS:
@@ -3671,7 +3677,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
return;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -3879,7 +3885,7 @@ sendresp:
u8 buf[128];
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -3957,7 +3963,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
break;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, req), req);
+ l2cap_build_conf_req(chan, req, sizeof(req)), req);
chan->num_conf_req++;
break;
@@ -4069,7 +4075,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
}
/* Complete config. */
- len = l2cap_parse_conf_req(chan, rsp);
+ len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto unlock;
@@ -4103,7 +4109,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
u8 buf[64];
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -4163,7 +4169,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
char buf[64];
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- buf, &result);
+ buf, sizeof(buf), &result);
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
@@ -4193,7 +4199,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- req, &result);
+ req, sizeof(req), &result);
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
@@ -4771,7 +4777,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
}
@@ -7443,7 +7449,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf),
+ l2cap_build_conf_req(chan, buf, sizeof(buf)),
buf);
chan->num_conf_req++;
}
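Every call site in this file now follows one pattern: the builder computes the remaining room as endptr - ptr, and l2cap_add_conf_opt() refuses to append an option that would not fit, instead of writing past the stack buffer. The pattern, distilled into a self-contained sketch with simplified types:

#include <stddef.h>
#include <string.h>

#define OPT_HDR_SIZE 2	/* type + len bytes, as in L2CAP_CONF_OPT_SIZE */

/* Bounded append: silently drop an option that no longer fits. */
static void add_opt(unsigned char **ptr, unsigned char type,
		    unsigned char len, const void *val, size_t room)
{
	if (room < OPT_HDR_SIZE + len)
		return;		/* would overflow the response buffer */

	(*ptr)[0] = type;
	(*ptr)[1] = len;
	memcpy(*ptr + OPT_HDR_SIZE, val, len);
	*ptr += OPT_HDR_SIZE + len;
}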
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index 6b437e8760d3..12e8cf4bda9f 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -580,19 +580,14 @@ static int __net_init lowpan_frags_init_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
- int res;
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
- res = inet_frags_init_net(&ieee802154_lowpan->frags);
- if (res)
- return res;
- res = lowpan_frags_ns_sysctl_register(net);
- if (res)
- inet_frags_uninit_net(&ieee802154_lowpan->frags);
- return res;
+ inet_frags_init_net(&ieee802154_lowpan->frags);
+
+ return lowpan_frags_ns_sysctl_register(net);
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index fe144dae7372..c5fb2f694ed0 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -234,10 +234,8 @@ evict_again:
cond_resched();
if (read_seqretry(&f->rnd_seqlock, seq) ||
- percpu_counter_sum(&nf->mem))
+ sum_frag_mem_limit(nf))
goto evict_again;
-
- percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b8a0607dab96..e2e162432aa3 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -840,8 +840,6 @@ static void __init ip4_frags_ctl_register(void)
static int __net_init ipv4_frags_init_net(struct net *net)
{
- int res;
-
/* Fragment cache limits.
*
* The fragment memory accounting code (tries to) account for
@@ -865,13 +863,9 @@ static int __net_init ipv4_frags_init_net(struct net *net)
*/
net->ipv4.frags.timeout = IP_FRAG_TIME;
- res = inet_frags_init_net(&net->ipv4.frags);
- if (res)
- return res;
- res = ip4_frags_ns_ctl_register(net);
- if (res)
- inet_frags_uninit_net(&net->ipv4.frags);
- return res;
+ inet_frags_init_net(&net->ipv4.frags);
+
+ return ip4_frags_ns_ctl_register(net);
}
static void __net_exit ipv4_frags_exit_net(struct net *net)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9bdd7847ef3a..79203972bfc6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2269,6 +2269,10 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
+ /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
+ * issue in __tcp_select_window()
+ */
+ icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4b707ad4ffbd..40c29712f32a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5230,7 +5230,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
* our DAD process, so we don't need
* to do it again
*/
- if (!(ifp->rt->rt6i_node))
+ if (!rcu_access_pointer(ifp->rt->rt6i_node))
ip6_ins_rt(ifp->rt);
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index aad8cdf15472..c23e02a7ccb0 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -150,11 +150,23 @@ static struct fib6_node *node_alloc(void)
return fn;
}
-static void node_free(struct fib6_node *fn)
+static void node_free_immediate(struct fib6_node *fn)
+{
+ kmem_cache_free(fib6_node_kmem, fn);
+}
+
+static void node_free_rcu(struct rcu_head *head)
{
+ struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
+
kmem_cache_free(fib6_node_kmem, fn);
}
+static void node_free(struct fib6_node *fn)
+{
+ call_rcu(&fn->rcu, node_free_rcu);
+}
+
static void rt6_rcu_free(struct rt6_info *rt)
{
call_rcu(&rt->dst.rcu_head, dst_rcu_free);
@@ -191,6 +203,12 @@ static void rt6_release(struct rt6_info *rt)
}
}
+static void fib6_free_table(struct fib6_table *table)
+{
+ inetpeer_invalidate_tree(&table->tb6_peers);
+ kfree(table);
+}
+
static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
unsigned int h;
@@ -588,9 +606,9 @@ insert_above:
if (!in || !ln) {
if (in)
- node_free(in);
+ node_free_immediate(in);
if (ln)
- node_free(ln);
+ node_free_immediate(ln);
return ERR_PTR(-ENOMEM);
}
@@ -857,7 +875,7 @@ add:
rt->dst.rt6_next = iter;
*ins = rt;
- rt->rt6i_node = fn;
+ rcu_assign_pointer(rt->rt6i_node, fn);
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
@@ -882,7 +900,7 @@ add:
return err;
*ins = rt;
- rt->rt6i_node = fn;
+ rcu_assign_pointer(rt->rt6i_node, fn);
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
@@ -1015,7 +1033,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
root, and then (in failure) stale node
in main tree.
*/
- node_free(sfn);
+ node_free_immediate(sfn);
err = PTR_ERR(sn);
goto failure;
}
@@ -1442,8 +1460,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
int fib6_del(struct rt6_info *rt, struct nl_info *info)
{
+ struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
+ lockdep_is_held(&rt->rt6i_table->tb6_lock));
struct net *net = info->nl_net;
- struct fib6_node *fn = rt->rt6i_node;
struct rt6_info **rtp;
#if RT6_DEBUG >= 2
@@ -1632,7 +1651,9 @@ static int fib6_clean_node(struct fib6_walker *w)
if (res) {
#if RT6_DEBUG >= 2
pr_debug("%s: del failed: rt=%p@%p err=%d\n",
- __func__, rt, rt->rt6i_node, res);
+ __func__, rt,
+ rcu_access_pointer(rt->rt6i_node),
+ res);
#endif
continue;
}
@@ -1870,15 +1891,22 @@ out_timer:
static void fib6_net_exit(struct net *net)
{
+ unsigned int i;
+
rt6_ifdown(net, NULL);
del_timer_sync(&net->ipv6.ip6_fib_timer);
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
- kfree(net->ipv6.fib6_local_tbl);
-#endif
- inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
- kfree(net->ipv6.fib6_main_tbl);
+ for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
+ struct hlist_head *head = &net->ipv6.fib_table_hash[i];
+ struct hlist_node *tmp;
+ struct fib6_table *tb;
+
+ hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
+ hlist_del(&tb->tb6_hlist);
+ fib6_free_table(tb);
+ }
+ }
+
kfree(net->ipv6.fib_table_hash);
kfree(net->ipv6.rt6_stats);
}
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index bab4441ed4e4..eb2dc39f7066 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -649,18 +649,12 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig);
static int nf_ct_net_init(struct net *net)
{
- int res;
-
net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
- res = inet_frags_init_net(&net->nf_frag.frags);
- if (res)
- return res;
- res = nf_ct_frag6_sysctl_register(net);
- if (res)
- inet_frags_uninit_net(&net->nf_frag.frags);
- return res;
+ inet_frags_init_net(&net->nf_frag.frags);
+
+ return nf_ct_frag6_sysctl_register(net);
}
static void nf_ct_net_exit(struct net *net)
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index f9f02581c4ca..f99a04674419 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
- unsigned int len;
switch (**nexthdr) {
@@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
- len = ipv6_optlen(exthdr);
- if (len + offset >= IPV6_MAXPLEN)
+ offset += ipv6_optlen(exthdr);
+ if (offset > IPV6_MAXPLEN)
return -EINVAL;
- offset += len;
*nexthdr = &exthdr->nexthdr;
}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index a234552a7e3d..58f2139ebb5e 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -708,19 +708,13 @@ static void ip6_frags_sysctl_unregister(void)
static int __net_init ipv6_frags_init_net(struct net *net)
{
- int res;
-
net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
- res = inet_frags_init_net(&net->ipv6.frags);
- if (res)
- return res;
- res = ip6_frags_ns_sysctl_register(net);
- if (res)
- inet_frags_uninit_net(&net->ipv6.frags);
- return res;
+ inet_frags_init_net(&net->ipv6.frags);
+
+ return ip6_frags_ns_sysctl_register(net);
}
static void __net_exit ipv6_frags_exit_net(struct net *net)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dd37fe0b6a49..2e249d4a2bf3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1241,7 +1241,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt)
static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
- if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+ u32 rt_cookie;
+
+ if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
return NULL;
if (rt6_check_expired(rt))
@@ -1309,8 +1311,14 @@ static void ip6_link_failure(struct sk_buff *skb)
if (rt->rt6i_flags & RTF_CACHE) {
dst_hold(&rt->dst);
ip6_del_rt(rt);
- } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
- rt->rt6i_node->fn_sernum = -1;
+ } else {
+ struct fib6_node *fn;
+
+ rcu_read_lock();
+ fn = rcu_dereference(rt->rt6i_node);
+ if (fn && (rt->rt6i_flags & RTF_DEFAULT))
+ fn->fn_sernum = -1;
+ rcu_read_unlock();
}
}
}
@@ -1327,7 +1335,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
return !(rt->rt6i_flags & RTF_CACHE) &&
- (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+ (rt->rt6i_flags & RTF_PCPU ||
+ rcu_access_pointer(rt->rt6i_node));
}
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index b0e0555e79ad..d44469cd5754 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1714,8 +1714,7 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
-
- if (sk == NULL) {
+ if (!sk) {
/*
* Here, the qtaguid_find_sk() using connection tracking
* couldn't find the owner, so for now we just count them
@@ -1733,7 +1732,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
}
sock_uid = sk->sk_uid;
if (do_tag_stat)
- account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid), par);
+ account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid),
+ par);
/*
* The following two tests fail the match when:
@@ -1745,8 +1745,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
- if ((uid_gte(sk->sk_uid, uid_min) &&
- uid_lte(sk->sk_uid, uid_max)) ^
+ if ((uid_gte(sock_uid, uid_min) &&
+ uid_lte(sock_uid, uid_max)) ^
!(info->invert & XT_QTAGUID_UID)) {
MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
par->hooknum);
@@ -1760,16 +1760,18 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
set_sk_callback_lock = true;
read_lock_bh(&sk->sk_callback_lock);
MT_DEBUG("qtaguid[%d]: sk=%pK->sk_socket=%pK->file=%pK\n",
- par->hooknum, sk, sk->sk_socket,
- sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+ par->hooknum, sk, sk->sk_socket,
+ sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
filp = sk->sk_socket ? sk->sk_socket->file : NULL;
if (!filp) {
- res = ((info->match ^ info->invert) & XT_QTAGUID_GID) == 0;
+ res = ((info->match ^ info->invert) &
+ XT_QTAGUID_GID) == 0;
atomic64_inc(&qtu_events.match_no_sk_gid);
goto put_sock_ret_res;
}
MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
- par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
+ par->hooknum, filp ?
+ from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
gid_lte(filp->f_cred->fsgid, gid_max)) ^
!(info->invert & XT_QTAGUID_GID)) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c8700399d7fd..e8994c345c53 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3272,9 +3272,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_state *x_new[XFRM_MAX_DEPTH];
struct xfrm_migrate *mp;
+ /* Stage 0 - sanity checks */
if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
goto out;
+ if (dir >= XFRM_POLICY_MAX) {
+ err = -EINVAL;
+ goto out;
+ }
+
/* Stage 1 - find policy */
if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
err = -ENOENT;
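The added dir check matters because dir is a netlink-supplied value that is later used to index fixed-size per-direction policy arrays. Distilled to its essence, under illustrative types:

#include <errno.h>

#define POLICY_DIR_MAX 3	/* stands in for XFRM_POLICY_MAX */

/* Validate an externally supplied index before it addresses an array. */
static int lookup_by_dir(unsigned int dir, int table[POLICY_DIR_MAX], int *out)
{
	if (dir >= POLICY_DIR_MAX)
		return -EINVAL;	/* untrusted index out of range */

	*out = table[dir];
	return 0;
}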
diff --git a/sound/isa/msnd/msnd_midi.c b/sound/isa/msnd/msnd_midi.c
index ffc67fd80c23..58e59cd3c95c 100644
--- a/sound/isa/msnd/msnd_midi.c
+++ b/sound/isa/msnd/msnd_midi.c
@@ -120,24 +120,24 @@ void snd_msndmidi_input_read(void *mpuv)
unsigned long flags;
struct snd_msndmidi *mpu = mpuv;
void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
+ u16 head, tail, size;
spin_lock_irqsave(&mpu->input_lock, flags);
- while (readw(mpu->dev->MIDQ + JQS_wTail) !=
- readw(mpu->dev->MIDQ + JQS_wHead)) {
- u16 wTmp, val;
- val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));
-
- if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,
- &mpu->mode))
- snd_rawmidi_receive(mpu->substream_input,
- (unsigned char *)&val, 1);
-
- wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;
- if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))
- writew(0, mpu->dev->MIDQ + JQS_wHead);
- else
- writew(wTmp, mpu->dev->MIDQ + JQS_wHead);
+ head = readw(mpu->dev->MIDQ + JQS_wHead);
+ tail = readw(mpu->dev->MIDQ + JQS_wTail);
+ size = readw(mpu->dev->MIDQ + JQS_wSize);
+ if (head > size || tail > size)
+ goto out;
+ while (head != tail) {
+ unsigned char val = readw(pwMIDQData + 2 * head);
+
+ if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode))
+ snd_rawmidi_receive(mpu->substream_input, &val, 1);
+ if (++head > size)
+ head = 0;
+ writew(head, mpu->dev->MIDQ + JQS_wHead);
}
+ out:
spin_unlock_irqrestore(&mpu->input_lock, flags);
}
EXPORT_SYMBOL(snd_msndmidi_input_read);
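Both queue consumers (here and in msnd_pinnacle.c below) now snapshot head, tail and size once, validate them, and only then index the buffer, so a misbehaving device can no longer steer reads outside the queue. The hardened consumer, distilled — the MMIO accessors (readw/writew) are replaced by a plain pointer for brevity:

/* Distilled ring consumer; the indices come from untrusted hardware. */
static void drain_queue(unsigned short *q_head, unsigned short tail,
			unsigned short size, const unsigned short *data,
			void (*consume)(unsigned short val))
{
	unsigned short head = *q_head;

	if (head > size || tail > size)
		return;		/* corrupt indices: refuse to read at all */

	while (head != tail) {
		consume(data[head]);
		if (++head > size)
			head = 0;
		*q_head = head;	/* publish progress, as writew() does */
	}
}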
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 4c072666115d..a31ea6c22d19 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -170,23 +170,24 @@ static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id)
{
struct snd_msnd *chip = dev_id;
void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
+ u16 head, tail, size;
/* Send ack to DSP */
/* inb(chip->io + HP_RXL); */
/* Evaluate queued DSP messages */
- while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {
- u16 wTmp;
-
- snd_msnd_eval_dsp_msg(chip,
- readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));
-
- wTmp = readw(chip->DSPQ + JQS_wHead) + 1;
- if (wTmp > readw(chip->DSPQ + JQS_wSize))
- writew(0, chip->DSPQ + JQS_wHead);
- else
- writew(wTmp, chip->DSPQ + JQS_wHead);
+ head = readw(chip->DSPQ + JQS_wHead);
+ tail = readw(chip->DSPQ + JQS_wTail);
+ size = readw(chip->DSPQ + JQS_wSize);
+ if (head > size || tail > size)
+ goto out;
+ while (head != tail) {
+ snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));
+ if (++head > size)
+ head = 0;
+ writew(head, chip->DSPQ + JQS_wHead);
}
+ out:
/* Send ack to DSP */
inb(chip->io + HP_RXL);
return IRQ_HANDLED;
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 74177189063c..d3125c169684 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -2150,8 +2150,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
if (stream->type != VORTEX_PCM_A3D) {
@@ -2161,7 +2160,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
VORTEX_RESOURCE_MIXIN)) < 0) {
memset(stream->resources,
0,
- sizeof(unsigned char) * VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
}
@@ -2174,8 +2173,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
stream->resources, en,
VORTEX_RESOURCE_A3D)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
dev_err(vortex->card->dev,
"out of A3D sources. Sorry\n");
return -EBUSY;
@@ -2289,8 +2287,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
VORTEX_RESOURCE_MIXOUT))
< 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
if ((src[i] =
@@ -2298,8 +2295,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
}
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index 6fed443186e5..c38cdda74623 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -22,6 +22,7 @@
#include <soc/qcom/smd.h>
#include <sound/q6core.h>
#include <sound/audio_cal_utils.h>
+#include <sound/adsp_err.h>
#define TIMEOUT_MS 1000
/*
@@ -37,16 +38,30 @@ enum {
CORE_MAX_CAL
};
+enum ver_query_status {
+ VER_QUERY_UNATTEMPTED,
+ VER_QUERY_UNSUPPORTED,
+ VER_QUERY_SUPPORTED
+};
+
+struct q6core_avcs_ver_info {
+ enum ver_query_status status;
+ struct avcs_fwk_ver_info avcs_fwk_ver_info;
+};
+
struct q6core_str {
struct apr_svc *core_handle_q;
wait_queue_head_t bus_bw_req_wait;
wait_queue_head_t cmd_req_wait;
+ wait_queue_head_t avcs_fwk_ver_req_wait;
u32 bus_bw_resp_received;
enum cmd_flags {
FLAG_NONE,
FLAG_CMDRSP_LICENSE_RESULT
} cmd_resp_received_flag;
+ u32 avcs_fwk_ver_resp_received;
struct mutex cmd_lock;
+ struct mutex ver_lock;
union {
struct avcs_cmdrsp_get_license_validation_result
cmdrsp_license_result;
@@ -55,6 +70,7 @@ struct q6core_str {
struct cal_type_data *cal_data[CORE_MAX_CAL];
uint32_t mem_map_cal_handle;
int32_t adsp_status;
+ struct q6core_avcs_ver_info q6core_avcs_ver_info;
};
static struct q6core_str q6core_lcl;
@@ -131,6 +147,17 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
q6core_lcl.bus_bw_resp_received = 1;
wake_up(&q6core_lcl.bus_bw_req_wait);
break;
+ case AVCS_CMD_GET_FWK_VERSION:
+ pr_debug("%s: Cmd = AVCS_CMD_GET_FWK_VERSION status[%s]\n",
+ __func__, adsp_err_get_err_str(payload1[1]));
+ /* Store the ADSP status negated, per the Linux negative-errno convention */
+ q6core_lcl.adsp_status = -payload1[1];
+ if (payload1[1] == ADSP_EUNSUPPORTED)
+ q6core_lcl.q6core_avcs_ver_info.status =
+ VER_QUERY_UNSUPPORTED;
+ q6core_lcl.avcs_fwk_ver_resp_received = 1;
+ wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
+ break;
default:
pr_err("%s: Invalid cmd rsp[0x%x][0x%x] opcode %d\n",
__func__,
@@ -174,6 +201,13 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
q6core_lcl.cmd_resp_received_flag = FLAG_CMDRSP_LICENSE_RESULT;
wake_up(&q6core_lcl.cmd_req_wait);
break;
+ case AVCS_CMDRSP_GET_FWK_VERSION:
+ pr_debug("%s: Received AVCS_CMDRSP_GET_FWK_VERSION\n",
+ __func__);
+ q6core_lcl.q6core_avcs_ver_info.status = VER_QUERY_SUPPORTED;
+ q6core_lcl.avcs_fwk_ver_resp_received = 1;
+ wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
+ break;
default:
pr_err("%s: Message id from adsp core svc: 0x%x\n",
__func__, data->opcode);
@@ -230,6 +264,97 @@ struct cal_block_data *cal_utils_get_cal_block_by_key(
return NULL;
}
+static int q6core_send_get_avcs_fwk_ver_cmd(void)
+{
+ struct apr_hdr avcs_ver_cmd;
+ int ret;
+
+ avcs_ver_cmd.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ avcs_ver_cmd.pkt_size = sizeof(struct apr_hdr);
+ avcs_ver_cmd.src_port = 0;
+ avcs_ver_cmd.dest_port = 0;
+ avcs_ver_cmd.token = 0;
+ avcs_ver_cmd.opcode = AVCS_CMD_GET_FWK_VERSION;
+
+ q6core_lcl.adsp_status = 0;
+ q6core_lcl.avcs_fwk_ver_resp_received = 0;
+
+ ret = apr_send_pkt(q6core_lcl.core_handle_q,
+ (uint32_t *) &avcs_ver_cmd);
+ if (ret < 0) {
+ pr_err("%s: failed to send apr packet, ret=%d\n", __func__,
+ ret);
+ goto done;
+ }
+
+ ret = wait_event_timeout(q6core_lcl.avcs_fwk_ver_req_wait,
+ (q6core_lcl.avcs_fwk_ver_resp_received == 1),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout for AVCS fwk version info\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (q6core_lcl.adsp_status < 0) {
+ /*
+ * adsp_err_get_err_str expects a positive value but we store
+ * the DSP error as negative to match the Linux error standard.
+ * Pass in the negated value so adsp_err_get_err_str returns
+ * the correct string.
+ */
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(-q6core_lcl.adsp_status));
+ ret = adsp_err_get_lnx_err_code(q6core_lcl.adsp_status);
+ goto done;
+ }
+
+ ret = 0;
+
+done:
+ return ret;
+}
+
+int q6core_get_avcs_fwk_ver_info(uint32_t service_id,
+ struct avcs_fwk_ver_info *ver_info)
+{
+ int ret;
+
+ mutex_lock(&(q6core_lcl.ver_lock));
+ pr_debug("%s: q6core_avcs_ver_info.status(%d)\n", __func__,
+ q6core_lcl.q6core_avcs_ver_info.status);
+
+ switch (q6core_lcl.q6core_avcs_ver_info.status) {
+ case VER_QUERY_SUPPORTED:
+ ret = 0;
+ break;
+ case VER_QUERY_UNSUPPORTED:
+ ret = -EOPNOTSUPP;
+ break;
+ case VER_QUERY_UNATTEMPTED:
+ if (q6core_is_adsp_ready()) {
+ ret = q6core_send_get_avcs_fwk_ver_cmd();
+ } else {
+ pr_err("%s: ADSP not ready, cannot query fwk version\n",
+ __func__);
+ ret = -ENODEV;
+ }
+ break;
+ default:
+ pr_err("%s: Invalid version query status %d\n", __func__,
+ q6core_lcl.q6core_avcs_ver_info.status);
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&(q6core_lcl.ver_lock));
+
+ return ret;
+}
+EXPORT_SYMBOL(q6core_get_avcs_fwk_ver_info);
+
int32_t core_set_license(uint32_t key, uint32_t module_id)
{
struct avcs_cmd_set_license *cmd_setl = NULL;
@@ -887,18 +1012,16 @@ err:
static int __init core_init(void)
{
+ memset(&q6core_lcl, 0, sizeof(struct q6core_str));
init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
- q6core_lcl.bus_bw_resp_received = 0;
-
- q6core_lcl.core_handle_q = NULL;
-
init_waitqueue_head(&q6core_lcl.cmd_req_wait);
+ init_waitqueue_head(&q6core_lcl.avcs_fwk_ver_req_wait);
q6core_lcl.cmd_resp_received_flag = FLAG_NONE;
mutex_init(&q6core_lcl.cmd_lock);
- q6core_lcl.mem_map_cal_handle = 0;
- q6core_lcl.adsp_status = 0;
+ mutex_init(&q6core_lcl.ver_lock);
q6core_init_cal_data();
+
return 0;
}
module_init(core_init);
@@ -906,6 +1029,7 @@ module_init(core_init);
static void __exit core_exit(void)
{
mutex_destroy(&q6core_lcl.cmd_lock);
+ mutex_destroy(&q6core_lcl.ver_lock);
q6core_delete_cal_data();
}
module_exit(core_exit);
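
The q6core changes follow the driver's established request/response shape: arm a completion flag, send the command with apr_send_pkt(), sleep on a waitqueue until the APR callback records the DSP status and wakes the waiter, then translate the negated status through adsp_err_get_lnx_err_code(). The new ver_lock mutex and the three-state enum additionally cache the outcome, so the DSP is queried at most once and later callers get the SUPPORTED/UNSUPPORTED answer immediately. A condensed sketch of the blocking half, where my_ctx and my_get_fwk_version are hypothetical names while apr_send_pkt(), wait_event_timeout() and the adsp_err helpers are the real calls used by the patch:

/*
 * Sketch, not the driver's code: arm a flag, send, block on the
 * waitqueue, then map the cached (negated) DSP status to an errno.
 */
struct my_ctx {
	void *apr_handle;
	wait_queue_head_t wait;
	u32 resp_received;       /* set to 1 by the APR rx callback */
	int32_t adsp_status;     /* DSP status, stored negated */
};

static int my_get_fwk_version(struct my_ctx *ctx, struct apr_hdr *cmd)
{
	int ret;

	ctx->adsp_status = 0;
	ctx->resp_received = 0;          /* arm before sending */

	ret = apr_send_pkt(ctx->apr_handle, (uint32_t *)cmd);
	if (ret < 0)
		return ret;

	/* The callback sets resp_received and calls wake_up(&ctx->wait). */
	ret = wait_event_timeout(ctx->wait, ctx->resp_received == 1,
				 msecs_to_jiffies(TIMEOUT_MS));
	if (!ret)
		return -ETIMEDOUT;       /* DSP never answered */

	/* Status was stored negated; map it to a Linux errno. */
	if (ctx->adsp_status < 0)
		return adsp_err_get_lnx_err_code(ctx->adsp_status);

	return 0;
}
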