author     Jaegeuk Kim <jaegeuk@google.com>    2018-07-30 17:25:04 -0700
committer  Jaegeuk Kim <jaegeuk@google.com>    2018-07-30 17:25:04 -0700
commit     8ddb600e033f69a1316e65d6af21eb3dc11501f2 (patch)
tree       c58c940325459ce055936aa7abaabdc0ea640ded  /fs/f2fs/node.c
parent     b826c97998ef8d98843e9207214a869f4ca3e448 (diff)
parent     6944da0a68ca00f8f27bd71e0e0e292ea14b5ca5 (diff)
Merge remote-tracking branch 'origin/upstream-f2fs-stable-linux-4.4.y' into android-4.4
6944da0a68ca treewide: Use array_size in f2fs_kvzalloc()
f15443db99c3 treewide: Use array_size() in f2fs_kzalloc()
3ea03ea4bd09 treewide: Use array_size() in f2fs_kmalloc()
c41203299a52 overflow.h: Add allocation size calculation helpers
d400752f547f f2fs: fix to clear FI_VOLATILE_FILE correctly
853e7339b634 f2fs: let sync node IO interrupt async one
6a4540cf1984 f2fs: don't change wbc->sync_mode
588ecdfd7d02 f2fs: fix to update mtime correctly
1ae5aadab191 fs: f2fs: insert space around that ':' and ', '
39ee53e22320 fs: f2fs: add missing blank lines after declarations
d5b4710fcf38 fs: f2fs: changed variable type of offset "unsigned" to "loff_t"
c35da89531b3 f2fs: clean up symbol namespace
fcf37e16f3cb f2fs: make set_de_type() static
5d1633aa1071 f2fs: make __f2fs_write_data_pages() static
cc8093af7c42 f2fs: fix to avoid accessing cross the boundary
b7f559467095 f2fs: fix to let caller retry allocating block address
e48fcd857657 disable loading f2fs module on PAGE_SIZE > 4KB
02afc275a5bd f2fs: fix error path of move_data_page
0291bd36d076 f2fs: don't drop dentry pages after fs shutdown
a1259450b6db f2fs: fix to avoid race during access gc_thread pointer
d2e0f2f786a6 f2fs: clean up with clear_radix_tree_dirty_tag
c74034518fdc f2fs: fix to don't trigger writeback during recovery
e72a2cca82d8 f2fs: clear discard_wake earlier
b25a1872e9a5 f2fs: let discard thread wait a little longer if dev is busy
b125dfb20d18 f2fs: avoid stucking GC due to atomic write
405909e7f532 f2fs: introduce sbi->gc_mode to determine the policy
1f62e4702a34 f2fs: keep migration IO order in LFS mode
c4408c238722 f2fs: fix to wait page writeback during revoking atomic write
9db5be4af890 f2fs: Fix deadlock in shutdown ioctl
ed74404955cd f2fs: detect synchronous writeback more earlier
91e7d9d2ddbf mm: remove nr_pages argument from pagevec_lookup_{,range}_tag()
feb94dc82928 ceph: use pagevec_lookup_range_nr_tag()
f3aa4a25b8b0 mm: add variant of pagevec_lookup_range_tag() taking number of pages
8914877e374a mm: use pagevec_lookup_range_tag() in write_cache_pages()
26778b87a006 mm: use pagevec_lookup_range_tag() in __filemap_fdatawait_range()
94f1b99298bd nilfs2: use pagevec_lookup_range_tag()
160355d69f46 gfs2: use pagevec_lookup_range_tag()
564108e83a74 f2fs: use find_get_pages_tag() for looking up single page
6cf6fb8645ff f2fs: simplify page iteration loops
a05d8a6a2bde f2fs: use pagevec_lookup_range_tag()
18a4848ffded ext4: use pagevec_lookup_range_tag()
1c7be24f65cd ceph: use pagevec_lookup_range_tag()
e25fadabb5c7 btrfs: use pagevec_lookup_range_tag()
bf9510b162c4 mm: implement find_get_pages_range_tag()
461247b21fde f2fs: clean up with is_valid_blkaddr()
a5d0ccbc189a f2fs: fix to initialize min_mtime with ULLONG_MAX
9bb4d22cf5de f2fs: fix to let checkpoint guarantee atomic page persistence
cdcf2b3e2559 f2fs: fix to initialize i_current_depth according to inode type
331ae0c25b44 Revert "f2fs: add ovp valid_blocks check for bg gc victim to fg_gc"
2494cc7c0bcd f2fs: don't drop any page on f2fs_cp_error() case
0037c639e63d f2fs: fix spelling mistake: "extenstion" -> "extension"
2bba5b8eb867 f2fs: enhance sanity_check_raw_super() to avoid potential overflows
9bb86b63dc0f f2fs: treat volatile file's data as hot one
2cf64590361e f2fs: introduce release_discard_addr() for cleanup
03279ce90b46 f2fs: fix potential overflow
f46eddc4da48 f2fs: rename dio_rwsem to i_gc_rwsem
bb015824532c f2fs: move mnt_want_write_file after range check
8bb9a8da75d1 f2fs: fix missing clear FI_NO_PREALLOC in some error case
cb38cc4e1d02 f2fs: enforce fsync_mode=strict for renamed directory
26bf4e8a96aa f2fs: sanity check for total valid node blocks
78f8b0f46fa2 f2fs: sanity check on sit entry
ab758ada220f f2fs: avoid bug_on on corrupted inode
1a5d1966c0ca f2fs: give message and set need_fsck given broken node id
b025f6dfc018 f2fs: clean up commit_inmem_pages()
7aff5c69da4c f2fs: do not check F2FS_INLINE_DOTS in recover
23d00b02878e f2fs: remove duplicated dquot_initialize and fix error handling
937f4ef79e25 f2fs: stop issue discard if something wrong with f2fs
a6d74bb282ad f2fs: fix return value in f2fs_ioc_commit_atomic_write
258489ec5220 f2fs: allocate hot_data for atomic write more strictly
aa857e0f3b09 f2fs: check if inmem_pages list is empty correctly
9d77ded0a71d f2fs: fix race in between GC and atomic open
0d17eb90b56a f2fs: change le32 to le16 of f2fs_inode->i_extra_size
ea2813111f1f f2fs: check cur_valid_map_mir & raw_sit block count when flush sit entries
9190cadf38db f2fs: correct return value of f2fs_trim_fs
17f85d070886 f2fs: fix to show missing bits in FS_IOC_GETFLAGS
3e90db63fcfc f2fs: remove unneeded F2FS_PROJINHERIT_FL
298032d4d4a6 f2fs: don't use GFP_ZERO for page caches
fdf61219dc25 f2fs: issue all big range discards in umount process
cd79eb2b5e45 f2fs: remove redundant block plug
ec034d0f14ca f2fs: remove unmatched zero_user_segment when convert inline dentry
71aaced0e1ee f2fs: introduce private inode status mapping
e7724207f71e fscrypt: log the crypto algorithm implementations
4cbda579cd3d crypto: api - Add crypto_type_has_alg helper
b24dcaae8753 crypto: skcipher - Add low-level skcipher interface
a9146e423547 crypto: skcipher - Add helper to retrieve driver name
a0ca4bdf4744 crypto: skcipher - Add default key size helper
eb13e0b69296 fscrypt: add Speck128/256 support
27a0e77380a3 fscrypt: only derive the needed portion of the key
f68a71fa8f77 fscrypt: separate key lookup from key derivation
52359cf4fd6d fscrypt: use a common logging function
ff8e7c745e2b fscrypt: remove internal key size constants
7149dd4d39b5 fscrypt: remove unnecessary check for non-logon key type
56446c91422e fscrypt: make fscrypt_operations.max_namelen an integer
f572a22ef9a5 fscrypt: drop empty name check from fname_decrypt()
0077eff1d2e3 fscrypt: drop max_namelen check from fname_decrypt()
3f7af9d27fd6 fscrypt: don't special-case EOPNOTSUPP from fscrypt_get_encryption_info()
52c51f7b7bde fscrypt: don't clear flags on crypto transform
89b7fb82982f fscrypt: remove stale comment from fscrypt_d_revalidate()
d56de4e926ad fscrypt: remove error messages for skcipher_request_alloc() failure
f68d3b84aef1 fscrypt: remove unnecessary NULL check when allocating skcipher
fb10231825e9 fscrypt: clean up after fscrypt_prepare_lookup() conversions
39b144490606 fscrypt: use unbound workqueue for decryption
Change-Id: Ied79ecd97385c05ef26e6b7b24d250eee9ec4e47
Signed-off-by: Jaegeuk Kim <jaegeuk@google.com>
Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r--  fs/f2fs/node.c  353
1 file changed, 180 insertions, 173 deletions
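
Note: two of the merged patches ("treewide: Use array_size() in f2fs_kzalloc()/f2fs_kvzalloc()") convert the bitmap allocations in init_free_nid_cache(), visible in the hunks near the end of the diff below, from open-coded count * size multiplications to array_size(). A minimal userspace sketch of the idea, assuming only that array_size() saturates to SIZE_MAX on overflow as the overflow.h helper added by this series does, not the kernel implementation itself:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Simplified stand-in for the kernel's array_size() helper from
     * <linux/overflow.h>: return a * b, saturating to SIZE_MAX when the
     * multiplication would overflow, so a subsequent allocation fails
     * instead of being silently undersized.
     */
    static size_t array_size_sketch(size_t a, size_t b)
    {
            if (b != 0 && a > SIZE_MAX / b)
                    return SIZE_MAX;  /* saturate: allocator will reject this */
            return a * b;
    }

    int main(void)
    {
            /* same shape as f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *), nat_blocks), GFP_KERNEL) */
            printf("%zu\n", array_size_sketch(sizeof(unsigned char *), 4096));
            printf("%zu\n", array_size_sketch(SIZE_MAX / 2, 4));  /* saturated */
            return 0;
    }

Passing the saturated size to f2fs_kzalloc()/f2fs_kvzalloc() makes the allocation fail cleanly rather than hand back a buffer smaller than the caller assumes.
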
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 16aee2a7b8a9..b72fac4766a9 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -23,13 +23,28 @@
#include "trace.h"
#include <trace/events/f2fs.h>
-#define on_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
+#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
-bool available_free_memory(struct f2fs_sb_info *sbi, int type)
+/*
+ * Check whether the given nid is within node id range.
+ */
+int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: out-of-range nid=%x, run fsck to fix.",
+ __func__, nid);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct sysinfo val;
@@ -87,18 +102,10 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
static void clear_node_page_dirty(struct page *page)
{
- struct address_space *mapping = page->mapping;
- unsigned int long flags;
-
if (PageDirty(page)) {
- spin_lock_irqsave(&mapping->tree_lock, flags);
- radix_tree_tag_clear(&mapping->page_tree,
- page_index(page),
- PAGECACHE_TAG_DIRTY);
- spin_unlock_irqrestore(&mapping->tree_lock, flags);
-
+ f2fs_clear_radix_tree_dirty_tag(page);
clear_page_dirty_for_io(page);
- dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
+ dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
}
ClearPageUptodate(page);
}
@@ -106,7 +113,7 @@ static void clear_node_page_dirty(struct page *page)
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
pgoff_t index = current_nat_addr(sbi, nid);
- return get_meta_page(sbi, index);
+ return f2fs_get_meta_page(sbi, index);
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
@@ -123,8 +130,8 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
dst_off = next_nat_addr(sbi, src_off);
/* get current nat block page with lock */
- src_page = get_meta_page(sbi, src_off);
- dst_page = grab_meta_page(sbi, dst_off);
+ src_page = f2fs_get_meta_page(sbi, src_off);
+ dst_page = f2fs_grab_meta_page(sbi, dst_off);
f2fs_bug_on(sbi, PageDirty(src_page));
src_addr = page_address(src_page);
@@ -260,7 +267,7 @@ static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
start, nr);
}
-int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
+int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
@@ -277,7 +284,7 @@ int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
return need;
}
-bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
+bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
@@ -291,7 +298,7 @@ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
return is_cp;
}
-bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
+bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
@@ -364,8 +371,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
new_blkaddr == NULL_ADDR);
f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
new_blkaddr == NEW_ADDR);
- f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
- nat_get_blkaddr(e) != NULL_ADDR &&
+ f2fs_bug_on(sbi, is_valid_blkaddr(nat_get_blkaddr(e)) &&
new_blkaddr == NEW_ADDR);
/* increment version no as node is removed */
@@ -376,7 +382,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
/* change address */
nat_set_blkaddr(e, new_blkaddr);
- if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
+ if (!is_valid_blkaddr(new_blkaddr))
set_nat_flag(e, IS_CHECKPOINTED, false);
__set_nat_cache_dirty(nm_i, e);
@@ -391,7 +397,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
up_write(&nm_i->nat_tree_lock);
}
-int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
+int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
int nr = nr_shrink;
@@ -413,7 +419,8 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
/*
* This function always returns success
*/
-void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
+void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+ struct node_info *ni)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -443,7 +450,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
/* Check current segment summary */
down_read(&curseg->journal_rwsem);
- i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
+ i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
if (i >= 0) {
ne = nat_in_journal(journal, i);
node_info_from_raw_nat(ni, &ne);
@@ -458,7 +465,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
index = current_nat_addr(sbi, nid);
up_read(&nm_i->nat_tree_lock);
- page = get_meta_page(sbi, index);
+ page = f2fs_get_meta_page(sbi, index);
nat_blk = (struct f2fs_nat_block *)page_address(page);
ne = nat_blk->entries[nid - start_nid];
node_info_from_raw_nat(ni, &ne);
@@ -471,7 +478,7 @@ cache:
/*
* readahead MAX_RA_NODE number of node pages.
*/
-static void ra_node_pages(struct page *parent, int start, int n)
+static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
struct blk_plug plug;
@@ -485,13 +492,13 @@ static void ra_node_pages(struct page *parent, int start, int n)
end = min(end, NIDS_PER_BLOCK);
for (i = start; i < end; i++) {
nid = get_nid(parent, i, false);
- ra_node_page(sbi, nid);
+ f2fs_ra_node_page(sbi, nid);
}
blk_finish_plug(&plug);
}
-pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
+pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
const long direct_index = ADDRS_PER_INODE(dn->inode);
const long direct_blks = ADDRS_PER_BLOCK;
@@ -606,7 +613,7 @@ got:
* f2fs_unlock_op() only if ro is not set RDONLY_NODE.
* In the case of RDONLY_NODE, we don't need to care about mutex.
*/
-int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
+int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct page *npage[4];
@@ -625,7 +632,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
npage[0] = dn->inode_page;
if (!npage[0]) {
- npage[0] = get_node_page(sbi, nids[0]);
+ npage[0] = f2fs_get_node_page(sbi, nids[0]);
if (IS_ERR(npage[0]))
return PTR_ERR(npage[0]);
}
@@ -649,24 +656,24 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
if (!nids[i] && mode == ALLOC_NODE) {
/* alloc new node */
- if (!alloc_nid(sbi, &(nids[i]))) {
+ if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
err = -ENOSPC;
goto release_pages;
}
dn->nid = nids[i];
- npage[i] = new_node_page(dn, noffset[i]);
+ npage[i] = f2fs_new_node_page(dn, noffset[i]);
if (IS_ERR(npage[i])) {
- alloc_nid_failed(sbi, nids[i]);
+ f2fs_alloc_nid_failed(sbi, nids[i]);
err = PTR_ERR(npage[i]);
goto release_pages;
}
set_nid(parent, offset[i - 1], nids[i], i == 1);
- alloc_nid_done(sbi, nids[i]);
+ f2fs_alloc_nid_done(sbi, nids[i]);
done = true;
} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
- npage[i] = get_node_page_ra(parent, offset[i - 1]);
+ npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
if (IS_ERR(npage[i])) {
err = PTR_ERR(npage[i]);
goto release_pages;
@@ -681,7 +688,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
}
if (!done) {
- npage[i] = get_node_page(sbi, nids[i]);
+ npage[i] = f2fs_get_node_page(sbi, nids[i]);
if (IS_ERR(npage[i])) {
err = PTR_ERR(npage[i]);
f2fs_put_page(npage[0], 0);
@@ -720,15 +727,15 @@ static void truncate_node(struct dnode_of_data *dn)
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info ni;
- get_node_info(sbi, dn->nid, &ni);
+ f2fs_get_node_info(sbi, dn->nid, &ni);
/* Deallocate node address */
- invalidate_blocks(sbi, ni.blk_addr);
+ f2fs_invalidate_blocks(sbi, ni.blk_addr);
dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
set_node_addr(sbi, &ni, NULL_ADDR, false);
if (dn->nid == dn->inode->i_ino) {
- remove_orphan_inode(sbi, dn->nid);
+ f2fs_remove_orphan_inode(sbi, dn->nid);
dec_valid_inode_count(sbi);
f2fs_inode_synced(dn->inode);
}
@@ -753,7 +760,7 @@ static int truncate_dnode(struct dnode_of_data *dn)
return 1;
/* get direct node */
- page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
+ page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
return 1;
else if (IS_ERR(page))
@@ -762,7 +769,7 @@ static int truncate_dnode(struct dnode_of_data *dn)
/* Make dnode_of_data for parameter */
dn->node_page = page;
dn->ofs_in_node = 0;
- truncate_data_blocks(dn);
+ f2fs_truncate_data_blocks(dn);
truncate_node(dn);
return 1;
}
@@ -783,13 +790,13 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
- page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
+ page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
if (IS_ERR(page)) {
trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
return PTR_ERR(page);
}
- ra_node_pages(page, ofs, NIDS_PER_BLOCK);
+ f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
rn = F2FS_NODE(page);
if (depth < 3) {
@@ -859,7 +866,7 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
/* get indirect nodes in the path */
for (i = 0; i < idx + 1; i++) {
/* reference count'll be increased */
- pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
+ pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
if (IS_ERR(pages[i])) {
err = PTR_ERR(pages[i]);
idx = i - 1;
@@ -868,7 +875,7 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
}
- ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
+ f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
/* free direct nodes linked to a partial indirect node */
for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
@@ -905,7 +912,7 @@ fail:
/*
* All the block addresses of data and nodes should be nullified.
*/
-int truncate_inode_blocks(struct inode *inode, pgoff_t from)
+int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err = 0, cont = 1;
@@ -921,7 +928,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
if (level < 0)
return level;
- page = get_node_page(sbi, inode->i_ino);
+ page = f2fs_get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) {
trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
return PTR_ERR(page);
@@ -1001,7 +1008,7 @@ fail:
}
/* caller must lock inode page */
-int truncate_xattr_node(struct inode *inode)
+int f2fs_truncate_xattr_node(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t nid = F2FS_I(inode)->i_xattr_nid;
@@ -1011,7 +1018,7 @@ int truncate_xattr_node(struct inode *inode)
if (!nid)
return 0;
- npage = get_node_page(sbi, nid);
+ npage = f2fs_get_node_page(sbi, nid);
if (IS_ERR(npage))
return PTR_ERR(npage);
@@ -1026,17 +1033,17 @@ int truncate_xattr_node(struct inode *inode)
* Caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op().
*/
-int remove_inode_page(struct inode *inode)
+int f2fs_remove_inode_page(struct inode *inode)
{
struct dnode_of_data dn;
int err;
set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
- err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
+ err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
if (err)
return err;
- err = truncate_xattr_node(inode);
+ err = f2fs_truncate_xattr_node(inode);
if (err) {
f2fs_put_dnode(&dn);
return err;
@@ -1045,7 +1052,7 @@ int remove_inode_page(struct inode *inode)
/* remove potential inline_data blocks */
if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode))
- truncate_data_blocks_range(&dn, 1);
+ f2fs_truncate_data_blocks_range(&dn, 1);
/* 0 is possible, after f2fs_new_inode() has failed */
f2fs_bug_on(F2FS_I_SB(inode),
@@ -1056,7 +1063,7 @@ int remove_inode_page(struct inode *inode)
return 0;
}
-struct page *new_inode_page(struct inode *inode)
+struct page *f2fs_new_inode_page(struct inode *inode)
{
struct dnode_of_data dn;
@@ -1064,10 +1071,10 @@ struct page *new_inode_page(struct inode *inode)
set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
/* caller should f2fs_put_page(page, 1); */
- return new_node_page(&dn, 0);
+ return f2fs_new_node_page(&dn, 0);
}
-struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info new_ni;
@@ -1085,7 +1092,7 @@ struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
goto fail;
#ifdef CONFIG_F2FS_CHECK_FS
- get_node_info(sbi, dn->nid, &new_ni);
+ f2fs_get_node_info(sbi, dn->nid, &new_ni);
f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
new_ni.nid = dn->nid;
@@ -1137,7 +1144,7 @@ static int read_node_page(struct page *page, int op_flags)
if (PageUptodate(page))
return LOCKED_PAGE;
- get_node_info(sbi, page->index, &ni);
+ f2fs_get_node_info(sbi, page->index, &ni);
if (unlikely(ni.blk_addr == NULL_ADDR)) {
ClearPageUptodate(page);
@@ -1151,14 +1158,15 @@ static int read_node_page(struct page *page, int op_flags)
/*
* Readahead a node page
*/
-void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
+void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
struct page *apage;
int err;
if (!nid)
return;
- f2fs_bug_on(sbi, check_nid_range(sbi, nid));
+ if (f2fs_check_nid_range(sbi, nid))
+ return;
rcu_read_lock();
apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
@@ -1182,7 +1190,8 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
if (!nid)
return ERR_PTR(-ENOENT);
- f2fs_bug_on(sbi, check_nid_range(sbi, nid));
+ if (f2fs_check_nid_range(sbi, nid))
+ return ERR_PTR(-EINVAL);
repeat:
page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
if (!page)
@@ -1198,7 +1207,7 @@ repeat:
}
if (parent)
- ra_node_pages(parent, start + 1, MAX_RA_NODE);
+ f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
lock_page(page);
@@ -1232,12 +1241,12 @@ out_err:
return page;
}
-struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
return __get_node_page(sbi, nid, NULL, 0);
}
-struct page *get_node_page_ra(struct page *parent, int start)
+struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
nid_t nid = get_nid(parent, start, false);
@@ -1272,7 +1281,7 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
ret = f2fs_write_inline_data(inode, page);
inode_dec_dirty_pages(inode);
- remove_dirty_inode(inode);
+ f2fs_remove_dirty_inode(inode);
if (ret)
set_page_dirty(page);
page_out:
@@ -1283,21 +1292,17 @@ iput_out:
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
- pgoff_t index, end;
+ pgoff_t index;
struct pagevec pvec;
struct page *last_page = NULL;
+ int nr_pages;
pagevec_init(&pvec, 0);
index = 0;
- end = ULONG_MAX;
-
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
+
+ while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY))) {
+ int i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -1363,11 +1368,8 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
trace_f2fs_writepage(page, NODE);
- if (unlikely(f2fs_cp_error(sbi))) {
- dec_page_count(sbi, F2FS_DIRTY_NODES);
- unlock_page(page);
- return 0;
- }
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto redirty_out;
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
@@ -1383,7 +1385,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
down_read(&sbi->node_write);
}
- get_node_info(sbi, nid, &ni);
+ f2fs_get_node_info(sbi, nid, &ni);
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
@@ -1400,7 +1402,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
set_page_writeback(page);
ClearPageError(page);
fio.old_blkaddr = ni.blk_addr;
- write_node_page(nid, &fio);
+ f2fs_do_write_node_page(nid, &fio);
set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
dec_page_count(sbi, F2FS_DIRTY_NODES);
up_read(&sbi->node_write);
@@ -1429,7 +1431,7 @@ redirty_out:
return AOP_WRITEPAGE_ACTIVATE;
}
-void move_node_page(struct page *node_page, int gc_type)
+void f2fs_move_node_page(struct page *node_page, int gc_type)
{
if (gc_type == FG_GC) {
struct writeback_control wbc = {
@@ -1466,16 +1468,17 @@ static int f2fs_write_node_page(struct page *page,
return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
}
-int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
+int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic)
{
- pgoff_t index, end;
+ pgoff_t index;
pgoff_t last_idx = ULONG_MAX;
struct pagevec pvec;
int ret = 0;
struct page *last_page = NULL;
bool marked = false;
nid_t ino = inode->i_ino;
+ int nr_pages;
if (atomic) {
last_page = last_fsync_dnode(sbi, ino);
@@ -1485,15 +1488,10 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
retry:
pagevec_init(&pvec, 0);
index = 0;
- end = ULONG_MAX;
-
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
+
+ while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY))) {
+ int i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -1537,9 +1535,9 @@ continue_unlock:
if (IS_INODE(page)) {
if (is_inode_flag_set(inode,
FI_DIRTY_INODE))
- update_inode(inode, page);
+ f2fs_update_inode(inode, page);
set_dentry_mark(page,
- need_dentry_mark(sbi, ino));
+ f2fs_need_dentry_mark(sbi, ino));
}
/* may be written by other thread */
if (!PageDirty(page))
@@ -1589,33 +1587,37 @@ out:
return ret ? -EIO: 0;
}
-int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
+int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
+ struct writeback_control *wbc,
bool do_balance, enum iostat_type io_type)
{
- pgoff_t index, end;
+ pgoff_t index;
struct pagevec pvec;
int step = 0;
int nwritten = 0;
int ret = 0;
+ int nr_pages, done = 0;
pagevec_init(&pvec, 0);
next_step:
index = 0;
- end = ULONG_MAX;
-
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
+
+ while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
+ NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
+ int i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
bool submitted = false;
+ /* give a priority to WB_SYNC threads */
+ if (atomic_read(&sbi->wb_sync_req[NODE]) &&
+ wbc->sync_mode == WB_SYNC_NONE) {
+ done = 1;
+ break;
+ }
+
/*
* flushing sequence with step:
* 0. indirect nodes
@@ -1694,29 +1696,22 @@ continue_unlock:
return ret;
}
-int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
- pgoff_t index = 0, end = ULONG_MAX;
+ pgoff_t index = 0;
struct pagevec pvec;
int ret2 = 0, ret = 0;
+ int nr_pages;
pagevec_init(&pvec, 0);
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_WRITEBACK,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
+ while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_WRITEBACK))) {
+ int i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- /* until radix tree lookup accepts end_index */
- if (unlikely(page->index > end))
- continue;
-
if (ino && ino_of_node(page) == ino) {
f2fs_wait_on_page_writeback(page, NODE, true);
if (TestClearPageError(page))
@@ -1753,14 +1748,21 @@ static int f2fs_write_node_pages(struct address_space *mapping,
if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
goto skip_write;
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ atomic_inc(&sbi->wb_sync_req[NODE]);
+ else if (atomic_read(&sbi->wb_sync_req[NODE]))
+ goto skip_write;
+
trace_f2fs_writepages(mapping->host, wbc, NODE);
diff = nr_pages_to_write(sbi, NODE, wbc);
- wbc->sync_mode = WB_SYNC_NONE;
blk_start_plug(&plug);
- sync_node_pages(sbi, wbc, true, FS_NODE_IO);
+ f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
blk_finish_plug(&plug);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
+
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ atomic_dec(&sbi->wb_sync_req[NODE]);
return 0;
skip_write:
@@ -1906,20 +1908,20 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
* Thread A Thread B
* - f2fs_create
* - f2fs_new_inode
- * - alloc_nid
+ * - f2fs_alloc_nid
* - __insert_nid_to_list(PREALLOC_NID)
* - f2fs_balance_fs_bg
- * - build_free_nids
- * - __build_free_nids
+ * - f2fs_build_free_nids
+ * - __f2fs_build_free_nids
* - scan_nat_page
* - add_free_nid
* - __lookup_nat_cache
* - f2fs_add_link
- * - init_inode_metadata
- * - new_inode_page
- * - new_node_page
+ * - f2fs_init_inode_metadata
+ * - f2fs_new_inode_page
+ * - f2fs_new_node_page
* - set_node_addr
- * - alloc_nid_done
+ * - f2fs_alloc_nid_done
* - __remove_nid_from_list(PREALLOC_NID)
* - __insert_nid_to_list(FREE_NID)
*/
@@ -2051,7 +2053,8 @@ out:
up_read(&nm_i->nat_tree_lock);
}
-static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+ bool sync, bool mount)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
int i = 0;
@@ -2064,7 +2067,7 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
return;
- if (!sync && !available_free_memory(sbi, FREE_NIDS))
+ if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
return;
if (!mount) {
@@ -2076,7 +2079,7 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
}
/* readahead nat pages to be scanned */
- ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
+ f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
META_NAT, true);
down_read(&nm_i->nat_tree_lock);
@@ -2106,14 +2109,14 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
up_read(&nm_i->nat_tree_lock);
- ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
+ f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
}
-void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
mutex_lock(&NM_I(sbi)->build_lock);
- __build_free_nids(sbi, sync, mount);
+ __f2fs_build_free_nids(sbi, sync, mount);
mutex_unlock(&NM_I(sbi)->build_lock);
}
@@ -2122,7 +2125,7 @@ void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
* from second parameter of this function.
* The returned nid could be used ino as well as nid when inode is created.
*/
-bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
+bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL;
@@ -2140,8 +2143,8 @@ retry:
return false;
}
- /* We should not use stale free nids created by build_free_nids */
- if (nm_i->nid_cnt[FREE_NID] && !on_build_free_nids(nm_i)) {
+ /* We should not use stale free nids created by f2fs_build_free_nids */
+ if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
i = list_first_entry(&nm_i->free_nid_list,
struct free_nid, list);
@@ -2158,14 +2161,14 @@ retry:
spin_unlock(&nm_i->nid_list_lock);
/* Let's scan nat pages and its caches to get free nids */
- build_free_nids(sbi, true, false);
+ f2fs_build_free_nids(sbi, true, false);
goto retry;
}
/*
- * alloc_nid() should be called prior to this function.
+ * f2fs_alloc_nid() should be called prior to this function.
*/
-void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
+void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
@@ -2180,9 +2183,9 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
}
/*
- * alloc_nid() should be called prior to this function.
+ * f2fs_alloc_nid() should be called prior to this function.
*/
-void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
+void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
@@ -2195,7 +2198,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
i = __lookup_free_nid_list(nm_i, nid);
f2fs_bug_on(sbi, !i);
- if (!available_free_memory(sbi, FREE_NIDS)) {
+ if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
__remove_free_nid(sbi, i, PREALLOC_NID);
need_free = true;
} else {
@@ -2212,7 +2215,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
kmem_cache_free(free_nid_slab, i);
}
-int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
+int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *next;
@@ -2240,14 +2243,14 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
return nr - nr_shrink;
}
-void recover_inline_xattr(struct inode *inode, struct page *page)
+void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
void *src_addr, *dst_addr;
size_t inline_size;
struct page *ipage;
struct f2fs_inode *ri;
- ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+ ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
ri = F2FS_INODE(page);
@@ -2265,11 +2268,11 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
f2fs_wait_on_page_writeback(ipage, NODE, true);
memcpy(dst_addr, src_addr, inline_size);
update_inode:
- update_inode(inode, ipage);
+ f2fs_update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
}
-int recover_xattr_data(struct inode *inode, struct page *page)
+int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
@@ -2282,25 +2285,25 @@ int recover_xattr_data(struct inode *inode, struct page *page)
goto recover_xnid;
/* 1: invalidate the previous xattr nid */
- get_node_info(sbi, prev_xnid, &ni);
- invalidate_blocks(sbi, ni.blk_addr);
+ f2fs_get_node_info(sbi, prev_xnid, &ni);
+ f2fs_invalidate_blocks(sbi, ni.blk_addr);
dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);
recover_xnid:
/* 2: update xattr nid in inode */
- if (!alloc_nid(sbi, &new_xnid))
+ if (!f2fs_alloc_nid(sbi, &new_xnid))
return -ENOSPC;
set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
- xpage = new_node_page(&dn, XATTR_NODE_OFFSET);
+ xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
if (IS_ERR(xpage)) {
- alloc_nid_failed(sbi, new_xnid);
+ f2fs_alloc_nid_failed(sbi, new_xnid);
return PTR_ERR(xpage);
}
- alloc_nid_done(sbi, new_xnid);
- update_inode_page(inode);
+ f2fs_alloc_nid_done(sbi, new_xnid);
+ f2fs_update_inode_page(inode);
/* 3: update and set xattr node page dirty */
memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
@@ -2311,14 +2314,14 @@ recover_xnid:
return 0;
}
-int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
+int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
struct f2fs_inode *src, *dst;
nid_t ino = ino_of_node(page);
struct node_info old_ni, new_ni;
struct page *ipage;
- get_node_info(sbi, ino, &old_ni);
+ f2fs_get_node_info(sbi, ino, &old_ni);
if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL;
@@ -2372,7 +2375,7 @@ retry:
return 0;
}
-void restore_node_summary(struct f2fs_sb_info *sbi,
+void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum)
{
struct f2fs_node *rn;
@@ -2389,10 +2392,10 @@ void restore_node_summary(struct f2fs_sb_info *sbi,
nrpages = min(last_offset - i, BIO_MAX_PAGES);
/* readahead node pages */
- ra_meta_pages(sbi, addr, nrpages, META_POR, true);
+ f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
for (idx = addr; idx < addr + nrpages; idx++) {
- struct page *page = get_tmp_page(sbi, idx);
+ struct page *page = f2fs_get_tmp_page(sbi, idx);
rn = F2FS_NODE(page);
sum_entry->nid = rn->footer.nid;
@@ -2534,7 +2537,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
if (to_journal) {
- offset = lookup_journal_in_cursum(journal,
+ offset = f2fs_lookup_journal_in_cursum(journal,
NAT_JOURNAL, nid, 1);
f2fs_bug_on(sbi, offset < 0);
raw_ne = &nat_in_journal(journal, offset);
@@ -2571,7 +2574,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
/*
* This function is called during the checkpointing process.
*/
-void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -2634,7 +2637,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++) {
- struct page *page = get_meta_page(sbi, nat_bits_addr++);
+ struct page *page = f2fs_get_meta_page(sbi, nat_bits_addr++);
memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
page_address(page), F2FS_BLKSIZE);
@@ -2753,8 +2756,10 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
struct f2fs_nm_info *nm_i = NM_I(sbi);
int i;
- nm_i->free_nid_bitmap = f2fs_kzalloc(sbi, nm_i->nat_blocks *
- sizeof(unsigned char *), GFP_KERNEL);
+ nm_i->free_nid_bitmap =
+ f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
+ nm_i->nat_blocks),
+ GFP_KERNEL);
if (!nm_i->free_nid_bitmap)
return -ENOMEM;
@@ -2770,14 +2775,16 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
if (!nm_i->nat_block_bitmap)
return -ENOMEM;
- nm_i->free_nid_count = f2fs_kvzalloc(sbi, nm_i->nat_blocks *
- sizeof(unsigned short), GFP_KERNEL);
+ nm_i->free_nid_count =
+ f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
+ nm_i->nat_blocks),
+ GFP_KERNEL);
if (!nm_i->free_nid_count)
return -ENOMEM;
return 0;
}
-int build_node_manager(struct f2fs_sb_info *sbi)
+int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
int err;
@@ -2797,11 +2804,11 @@ int build_node_manager(struct f2fs_sb_info *sbi)
/* load free nid status from nat_bits table */
load_free_nid_bitmap(sbi);
- build_free_nids(sbi, true, true);
+ f2fs_build_free_nids(sbi, true, true);
return 0;
}
-void destroy_node_manager(struct f2fs_sb_info *sbi)
+void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *next_i;
@@ -2873,7 +2880,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
kfree(nm_i);
}
-int __init create_node_manager_caches(void)
+int __init f2fs_create_node_manager_caches(void)
{
nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
sizeof(struct nat_entry));
@@ -2899,7 +2906,7 @@ fail:
return -ENOMEM;
}
-void destroy_node_manager_caches(void)
+void f2fs_destroy_node_manager_caches(void)
{
kmem_cache_destroy(nat_entry_set_slab);
kmem_cache_destroy(free_nid_slab);
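
Note: most of the mechanical churn in last_fsync_dnode(), f2fs_fsync_node_pages(), f2fs_sync_node_pages() and f2fs_wait_on_node_pages_writeback() above comes from the backported "mm: remove nr_pages argument from pagevec_lookup_{,range}_tag()" change: the lookup now always fills at most PAGEVEC_SIZE pages internally, so callers drop the nr_pages argument and the end/index bookkeeping and simply loop until a lookup returns zero. A rough userspace analogue of that loop shape (the lookup function and data here are invented for illustration, not kernel API):

    #include <stddef.h>
    #include <stdio.h>

    #define BATCH 15  /* stands in for PAGEVEC_SIZE */

    /* Gather up to BATCH "dirty" indexes starting at *index; advance *index. */
    static int lookup_tag_sketch(const int *tags, size_t total,
                                 size_t *index, int *batch)
    {
            int n = 0;

            while (*index < total && n < BATCH) {
                    if (tags[*index])
                            batch[n++] = (int)*index;
                    (*index)++;
            }
            return n;
    }

    int main(void)
    {
            int tags[40] = { [3] = 1, [17] = 1, [31] = 1 };
            int batch[BATCH];
            size_t index = 0;
            int nr;

            /* same shape as: while ((nr_pages = pagevec_lookup_tag(...))) { ... } */
            while ((nr = lookup_tag_sketch(tags, 40, &index, batch))) {
                    for (int i = 0; i < nr; i++)
                            printf("dirty page %d\n", batch[i]);
            }
            return 0;
    }
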