Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r--  fs/f2fs/node.c  423
1 file changed, 265 insertions(+), 158 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index a161e955c4c8..d3d90d284631 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -25,21 +25,30 @@
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
+static struct kmem_cache *nat_entry_set_slab;
-static inline bool available_free_memory(struct f2fs_nm_info *nm_i, int type)
+bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct sysinfo val;
unsigned long mem_size = 0;
+ bool res = false;
si_meminfo(&val);
- if (type == FREE_NIDS)
- mem_size = nm_i->fcnt * sizeof(struct free_nid);
- else if (type == NAT_ENTRIES)
- mem_size += nm_i->nat_cnt * sizeof(struct nat_entry);
- mem_size >>= 12;
-
- /* give 50:50 memory for free nids and nat caches respectively */
- return (mem_size < ((val.totalram * nm_i->ram_thresh) >> 11));
+ /* give 25%, 25%, 50% memory for each component respectively */
+ if (type == FREE_NIDS) {
+ mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12;
+ res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
+ } else if (type == NAT_ENTRIES) {
+ mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
+ res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
+ } else if (type == DIRTY_DENTS) {
+ if (sbi->sb->s_bdi->dirty_exceeded)
+ return false;
+ mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
+ res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
+ }
+ return res;
}
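
The rewritten check splits the ram_thresh budget three ways: free nids and NAT entries may each use a quarter of it, dirty dentries half, and dirty dentries are additionally refused outright while the backing device reports dirty_exceeded. A minimal standalone sketch of the arithmetic, assuming 4KB pages and ram_thresh expressed as a percentage:

#include <stdbool.h>

/* Illustrative only: bytes_used and totalram_pages stand in for the
 * cache footprint and si_meminfo() totalram above; shift 2 grants a
 * 25% share of the ram_thresh budget, shift 1 a 50% share. */
static bool under_share(unsigned long bytes_used,
			unsigned long totalram_pages,
			unsigned int ram_thresh, unsigned int shift)
{
	unsigned long used_pages = bytes_used >> 12;	/* bytes -> 4KB pages */

	return used_pages < ((totalram_pages * ram_thresh / 100) >> shift);
}
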
static void clear_node_page_dirty(struct page *page)
@@ -82,12 +91,8 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
/* get current nat block page with lock */
src_page = get_meta_page(sbi, src_off);
-
- /* Dirty src_page means that it is already the new target NAT page. */
- if (PageDirty(src_page))
- return src_page;
-
dst_page = grab_meta_page(sbi, dst_off);
+ f2fs_bug_on(PageDirty(src_page));
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
@@ -147,6 +152,18 @@ bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid)
return fsync_done;
}
+void fsync_mark_clear(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct nat_entry *e;
+
+ write_lock(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid);
+ if (e)
+ e->fsync_done = false;
+ write_unlock(&nm_i->nat_tree_lock);
+}
+
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
struct nat_entry *new;
@@ -179,9 +196,7 @@ retry:
write_unlock(&nm_i->nat_tree_lock);
goto retry;
}
- nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
- nat_set_ino(e, le32_to_cpu(ne->ino));
- nat_set_version(e, ne->version);
+ node_info_from_raw_nat(&e->ni, ne);
}
write_unlock(&nm_i->nat_tree_lock);
}
@@ -243,7 +258,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- if (available_free_memory(nm_i, NAT_ENTRIES))
+ if (available_free_memory(sbi, NAT_ENTRIES))
return 0;
write_lock(&nm_i->nat_tree_lock);
@@ -827,7 +842,7 @@ void remove_inode_page(struct inode *inode)
truncate_node(&dn);
}
-struct page *new_inode_page(struct inode *inode, const struct qstr *name)
+struct page *new_inode_page(struct inode *inode)
{
struct dnode_of_data dn;
@@ -849,8 +864,7 @@ struct page *new_node_page(struct dnode_of_data *dn,
if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
return ERR_PTR(-EPERM);
- page = grab_cache_page_write_begin(NODE_MAPPING(sbi),
- dn->nid, AOP_FLAG_NOFS);
+ page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -867,6 +881,7 @@ struct page *new_node_page(struct dnode_of_data *dn,
new_ni.ino = dn->inode->i_ino;
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
+ f2fs_wait_on_page_writeback(page, NODE);
fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
set_cold_node(dn->inode, page);
SetPageUptodate(page);
@@ -946,8 +961,7 @@ struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
struct page *page;
int err;
repeat:
- page = grab_cache_page_write_begin(NODE_MAPPING(sbi),
- nid, AOP_FLAG_NOFS);
+ page = grab_cache_page(NODE_MAPPING(sbi), nid);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -967,7 +981,6 @@ repeat:
goto repeat;
}
got_it:
- mark_page_accessed(page);
return page;
}
@@ -1022,7 +1035,6 @@ page_hit:
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
- mark_page_accessed(page);
return page;
}
@@ -1196,6 +1208,8 @@ static int f2fs_write_node_page(struct page *page,
.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
};
+ trace_f2fs_writepage(page, NODE);
+
if (unlikely(sbi->por_doing))
goto redirty_out;
@@ -1217,20 +1231,17 @@ static int f2fs_write_node_page(struct page *page,
if (wbc->for_reclaim)
goto redirty_out;
- mutex_lock(&sbi->node_write);
+ down_read(&sbi->node_write);
set_page_writeback(page);
write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
dec_page_count(sbi, F2FS_DIRTY_NODES);
- mutex_unlock(&sbi->node_write);
+ up_read(&sbi->node_write);
unlock_page(page);
return 0;
redirty_out:
- dec_page_count(sbi, F2FS_DIRTY_NODES);
- wbc->pages_skipped++;
- account_page_redirty(page);
- set_page_dirty(page);
+ redirty_page_for_writepage(wbc, page);
return AOP_WRITEPAGE_ACTIVATE;
}
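
With node_write now taken via down_read() around each node page write, multiple page writers can proceed in parallel; only the write-side holder (taken elsewhere, presumably at checkpoint time, which is not shown in this hunk) excludes them all. A userspace pthread analogue of the pattern, illustrative only:

#include <pthread.h>

static pthread_rwlock_t node_write = PTHREAD_RWLOCK_INITIALIZER;

static void write_one_node_page(void)
{
	pthread_rwlock_rdlock(&node_write);	/* many page writers at once */
	/* ... issue the node page write ... */
	pthread_rwlock_unlock(&node_write);
}

static void exclude_node_writers(void)
{
	pthread_rwlock_wrlock(&node_write);	/* write side: sole holder */
	/* ... e.g. a checkpoint-style flush ... */
	pthread_rwlock_unlock(&node_write);
}
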
@@ -1240,6 +1251,8 @@ static int f2fs_write_node_pages(struct address_space *mapping,
struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
long diff;
+ trace_f2fs_writepages(mapping->host, wbc, NODE);
+
/* balancing f2fs's metadata in background */
f2fs_balance_fs_bg(sbi);
@@ -1315,13 +1328,14 @@ static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
-static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
+static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
struct nat_entry *ne;
bool allocated = false;
- if (!available_free_memory(nm_i, FREE_NIDS))
+ if (!available_free_memory(sbi, FREE_NIDS))
return -1;
/* 0 nid should not be used */
@@ -1374,9 +1388,10 @@ static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
kmem_cache_free(free_nid_slab, i);
}
-static void scan_nat_page(struct f2fs_nm_info *nm_i,
+static void scan_nat_page(struct f2fs_sb_info *sbi,
struct page *nat_page, nid_t start_nid)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct f2fs_nat_block *nat_blk = page_address(nat_page);
block_t blk_addr;
int i;
@@ -1391,7 +1406,7 @@ static void scan_nat_page(struct f2fs_nm_info *nm_i,
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
f2fs_bug_on(blk_addr == NEW_ADDR);
if (blk_addr == NULL_ADDR) {
- if (add_free_nid(nm_i, start_nid, true) < 0)
+ if (add_free_nid(sbi, start_nid, true) < 0)
break;
}
}
@@ -1415,7 +1430,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
while (1) {
struct page *page = get_current_nat_page(sbi, nid);
- scan_nat_page(nm_i, page, nid);
+ scan_nat_page(sbi, page, nid);
f2fs_put_page(page, 1);
nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
@@ -1435,7 +1450,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
nid = le32_to_cpu(nid_in_journal(sum, i));
if (addr == NULL_ADDR)
- add_free_nid(nm_i, nid, true);
+ add_free_nid(sbi, nid, true);
else
remove_free_nid(nm_i, nid);
}
@@ -1452,7 +1467,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL;
retry:
- if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid))
+ if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
return false;
spin_lock(&nm_i->free_nid_list_lock);
@@ -1512,7 +1527,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
spin_lock(&nm_i->free_nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
f2fs_bug_on(!i || i->state != NID_ALLOC);
- if (!available_free_memory(nm_i, FREE_NIDS)) {
+ if (!available_free_memory(sbi, FREE_NIDS)) {
__del_from_free_nid_list(nm_i, i);
need_free = true;
} else {
@@ -1559,6 +1574,7 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
src_addr = inline_xattr_addr(page);
inline_size = inline_xattr_size(inode);
+ f2fs_wait_on_page_writeback(ipage, NODE);
memcpy(dst_addr, src_addr, inline_size);
update_inode(inode, ipage);
@@ -1572,8 +1588,6 @@ bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
nid_t new_xnid = nid_of_node(page);
struct node_info ni;
- recover_inline_xattr(inode, page);
-
if (!f2fs_has_xattr_block(ofs_of_node(page)))
return false;
@@ -1614,6 +1628,11 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
struct node_info old_ni, new_ni;
struct page *ipage;
+ get_node_info(sbi, ino, &old_ni);
+
+ if (unlikely(old_ni.blk_addr != NULL_ADDR))
+ return -EINVAL;
+
ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
if (!ipage)
return -ENOMEM;
@@ -1621,7 +1640,6 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
/* Should not use this inode from free nid list */
remove_free_nid(NM_I(sbi), ino);
- get_node_info(sbi, ino, &old_ni);
SetPageUptodate(ipage);
fill_node_footer(ipage, ino, ino, 0, true);
@@ -1647,35 +1665,29 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
/*
* ra_sum_pages() merge contiguous pages into one bio and submit.
- * these pre-readed pages are linked in pages list.
+ * these pre-read pages are allocated in bd_inode's mapping tree.
*/
-static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
+static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
int start, int nrpages)
{
- struct page *page;
- int page_idx = start;
+ struct inode *inode = sbi->sb->s_bdev->bd_inode;
+ struct address_space *mapping = inode->i_mapping;
+ int i, page_idx = start;
struct f2fs_io_info fio = {
.type = META,
.rw = READ_SYNC | REQ_META | REQ_PRIO
};
- for (; page_idx < start + nrpages; page_idx++) {
- /* alloc temporal page for read node summary info*/
- page = alloc_page(GFP_F2FS_ZERO);
- if (!page)
+ for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
+ /* alloc page in bd_inode for reading node summary info */
+ pages[i] = grab_cache_page(mapping, page_idx);
+ if (!pages[i])
break;
-
- lock_page(page);
- page->index = page_idx;
- list_add_tail(&page->lru, pages);
+ f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
}
- list_for_each_entry(page, pages, lru)
- f2fs_submit_page_mbio(sbi, page, page->index, &fio);
-
f2fs_submit_merged_bio(sbi, META, READ);
-
- return page_idx - start;
+ return i;
}
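
The reworked helper returns how many pages it actually managed to grab before the first failure, and the caller is expected to consume and release exactly that many. A userspace sketch of the same batched contract, using pread() in place of page-cache reads (illustrative names):

#include <stdlib.h>
#include <unistd.h>

#define BLKSZ 4096	/* assumption: one summary block per 4KB page */

static int ra_blocks(int fd, char **bufs, off_t start, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		bufs[i] = malloc(BLKSZ);
		if (!bufs[i])
			break;			/* report the partial batch */
		if (pread(fd, bufs[i], BLKSZ, (start + i) * BLKSZ) != BLKSZ) {
			free(bufs[i]);
			break;
		}
	}
	return i;	/* caller consumes and frees exactly i buffers */
}
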
int restore_node_summary(struct f2fs_sb_info *sbi,
@@ -1683,11 +1695,11 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
{
struct f2fs_node *rn;
struct f2fs_summary *sum_entry;
- struct page *page, *tmp;
+ struct inode *inode = sbi->sb->s_bdev->bd_inode;
block_t addr;
int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
- int i, last_offset, nrpages, err = 0;
- LIST_HEAD(page_list);
+ struct page *pages[bio_blocks];
+ int i, idx, last_offset, nrpages, err = 0;
/* scan the node segment */
last_offset = sbi->blocks_per_seg;
@@ -1698,34 +1710,119 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
nrpages = min(last_offset - i, bio_blocks);
/* read ahead node pages */
- nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages);
+ nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
if (!nrpages)
return -ENOMEM;
- list_for_each_entry_safe(page, tmp, &page_list, lru) {
+ for (idx = 0; idx < nrpages; idx++) {
if (err)
goto skip;
- lock_page(page);
- if (unlikely(!PageUptodate(page))) {
+ lock_page(pages[idx]);
+ if (unlikely(!PageUptodate(pages[idx]))) {
err = -EIO;
} else {
- rn = F2FS_NODE(page);
+ rn = F2FS_NODE(pages[idx]);
sum_entry->nid = rn->footer.nid;
sum_entry->version = 0;
sum_entry->ofs_in_node = 0;
sum_entry++;
}
- unlock_page(page);
+ unlock_page(pages[idx]);
skip:
- list_del(&page->lru);
- __free_pages(page, 0);
+ page_cache_release(pages[idx]);
}
+
+ invalidate_mapping_pages(inode->i_mapping, addr,
+ addr + nrpages);
}
return err;
}
-static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
+static struct nat_entry_set *grab_nat_entry_set(void)
+{
+ struct nat_entry_set *nes =
+ f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);
+
+ nes->entry_cnt = 0;
+ INIT_LIST_HEAD(&nes->set_list);
+ INIT_LIST_HEAD(&nes->entry_list);
+ return nes;
+}
+
+static void release_nat_entry_set(struct nat_entry_set *nes,
+ struct f2fs_nm_info *nm_i)
+{
+ f2fs_bug_on(!list_empty(&nes->entry_list));
+
+ nm_i->dirty_nat_cnt -= nes->entry_cnt;
+ list_del(&nes->set_list);
+ kmem_cache_free(nat_entry_set_slab, nes);
+}
+
+static void adjust_nat_entry_set(struct nat_entry_set *nes,
+ struct list_head *head)
+{
+ struct nat_entry_set *next = nes;
+
+ if (list_is_last(&nes->set_list, head))
+ return;
+
+ list_for_each_entry_continue(next, head, set_list)
+ if (nes->entry_cnt <= next->entry_cnt)
+ break;
+
+ list_move_tail(&nes->set_list, &next->set_list);
+}
+
+static void add_nat_entry(struct nat_entry *ne, struct list_head *head)
+{
+ struct nat_entry_set *nes;
+ nid_t start_nid = START_NID(ne->ni.nid);
+
+ list_for_each_entry(nes, head, set_list) {
+ if (nes->start_nid == start_nid) {
+ list_move_tail(&ne->list, &nes->entry_list);
+ nes->entry_cnt++;
+ adjust_nat_entry_set(nes, head);
+ return;
+ }
+ }
+
+ nes = grab_nat_entry_set();
+
+ nes->start_nid = start_nid;
+ list_move_tail(&ne->list, &nes->entry_list);
+ nes->entry_cnt++;
+ list_add(&nes->set_list, head);
+}
+
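
add_nat_entry() files each dirty entry under the set covering its NAT block (keyed by START_NID), and adjust_nat_entry_set() keeps the set list sorted by rising entry_cnt so the flush path can try the journal for the smallest sets first. A minimal userspace sketch of that sorted grouping (illustrative names, singly linked for brevity):

struct set {
	unsigned int start_nid;	/* first nid of the covered NAT block */
	int cnt;		/* dirty entries collected so far */
	struct set *next;	/* list kept in ascending cnt order */
};

/* Account one more entry to 's' and re-insert it so the list stays
 * sorted, mirroring adjust_nat_entry_set() above. */
static struct set *bump_and_sort(struct set *head, struct set *s)
{
	struct set **pp = &head;

	while (*pp != s)		/* unlink s */
		pp = &(*pp)->next;
	*pp = s->next;
	s->cnt++;

	for (pp = &head; *pp && (*pp)->cnt <= s->cnt; pp = &(*pp)->next)
		;			/* find the first larger set */
	s->next = *pp;
	*pp = s;
	return head;
}
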
+static void merge_nats_in_set(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct list_head *dirty_list = &nm_i->dirty_nat_entries;
+ struct list_head *set_list = &nm_i->nat_entry_set;
+ struct nat_entry *ne, *tmp;
+
+ write_lock(&nm_i->nat_tree_lock);
+ list_for_each_entry_safe(ne, tmp, dirty_list, list) {
+ if (nat_get_blkaddr(ne) == NEW_ADDR)
+ continue;
+ add_nat_entry(ne, set_list);
+ nm_i->dirty_nat_cnt++;
+ }
+ write_unlock(&nm_i->nat_tree_lock);
+}
+
+static bool __has_cursum_space(struct f2fs_summary_block *sum, int size)
+{
+ if (nats_in_cursum(sum) + size <= NAT_JOURNAL_ENTRIES)
+ return true;
+ else
+ return false;
+}
+
+static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -1733,12 +1830,6 @@ static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
int i;
mutex_lock(&curseg->curseg_mutex);
-
- if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
- mutex_unlock(&curseg->curseg_mutex);
- return false;
- }
-
for (i = 0; i < nats_in_cursum(sum); i++) {
struct nat_entry *ne;
struct f2fs_nat_entry raw_ne;
@@ -1748,25 +1839,21 @@ static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
retry:
write_lock(&nm_i->nat_tree_lock);
ne = __lookup_nat_cache(nm_i, nid);
- if (ne) {
- __set_nat_cache_dirty(nm_i, ne);
- write_unlock(&nm_i->nat_tree_lock);
- continue;
- }
+ if (ne)
+ goto found;
+
ne = grab_nat_entry(nm_i, nid);
if (!ne) {
write_unlock(&nm_i->nat_tree_lock);
goto retry;
}
- nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
- nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
- nat_set_version(ne, raw_ne.version);
+ node_info_from_raw_nat(&ne->ni, &raw_ne);
+found:
__set_nat_cache_dirty(nm_i, ne);
write_unlock(&nm_i->nat_tree_lock);
}
update_nats_in_cursum(sum, -i);
mutex_unlock(&curseg->curseg_mutex);
- return true;
}
/*
@@ -1777,85 +1864,91 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
- struct nat_entry *ne, *cur;
- struct page *page = NULL;
- struct f2fs_nat_block *nat_blk = NULL;
- nid_t start_nid = 0, end_nid = 0;
- bool flushed;
+ struct nat_entry_set *nes, *tmp;
+ struct list_head *head = &nm_i->nat_entry_set;
+ bool to_journal = true;
- flushed = flush_nats_in_journal(sbi);
+ /* merge nat entries of dirty list to nat entry set temporarily */
+ merge_nats_in_set(sbi);
- if (!flushed)
- mutex_lock(&curseg->curseg_mutex);
-
- /* 1) flush dirty nat caches */
- list_for_each_entry_safe(ne, cur, &nm_i->dirty_nat_entries, list) {
- nid_t nid;
- struct f2fs_nat_entry raw_ne;
- int offset = -1;
- block_t new_blkaddr;
+ /*
+ * if there is not enough space in the journal to store dirty nat
+ * entries, remove all entries from journal and merge them
+ * into nat entry set.
+ */
+ if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt)) {
+ remove_nats_in_journal(sbi);
- if (nat_get_blkaddr(ne) == NEW_ADDR)
- continue;
-
- nid = nat_get_nid(ne);
-
- if (flushed)
- goto to_nat_page;
+ /*
+ * merge nat entries of dirty list to nat entry set temporarily
+ */
+ merge_nats_in_set(sbi);
+ }
- /* if there is room for nat enries in curseg->sumpage */
- offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
- if (offset >= 0) {
- raw_ne = nat_in_journal(sum, offset);
- goto flush_now;
- }
-to_nat_page:
- if (!page || (start_nid > nid || nid > end_nid)) {
- if (page) {
- f2fs_put_page(page, 1);
- page = NULL;
- }
- start_nid = START_NID(nid);
- end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
+ if (!nm_i->dirty_nat_cnt)
+ return;
- /*
- * get nat block with dirty flag, increased reference
- * count, mapped and lock
- */
+ /*
+ * there are two steps to flush nat entries:
+ * #1, flush nat entries to journal in current hot data summary block.
+ * #2, flush nat entries to nat page.
+ */
+ list_for_each_entry_safe(nes, tmp, head, set_list) {
+ struct f2fs_nat_block *nat_blk;
+ struct nat_entry *ne, *cur;
+ struct page *page;
+ nid_t start_nid = nes->start_nid;
+
+ if (to_journal && !__has_cursum_space(sum, nes->entry_cnt))
+ to_journal = false;
+
+ if (to_journal) {
+ mutex_lock(&curseg->curseg_mutex);
+ } else {
page = get_next_nat_page(sbi, start_nid);
nat_blk = page_address(page);
+ f2fs_bug_on(!nat_blk);
}
- f2fs_bug_on(!nat_blk);
- raw_ne = nat_blk->entries[nid - start_nid];
-flush_now:
- new_blkaddr = nat_get_blkaddr(ne);
-
- raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
- raw_ne.block_addr = cpu_to_le32(new_blkaddr);
- raw_ne.version = nat_get_version(ne);
+ /* flush dirty nats in nat entry set */
+ list_for_each_entry_safe(ne, cur, &nes->entry_list, list) {
+ struct f2fs_nat_entry *raw_ne;
+ nid_t nid = nat_get_nid(ne);
+ int offset;
+
+ if (to_journal) {
+ offset = lookup_journal_in_cursum(sum,
+ NAT_JOURNAL, nid, 1);
+ f2fs_bug_on(offset < 0);
+ raw_ne = &nat_in_journal(sum, offset);
+ nid_in_journal(sum, offset) = cpu_to_le32(nid);
+ } else {
+ raw_ne = &nat_blk->entries[nid - start_nid];
+ }
+ raw_nat_from_node_info(raw_ne, &ne->ni);
- if (offset < 0) {
- nat_blk->entries[nid - start_nid] = raw_ne;
- } else {
- nat_in_journal(sum, offset) = raw_ne;
- nid_in_journal(sum, offset) = cpu_to_le32(nid);
+ if (nat_get_blkaddr(ne) == NULL_ADDR &&
+ add_free_nid(sbi, nid, false) <= 0) {
+ write_lock(&nm_i->nat_tree_lock);
+ __del_from_nat_cache(nm_i, ne);
+ write_unlock(&nm_i->nat_tree_lock);
+ } else {
+ write_lock(&nm_i->nat_tree_lock);
+ __clear_nat_cache_dirty(nm_i, ne);
+ write_unlock(&nm_i->nat_tree_lock);
+ }
}
- if (nat_get_blkaddr(ne) == NULL_ADDR &&
- add_free_nid(NM_I(sbi), nid, false) <= 0) {
- write_lock(&nm_i->nat_tree_lock);
- __del_from_nat_cache(nm_i, ne);
- write_unlock(&nm_i->nat_tree_lock);
- } else {
- write_lock(&nm_i->nat_tree_lock);
- __clear_nat_cache_dirty(nm_i, ne);
- write_unlock(&nm_i->nat_tree_lock);
- }
+ if (to_journal)
+ mutex_unlock(&curseg->curseg_mutex);
+ else
+ f2fs_put_page(page, 1);
+
+ release_nat_entry_set(nes, nm_i);
}
- if (!flushed)
- mutex_unlock(&curseg->curseg_mutex);
- f2fs_put_page(page, 1);
+
+ f2fs_bug_on(!list_empty(head));
+ f2fs_bug_on(nm_i->dirty_nat_cnt);
}
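
Combined with the ascending sort, the loop above yields a simple policy: sets go to the in-summary journal while their entries still fit alongside what is already there, and from the first set that does not fit onward everything goes to on-disk NAT pages. A standalone sketch of that decision, with capacity standing in for NAT_JOURNAL_ENTRIES:

/* Illustrative only: set_cnt[] is sorted ascending, like the set list;
 * 'already' mirrors nats_in_cursum() at entry. */
static int flush_sets(const int *set_cnt, int nr_sets, int already,
			int capacity)
{
	int in_journal = already, to_journal = 1, i;

	for (i = 0; i < nr_sets; i++) {
		if (to_journal && in_journal + set_cnt[i] > capacity)
			to_journal = 0;	/* stays false for all larger sets */
		if (to_journal)
			in_journal += set_cnt[i];	/* journal path */
		/* else: write entries into the NAT block page instead */
	}
	return in_journal;	/* entries that ended up journalled */
}
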
static int init_node_manager(struct f2fs_sb_info *sbi)
@@ -1871,8 +1964,10 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
+ nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
+
/* not used nids: 0, node, meta, (and root counted as valid node) */
- nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks - 3;
+ nm_i->available_nids = nm_i->max_nid - 3;
nm_i->fcnt = 0;
nm_i->nat_cnt = 0;
nm_i->ram_thresh = DEF_RAM_THRESHOLD;
@@ -1882,6 +1977,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
INIT_LIST_HEAD(&nm_i->nat_entries);
INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
+ INIT_LIST_HEAD(&nm_i->nat_entry_set);
mutex_init(&nm_i->build_lock);
spin_lock_init(&nm_i->free_nid_list_lock);
@@ -1962,19 +2058,30 @@ int __init create_node_manager_caches(void)
nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
sizeof(struct nat_entry));
if (!nat_entry_slab)
- return -ENOMEM;
+ goto fail;
free_nid_slab = f2fs_kmem_cache_create("free_nid",
sizeof(struct free_nid));
- if (!free_nid_slab) {
- kmem_cache_destroy(nat_entry_slab);
- return -ENOMEM;
- }
+ if (!free_nid_slab)
+ goto destroy_nat_entry;
+
+ nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
+ sizeof(struct nat_entry_set));
+ if (!nat_entry_set_slab)
+ goto destroy_free_nid;
return 0;
+
+destroy_free_nid:
+ kmem_cache_destroy(free_nid_slab);
+destroy_nat_entry:
+ kmem_cache_destroy(nat_entry_slab);
+fail:
+ return -ENOMEM;
}
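
The reworked error path uses the conventional goto-unwind ladder: each later failure jumps to a label that releases everything set up before it, in reverse order, with a single return at the bottom. A generic standalone sketch of the same shape (hypothetical resources):

#include <stdlib.h>

static int create_three(void **a, void **b, void **c)
{
	*a = malloc(16);
	if (!*a)
		goto fail;
	*b = malloc(16);
	if (!*b)
		goto free_a;
	*c = malloc(16);
	if (!*c)
		goto free_b;
	return 0;

free_b:
	free(*b);
free_a:
	free(*a);
fail:
	return -1;	/* -ENOMEM in the kernel version */
}
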
void destroy_node_manager_caches(void)
{
+ kmem_cache_destroy(nat_entry_set_slab);
kmem_cache_destroy(free_nid_slab);
kmem_cache_destroy(nat_entry_slab);
}