Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--  mm/swapfile.c  84
1 file changed, 75 insertions(+), 9 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index dea145ed1868..204224d8e0d1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -75,8 +75,8 @@ PLIST_HEAD(swap_active_head);
  * is held and the locking order requires swap_lock to be taken
  * before any swap_info_struct->lock.
  */
-static PLIST_HEAD(swap_avail_head);
-static DEFINE_SPINLOCK(swap_avail_lock);
+PLIST_HEAD(swap_avail_head);
+DEFINE_SPINLOCK(swap_avail_lock);
 
 struct swap_info_struct *swap_info[MAX_SWAPFILES];
 
@@ -91,6 +91,26 @@ static inline unsigned char swap_count(unsigned char ent)
 	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
 }
 
+bool is_swap_fast(swp_entry_t entry)
+{
+	struct swap_info_struct *p;
+	unsigned long type;
+
+	if (non_swap_entry(entry))
+		return false;
+
+	type = swp_type(entry);
+	if (type >= nr_swapfiles)
+		return false;
+
+	p = swap_info[type];
+
+	if (p->flags & SWP_FAST)
+		return true;
+
+	return false;
+}
+
 /* returns 1 if swap entry is freed */
 static int __try_to_reclaim_swap(struct swap_info_struct *si,
 				 unsigned long offset)
@@ -193,7 +213,6 @@ static void discard_swap_cluster(struct swap_info_struct *si,
 	}
 }
 
-#define SWAPFILE_CLUSTER	256
 #define LATENCY_LIMIT		256
 
 static inline void cluster_set_flag(struct swap_cluster_info *info,
@@ -564,7 +583,7 @@ checks:
 		scan_base = offset = si->lowest_bit;
 
 	/* reuse swap entry of cache-only swap if not busy. */
-	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+	if (vm_swap_full(si) && si->swap_map[offset] == SWAP_HAS_CACHE) {
 		int swap_was_freed;
 		spin_unlock(&si->lock);
 		swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -604,7 +623,8 @@ scan:
 			spin_lock(&si->lock);
 			goto checks;
 		}
-		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+		if (vm_swap_full(si) &&
+			si->swap_map[offset] == SWAP_HAS_CACHE) {
 			spin_lock(&si->lock);
 			goto checks;
 		}
@@ -619,7 +639,8 @@ scan:
 			spin_lock(&si->lock);
 			goto checks;
 		}
-		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+		if (vm_swap_full(si) &&
+			si->swap_map[offset] == SWAP_HAS_CACHE) {
 			spin_lock(&si->lock);
 			goto checks;
 		}
@@ -640,18 +661,39 @@ swp_entry_t get_swap_page(void)
 {
 	struct swap_info_struct *si, *next;
 	pgoff_t offset;
+	int swap_ratio_off = 0;
 
 	if (atomic_long_read(&nr_swap_pages) <= 0)
 		goto noswap;
 	atomic_long_dec(&nr_swap_pages);
 
+lock_and_start:
 	spin_lock(&swap_avail_lock);
 
 start_over:
 	plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
+
+		if (sysctl_swap_ratio && !swap_ratio_off) {
+			int ret;
+
+			spin_unlock(&swap_avail_lock);
+			ret = swap_ratio(&si);
+			if (0 > ret) {
+				/*
+				 * Error. Start again with swap
+				 * ratio disabled.
+				 */
+				swap_ratio_off = 1;
+				goto lock_and_start;
+			} else {
+				goto start;
+			}
+		}
+
 		/* requeue si to after same-priority siblings */
 		plist_requeue(&si->avail_list, &swap_avail_head);
 		spin_unlock(&swap_avail_lock);
+start:
 		spin_lock(&si->lock);
 		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
 			spin_lock(&swap_avail_lock);
@@ -932,11 +974,25 @@ int reuse_swap_page(struct page *page)
 	count = page_mapcount(page);
 	if (count <= 1 && PageSwapCache(page)) {
 		count += page_swapcount(page);
-		if (count == 1 && !PageWriteback(page)) {
+		if (count != 1)
+			goto out;
+		if (!PageWriteback(page)) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
+		} else {
+			swp_entry_t entry;
+			struct swap_info_struct *p;
+
+			entry.val = page_private(page);
+			p = swap_info_get(entry);
+			if (p->flags & SWP_STABLE_WRITES) {
+				spin_unlock(&p->lock);
+				return false;
+			}
+			spin_unlock(&p->lock);
 		}
 	}
+out:
 	return count <= 1;
 }
 
@@ -1008,7 +1064,8 @@ int free_swap_and_cache(swp_entry_t entry)
 		 * Also recheck PageSwapCache now page is locked (above).
 		 */
 		if (PageSwapCache(page) && !PageWriteback(page) &&
-				(!page_mapped(page) || vm_swap_full())) {
+			(!page_mapped(page) ||
+			vm_swap_full(page_swap_info(page)))) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
@@ -2481,6 +2538,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		error = -ENOMEM;
 		goto bad_swap;
 	}
+
+	if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
+		p->flags |= SWP_STABLE_WRITES;
+
 	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
 		int cpu;
 
@@ -2553,11 +2614,16 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		}
 	}
 
+	if (p->bdev && blk_queue_fast(bdev_get_queue(p->bdev)))
+		p->flags |= SWP_FAST;
+
 	mutex_lock(&swapon_mutex);
 	prio = -1;
-	if (swap_flags & SWAP_FLAG_PREFER)
+	if (swap_flags & SWAP_FLAG_PREFER) {
 		prio =
 		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
+		setup_swap_ratio(p, prio);
+	}
 	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
 
 	pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
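Note: the hunks above make is_swap_fast() non-static and set SWP_FAST at swapon time, but no caller of the helper appears in this file. The sketch below shows one plausible way a swap-in path could consult it, shrinking the readahead window for fast devices; the function name, its placement, and the fallback window of 8 pages are illustrative assumptions, not part of this patch.

/*
 * Hypothetical caller (not in this diff): request a single page when the
 * entry's backing device was flagged SWP_FAST at swapon time, and fall
 * back to a fixed window otherwise. is_swap_fast() already rejects
 * non-swap entries and out-of-range swap types, so no extra validation
 * is needed here.
 */
static unsigned long swapin_readahead_window(swp_entry_t entry)
{
	if (is_swap_fast(entry))
		return 1;	/* faults are cheap; skip readahead */

	return 8;		/* assumed default window, for illustration only */
}

The same per-device distinction motivates passing a swap_info_struct into vm_swap_full() in the hunks above, presumably so the fullness heuristic can be tuned per device; the vm_swap_full() definition itself lives in a header that this diff does not touch.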
