author	Markus Stockhausen <stockhausen@collogia.de>	2018-07-27 09:09:53 -0600
committer	Alistair Strachan <astrachan@google.com>	2019-01-23 21:46:33 +0000
commit	f2d65ea603e2043e80c63cf6dcd41d6a0ce8c841 (patch)
tree	d2412f69d6ba6acbc5ed6c3f470d4bf4828c57c3 /mm/readahead.c
parent	e7c8b35e486775af65636a2a2b2766c3def9a8e8 (diff)
UPSTREAM: readahead: stricter check for bdi io_pages
ondemand_readahead() checks bdi->io_pages to cap the maximum number of
pages that need to be processed. This works until the readit section: if
we do an async-only readahead (async size == sync size) and the target is
at the beginning of the window, we expand the window by another
get_next_ra_size() pages. blktrace for large reads shows that the kernel
always issues a read of double the expected size at the beginning of
processing. Add an additional check for io_pages in the lower part of the
function.

The fix helps devices that hard-limit bio pages and rely on proper
handling of max_hw_read_sectors (e.g. older FusionIO cards). For that
reason it could qualify for stable.

Fixes: 9491ae4a ("mm: don't cap request size based on read-ahead setting")
Change-Id: If111344b54897555085c2a6c442d697069962f11
Cc: stable@vger.kernel.org
Signed-off-by: Markus Stockhausen <stockhausen@collogia.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jaegeuk Kim <jaegeuk@google.com>
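For readers outside mm/, a minimal userspace sketch of the doubling the
commit describes. The helper next_ra_size() is a simplified stand-in for
the kernel's get_next_ra_size() (which quadruples small windows, doubles
larger ones, and clamps the increment to max); the page counts are
illustrative assumptions, not taken from the commit.

#include <stdio.h>

/* Simplified stand-in for get_next_ra_size(): grow the window
 * (4x when small, else 2x) and clamp the increment to max. */
static unsigned long next_ra_size(unsigned long cur, unsigned long max)
{
        unsigned long newsize = (cur < max / 16) ? 4 * cur : 2 * cur;
        return newsize < max ? newsize : max;
}

int main(void)
{
        unsigned long max_pages = 64;   /* e.g. capped by bdi->io_pages */
        unsigned long size = 64;        /* window already at the cap    */

        /* Pre-fix readit path: only the increment is clamped, so the
         * submitted window can still double to 2 * max_pages. */
        unsigned long add = next_ra_size(size, max_pages);
        printf("unpatched: %lu pages\n", size + add);   /* 128 */

        /* Patched path: clamp the total window to max_pages. */
        if (size + add <= max_pages)
                size += add;
        else
                size = max_pages;       /* async half = max_pages >> 1 */
        printf("patched:   %lu pages\n", size);         /* 64 */
        return 0;
}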
Diffstat (limited to 'mm/readahead.c')
-rw-r--r--	mm/readahead.c	12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 920aa20de891..f230b942cda2 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -381,6 +381,7 @@ ondemand_readahead(struct address_space *mapping,
 {
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages = ra->ra_pages;
+	unsigned long add_pages;
 	pgoff_t prev_offset;
 
 	/*
@@ -470,10 +471,17 @@ readit:
 	 * Will this read hit the readahead marker made by itself?
 	 * If so, trigger the readahead marker hit now, and merge
 	 * the resulted next readahead window into the current one.
+	 * Take care of maximum IO pages as above.
 	 */
 	if (offset == ra->start && ra->size == ra->async_size) {
-		ra->async_size = get_next_ra_size(ra, max_pages);
-		ra->size += ra->async_size;
+		add_pages = get_next_ra_size(ra, max_pages);
+		if (ra->size + add_pages <= max_pages) {
+			ra->async_size = add_pages;
+			ra->size += add_pages;
+		} else {
+			ra->size = max_pages;
+			ra->async_size = max_pages >> 1;
+		}
 	}
 
 	return ra_submit(ra, mapping, filp);
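Worked through with illustrative numbers (not from the commit): for
max_pages = 64 and a window already at ra->size = ra->async_size = 64,
get_next_ra_size() returns at most 64, so the old code could submit a
128-page read. The patched else branch clamps the window to 64 and sets
async_size to max_pages >> 1 = 32, which keeps the async readahead marker
at mid-window so a later marker hit still re-triggers readahead.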