Diffstat (limited to 'fs/xfs/xfs_aops.c')
 fs/xfs/xfs_aops.c | 37 ++++++++++++++++++++++++++++---------
 1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 29e7e5dd5178..da72090b9ce7 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -288,7 +288,7 @@ xfs_map_blocks(
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- ssize_t count = 1 << inode->i_blkbits;
+ ssize_t count = i_blocksize(inode);
xfs_fileoff_t offset_fsb, end_fsb;
int error = 0;
int bmapi_flags = XFS_BMAPI_ENTIRE;
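
Annotation: the open-coded "1 << inode->i_blkbits" expressions throughout this file are switched to the i_blocksize() helper. For reference, a minimal sketch of that helper, assuming the usual include/linux/fs.h definition, is:

	static inline unsigned int i_blocksize(const struct inode *node)
	{
		/* Block size in bytes derived from the inode's block-size shift. */
		return (1 << node->i_blkbits);
	}

The behaviour is identical; the helper only makes the intent explicit and keeps the shift in one place.
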
@@ -310,7 +310,7 @@ xfs_map_blocks(
(ip->i_df.if_flags & XFS_IFEXTENTS));
ASSERT(offset <= mp->m_super->s_maxbytes);
- if (offset + count > mp->m_super->s_maxbytes)
+ if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
count = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
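
Annotation: the cast to xfs_ufsize_t in the s_maxbytes check (here and again in __xfs_get_blocks() below) presumably avoids a signed overflow when a very large offset plus the byte count no longer fits in the signed offset type. A small userspace sketch of the pattern, with hypothetical stand-in types and values:

	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t  xfs_off_t;    /* stand-in for the signed file offset  */
	typedef uint64_t xfs_ufsize_t; /* stand-in for the unsigned file size  */

	int main(void)
	{
		xfs_off_t    offset   = INT64_MAX - 512; /* hypothetical huge offset */
		int64_t      count    = 4096;
		xfs_ufsize_t maxbytes = (xfs_ufsize_t)INT64_MAX;

		/* Doing the addition in the unsigned type keeps it well defined,
		 * so the clamp against s_maxbytes still triggers. */
		if ((xfs_ufsize_t)offset + (xfs_ufsize_t)count > maxbytes)
			count = (int64_t)(maxbytes - (xfs_ufsize_t)offset);

		printf("clamped count = %lld\n", (long long)count);
		return 0;
	}
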
@@ -921,7 +921,7 @@ xfs_aops_discard_page(
break;
}
next_buffer:
- offset += 1 << inode->i_blkbits;
+ offset += i_blocksize(inode);
} while ((bh = bh->b_this_page) != head);
@@ -1360,10 +1360,10 @@ xfs_map_trim_size(
if (mapping_size > size)
mapping_size = size;
if (offset < i_size_read(inode) &&
- offset + mapping_size >= i_size_read(inode)) {
+ (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
/* limit mapping to block that spans EOF */
mapping_size = roundup_64(i_size_read(inode) - offset,
- 1 << inode->i_blkbits);
+ i_blocksize(inode));
}
if (mapping_size > LONG_MAX)
mapping_size = LONG_MAX;
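
Annotation: the trim above limits the mapping to the block that spans EOF by rounding the remaining bytes up to the inode block size. A hypothetical userspace sketch of roundup_64() and the trim arithmetic, with assumed sizes:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the kernel's roundup_64():
	 * round x up to the next multiple of y. */
	static uint64_t roundup_64(uint64_t x, uint64_t y)
	{
		return ((x + y - 1) / y) * y;
	}

	int main(void)
	{
		uint64_t isize  = 10000; /* assumed i_size for illustration      */
		uint64_t offset = 8192;  /* start of the mapping being trimmed   */
		uint64_t blksz  = 4096;  /* i_blocksize(inode)                   */

		/* 10000 - 8192 = 1808 bytes remain to EOF; rounding up to the
		 * block size yields a 4096-byte mapping that spans EOF. */
		uint64_t mapping_size = roundup_64(isize - offset, blksz);
		printf("mapping_size = %llu\n", (unsigned long long)mapping_size);
		return 0;
	}
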
@@ -1395,7 +1395,7 @@ __xfs_get_blocks(
return -EIO;
offset = (xfs_off_t)iblock << inode->i_blkbits;
- ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+ ASSERT(bh_result->b_size >= i_blocksize(inode));
size = bh_result->b_size;
if (!create && direct && offset >= i_size_read(inode))
@@ -1416,7 +1416,7 @@ __xfs_get_blocks(
}
ASSERT(offset <= mp->m_super->s_maxbytes);
- if (offset + size > mp->m_super->s_maxbytes)
+ if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
size = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1426,6 +1426,26 @@ __xfs_get_blocks(
if (error)
goto out_unlock;
+ /*
+ * The only time we can ever safely find delalloc blocks on direct I/O
+ * is a dio write to post-eof speculative preallocation. All other
+ * scenarios are indicative of a problem or misuse (such as mixing
+ * direct and mapped I/O).
+ *
+ * The file may be unmapped by the time we get here so we cannot
+ * reliably fail the I/O based on mapping. Instead, fail the I/O if this
+ * is a read or a write within eof. Otherwise, carry on but warn as a
+ * precaution if the file happens to be mapped.
+ */
+ if (direct && imap.br_startblock == DELAYSTARTBLOCK) {
+ if (!create || offset < i_size_read(VFS_I(ip))) {
+ WARN_ON_ONCE(1);
+ error = -EIO;
+ goto out_unlock;
+ }
+ WARN_ON_ONCE(mapping_mapped(VFS_I(ip)->i_mapping));
+ }
+
/* for DAX, we convert unwritten extents directly */
if (create &&
(!nimaps ||
@@ -1525,7 +1545,6 @@ __xfs_get_blocks(
set_buffer_new(bh_result);
if (imap.br_startblock == DELAYSTARTBLOCK) {
- BUG_ON(direct);
if (create) {
set_buffer_uptodate(bh_result);
set_buffer_mapped(bh_result);
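
Annotation: the new check introduced above replaces the BUG_ON(direct) removed here. Instead of crashing, a delalloc extent seen by direct I/O now fails with -EIO unless it is a write at or beyond EOF (i.e. into post-EOF speculative preallocation). A hypothetical userspace model of that decision, for illustration only:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical model of the new __xfs_get_blocks() check: a delalloc
	 * extent found by direct I/O is only tolerated for a write that starts
	 * at or beyond EOF. */
	static int check_dio_delalloc(bool direct, bool is_delalloc, bool create,
				      long long offset, long long isize)
	{
		if (!direct || !is_delalloc)
			return 0;
		if (!create || offset < isize)
			return -5; /* -EIO: read, or write within EOF, over delalloc */
		return 0;          /* post-EOF dio write: allowed, warn if mapped */
	}

	int main(void)
	{
		/* dio write inside EOF over delalloc -> -5 (-EIO) */
		printf("%d\n", check_dio_delalloc(true, true, true, 4096, 8192));
		/* dio write beyond EOF over delalloc -> 0 (allowed) */
		printf("%d\n", check_dio_delalloc(true, true, true, 16384, 8192));
		return 0;
	}
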
@@ -1968,7 +1987,7 @@ xfs_vm_set_page_dirty(
if (offset < end_offset)
set_buffer_dirty(bh);
bh = bh->b_this_page;
- offset += 1 << inode->i_blkbits;
+ offset += i_blocksize(inode);
} while (bh != head);
}
/*