Diffstat (limited to 'fs/xfs/xfs_file.c')
-rw-r--r--	fs/xfs/xfs_file.c	66
1 file changed, 30 insertions(+), 36 deletions(-)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index b19916b11fd5..c6de6b865ef1 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1425,6 +1425,8 @@ xfs_dax_read_fault(
struct xfs_inode *ip = XFS_I(file_inode(vmf->vma->vm_file));
vm_fault_t ret;
+ trace_xfs_read_fault(ip, order);
+
xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
ret = xfs_dax_fault_locked(vmf, order, false);
xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
@@ -1432,6 +1434,16 @@ xfs_dax_read_fault(
return ret;
}
+/*
+ * Locking for serialisation of IO during page faults. This results in a lock
+ * ordering of:
+ *
+ * mmap_lock (MM)
+ * sb_start_pagefault(vfs, freeze)
+ * invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
+ * page_lock (MM)
+ * i_lock (XFS - extent map serialisation)
+ */
static vm_fault_t
xfs_write_fault(
struct vm_fault *vmf,
@@ -1442,6 +1454,8 @@ xfs_write_fault(
unsigned int lock_mode = XFS_MMAPLOCK_SHARED;
vm_fault_t ret;
+ trace_xfs_write_fault(ip, order);
+
sb_start_pagefault(inode->i_sb);
file_update_time(vmf->vma->vm_file);
@@ -1460,40 +1474,13 @@ xfs_write_fault(
if (IS_DAX(inode))
ret = xfs_dax_fault_locked(vmf, order, true);
else
- ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
+ ret = iomap_page_mkwrite(vmf, &xfs_buffered_write_iomap_ops);
xfs_iunlock(ip, lock_mode);
sb_end_pagefault(inode->i_sb);
return ret;
}
-/*
- * Locking for serialisation of IO during page faults. This results in a lock
- * ordering of:
- *
- * mmap_lock (MM)
- * sb_start_pagefault(vfs, freeze)
- * invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
- * page_lock (MM)
- * i_lock (XFS - extent map serialisation)
- */
-static vm_fault_t
-__xfs_filemap_fault(
- struct vm_fault *vmf,
- unsigned int order,
- bool write_fault)
-{
- struct inode *inode = file_inode(vmf->vma->vm_file);
-
- trace_xfs_filemap_fault(XFS_I(inode), order, write_fault);
-
- if (write_fault)
- return xfs_write_fault(vmf, order);
- if (IS_DAX(inode))
- return xfs_dax_read_fault(vmf, order);
- return filemap_fault(vmf);
-}
-
static inline bool
xfs_is_write_fault(
struct vm_fault *vmf)
@@ -1506,10 +1493,17 @@ static vm_fault_t
xfs_filemap_fault(
struct vm_fault *vmf)
{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+
/* DAX can shortcut the normal fault path on write faults! */
- return __xfs_filemap_fault(vmf, 0,
- IS_DAX(file_inode(vmf->vma->vm_file)) &&
- xfs_is_write_fault(vmf));
+ if (IS_DAX(inode)) {
+ if (xfs_is_write_fault(vmf))
+ return xfs_write_fault(vmf, 0);
+ return xfs_dax_read_fault(vmf, 0);
+ }
+
+ trace_xfs_read_fault(XFS_I(inode), 0);
+ return filemap_fault(vmf);
}
static vm_fault_t
@@ -1521,15 +1515,16 @@ xfs_filemap_huge_fault(
return VM_FAULT_FALLBACK;
/* DAX can shortcut the normal fault path on write faults! */
- return __xfs_filemap_fault(vmf, order,
- xfs_is_write_fault(vmf));
+ if (xfs_is_write_fault(vmf))
+ return xfs_write_fault(vmf, order);
+ return xfs_dax_read_fault(vmf, order);
}
static vm_fault_t
xfs_filemap_page_mkwrite(
struct vm_fault *vmf)
{
- return __xfs_filemap_fault(vmf, 0, true);
+ return xfs_write_fault(vmf, 0);
}
/*
@@ -1541,8 +1536,7 @@ static vm_fault_t
xfs_filemap_pfn_mkwrite(
struct vm_fault *vmf)
{
-
- return __xfs_filemap_fault(vmf, 0, true);
+ return xfs_write_fault(vmf, 0);
}
static const struct vm_operations_struct xfs_file_vm_ops = {
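For reference, the table whose definition opens on the final context line is what wires these handlers into the MM core. A minimal sketch of how it plausibly reads in the surrounding file; the initializer itself lies beyond the hunk context, so the exact field list is an assumption based on the handlers named in this patch:

static const struct vm_operations_struct xfs_file_vm_ops = {
	/* Sketch, not part of this patch: the initializer is outside the hunk. */
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,		/* generic, no XFS locking needed */
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

With the split above, each callback maps directly onto xfs_write_fault(), xfs_dax_read_fault() or filemap_fault() instead of routing a write_fault boolean through __xfs_filemap_fault().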