 fs/xfs/libxfs/xfs_alloc.c |  2 --
 fs/xfs/libxfs/xfs_sb.c    | 14 ++++++++++++++
 fs/xfs/xfs_bmap_util.c    | 17 ++++-------------
 fs/xfs/xfs_file.c         | 66 ++++++++++++++++++++++++------------------------------------
 fs/xfs/xfs_iomap.c        | 17 ++++++++---------
 fs/xfs/xfs_iomap.h        |  1 -
 fs/xfs/xfs_trace.h        | 20 ++++++++++++--------
 7 files changed, 68 insertions(+), 69 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index d33c2fdaf4f2..a3bf7352a383 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -3158,8 +3158,6 @@ xfs_alloc_put_freelist(
 		logflags |= XFS_AGF_BTREEBLKS;
 	}
 
-	xfs_alloc_log_agf(tp, agbp, logflags);
-
 	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
 
 	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 21891aa10ada..5ca11c3c4711 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -400,6 +400,20 @@ xfs_validate_sb_common(
 				sbp->sb_inoalignmt, align);
 			return -EINVAL;
 		}
+
+		if (!sbp->sb_spino_align ||
+		    sbp->sb_spino_align > sbp->sb_inoalignmt ||
+		    (sbp->sb_inoalignmt % sbp->sb_spino_align) != 0) {
+			xfs_warn(mp,
+				"Sparse inode alignment (%u) is invalid.",
+				sbp->sb_spino_align);
+			return -EINVAL;
+		}
+	} else if (sbp->sb_spino_align) {
+		xfs_warn(mp,
+			"Sparse inode alignment (%u) should be zero.",
+			sbp->sb_spino_align);
+		return -EINVAL;
 	}
 } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
 			XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 1ac0b0facb7f..2c3b3ff6d7be 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -48,10 +48,6 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
 
 /*
  * Routine to zero an extent on disk allocated to the specific inode.
- *
- * The VFS functions take a linearised filesystem block offset, so we have to
- * convert the sparse xfs fsb to the right format first.
- * VFS types are real funky, too.
  */
 int
 xfs_zero_extent(
@@ -59,15 +55,10 @@ xfs_zero_extent(
 	xfs_fsblock_t		start_fsb,
 	xfs_off_t		count_fsb)
 {
-	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
-	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
-	sector_t		block = XFS_BB_TO_FSBT(mp, sector);
-
-	return blkdev_issue_zeroout(target->bt_bdev,
-			block << (mp->m_super->s_blocksize_bits - 9),
-			count_fsb << (mp->m_super->s_blocksize_bits - 9),
-			GFP_KERNEL, 0);
+	return blkdev_issue_zeroout(xfs_inode_buftarg(ip)->bt_bdev,
+			xfs_fsb_to_db(ip, start_fsb),
+			XFS_FSB_TO_BB(ip->i_mount, count_fsb),
+			GFP_KERNEL, 0);
 }
 
 /*
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index b19916b11fd5..c6de6b865ef1 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1425,6 +1425,8 @@ xfs_dax_read_fault(
 	struct xfs_inode	*ip = XFS_I(file_inode(vmf->vma->vm_file));
 	vm_fault_t		ret;
 
+	trace_xfs_read_fault(ip, order);
+
 	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
 	ret = xfs_dax_fault_locked(vmf, order, false);
 	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
@@ -1432,6 +1434,16 @@ xfs_dax_read_fault(
 	return ret;
 }
 
+/*
+ * Locking for serialisation of IO during page faults. This results in a lock
+ * ordering of:
+ *
+ * mmap_lock (MM)
+ *   sb_start_pagefault(vfs, freeze)
+ *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
+ *       page_lock (MM)
+ *         i_lock (XFS - extent map serialisation)
+ */
 static vm_fault_t
 xfs_write_fault(
 	struct vm_fault		*vmf,
@@ -1442,6 +1454,8 @@ xfs_write_fault(
 	unsigned int		lock_mode = XFS_MMAPLOCK_SHARED;
 	vm_fault_t		ret;
 
+	trace_xfs_write_fault(ip, order);
+
 	sb_start_pagefault(inode->i_sb);
 	file_update_time(vmf->vma->vm_file);
 
@@ -1460,40 +1474,13 @@ xfs_write_fault(
 	if (IS_DAX(inode))
 		ret = xfs_dax_fault_locked(vmf, order, true);
 	else
-		ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
+		ret = iomap_page_mkwrite(vmf, &xfs_buffered_write_iomap_ops);
 	xfs_iunlock(ip, lock_mode);
 
 	sb_end_pagefault(inode->i_sb);
 	return ret;
 }
 
-/*
- * Locking for serialisation of IO during page faults. This results in a lock
- * ordering of:
- *
- * mmap_lock (MM)
- *   sb_start_pagefault(vfs, freeze)
- *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
- *       page_lock (MM)
- *         i_lock (XFS - extent map serialisation)
- */
-static vm_fault_t
-__xfs_filemap_fault(
-	struct vm_fault		*vmf,
-	unsigned int		order,
-	bool			write_fault)
-{
-	struct inode		*inode = file_inode(vmf->vma->vm_file);
-
-	trace_xfs_filemap_fault(XFS_I(inode), order, write_fault);
-
-	if (write_fault)
-		return xfs_write_fault(vmf, order);
-	if (IS_DAX(inode))
-		return xfs_dax_read_fault(vmf, order);
-	return filemap_fault(vmf);
-}
-
 static inline bool
 xfs_is_write_fault(
 	struct vm_fault		*vmf)
@@ -1506,10 +1493,17 @@ static vm_fault_t
 xfs_filemap_fault(
 	struct vm_fault		*vmf)
 {
+	struct inode		*inode = file_inode(vmf->vma->vm_file);
+
 	/* DAX can shortcut the normal fault path on write faults! */
-	return __xfs_filemap_fault(vmf, 0,
-			IS_DAX(file_inode(vmf->vma->vm_file)) &&
-			xfs_is_write_fault(vmf));
+	if (IS_DAX(inode)) {
+		if (xfs_is_write_fault(vmf))
+			return xfs_write_fault(vmf, 0);
+		return xfs_dax_read_fault(vmf, 0);
+	}
+
+	trace_xfs_read_fault(XFS_I(inode), 0);
+	return filemap_fault(vmf);
 }
 
 static vm_fault_t
@@ -1521,15 +1515,16 @@ xfs_filemap_huge_fault(
 		return VM_FAULT_FALLBACK;
 
 	/* DAX can shortcut the normal fault path on write faults! */
-	return __xfs_filemap_fault(vmf, order,
-			xfs_is_write_fault(vmf));
+	if (xfs_is_write_fault(vmf))
+		return xfs_write_fault(vmf, order);
+	return xfs_dax_read_fault(vmf, order);
 }
 
 static vm_fault_t
 xfs_filemap_page_mkwrite(
 	struct vm_fault		*vmf)
 {
-	return __xfs_filemap_fault(vmf, 0, true);
+	return xfs_write_fault(vmf, 0);
 }
 
 /*
@@ -1541,8 +1536,7 @@ static vm_fault_t
 xfs_filemap_pfn_mkwrite(
 	struct vm_fault		*vmf)
 {
-
-	return __xfs_filemap_fault(vmf, 0, true);
+	return xfs_write_fault(vmf, 0);
 }
 
 static const struct vm_operations_struct xfs_file_vm_ops = {
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index e810e901cd35..ba6584a6f101 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1234,6 +1234,14 @@ xfs_buffered_write_iomap_end(
 	if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW))
 		return 0;
 
+	/*
+	 * iomap_page_mkwrite() will never fail in a way that requires delalloc
+	 * extents that it allocated to be revoked. Hence never try to release
+	 * them here.
+	 */
+	if (flags & IOMAP_FAULT)
+		return 0;
+
 	/* Nothing to do if we've written the entire delalloc extent */
 	start_byte = iomap_last_written_block(inode, offset, written);
 	end_byte = round_up(offset + length, i_blocksize(inode));
@@ -1260,15 +1268,6 @@ const struct iomap_ops xfs_buffered_write_iomap_ops = {
 	.iomap_end		= xfs_buffered_write_iomap_end,
 };
 
-/*
- * iomap_page_mkwrite() will never fail in a way that requires delalloc extents
- * that it allocated to be revoked. Hence we do not need an .iomap_end method
- * for this operation.
- */
-const struct iomap_ops xfs_page_mkwrite_iomap_ops = {
-	.iomap_begin		= xfs_buffered_write_iomap_begin,
-};
-
 static int
 xfs_read_iomap_begin(
 	struct inode		*inode,
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 4da13440bae9..8347268af727 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -48,7 +48,6 @@ xfs_aligned_fsb_count(
 }
 
 extern const struct iomap_ops xfs_buffered_write_iomap_ops;
-extern const struct iomap_ops xfs_page_mkwrite_iomap_ops;
 extern const struct iomap_ops xfs_direct_write_iomap_ops;
 extern const struct iomap_ops xfs_read_iomap_ops;
 extern const struct iomap_ops xfs_seek_iomap_ops;
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index f66e5b590d75..caa02caac2cd 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -863,28 +863,32 @@ DEFINE_INODE_EVENT(xfs_inode_inactivating);
 TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_SHARED);
 TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_COW);
 
-TRACE_EVENT(xfs_filemap_fault,
-	TP_PROTO(struct xfs_inode *ip, unsigned int order, bool write_fault),
-	TP_ARGS(ip, order, write_fault),
+DECLARE_EVENT_CLASS(xfs_fault_class,
+	TP_PROTO(struct xfs_inode *ip, unsigned int order),
+	TP_ARGS(ip, order),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(xfs_ino_t, ino)
 		__field(unsigned int, order)
-		__field(bool, write_fault)
 	),
 	TP_fast_assign(
 		__entry->dev = VFS_I(ip)->i_sb->s_dev;
 		__entry->ino = ip->i_ino;
 		__entry->order = order;
-		__entry->write_fault = write_fault;
 	),
-	TP_printk("dev %d:%d ino 0x%llx order %u write_fault %d",
+	TP_printk("dev %d:%d ino 0x%llx order %u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->ino,
-		  __entry->order,
-		  __entry->write_fault)
+		  __entry->order)
 )
 
+#define DEFINE_FAULT_EVENT(name) \
+DEFINE_EVENT(xfs_fault_class, name, \
+	TP_PROTO(struct xfs_inode *ip, unsigned int order), \
+	TP_ARGS(ip, order))
+DEFINE_FAULT_EVENT(xfs_read_fault);
+DEFINE_FAULT_EVENT(xfs_write_fault);
+
 DECLARE_EVENT_CLASS(xfs_iref_class,
 	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
 	TP_ARGS(ip, caller_ip),
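
The xfs_sb.c hunk above tightens validation of sb_spino_align. As a quick illustration of the rules it enforces, here is a minimal userspace sketch (not kernel code; spino_align_valid_demo() is a hypothetical name): with sparse inodes in use, the sparse alignment must be non-zero, no larger than sb_inoalignmt, and divide it evenly; otherwise it must be zero.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirrors the three checks added to xfs_validate_sb_common(). */
static bool
spino_align_valid_demo(bool sparse_enabled, uint32_t inoalignmt,
		uint32_t spino_align)
{
	if (!sparse_enabled)
		return spino_align == 0;	/* "should be zero" case */
	return spino_align != 0 &&
	       spino_align <= inoalignmt &&
	       inoalignmt % spino_align == 0;
}

int main(void)
{
	/* valid: sparse alignment evenly divides the inode alignment */
	assert(spino_align_valid_demo(true, 4, 1));
	assert(spino_align_valid_demo(true, 4, 2));
	/* invalid: zero, larger than, or not a factor of sb_inoalignmt */
	assert(!spino_align_valid_demo(true, 4, 0));
	assert(!spino_align_valid_demo(true, 4, 8));
	assert(!spino_align_valid_demo(true, 4, 3));
	/* without sparse inodes, any non-zero value is rejected */
	assert(spino_align_valid_demo(false, 4, 0));
	assert(!spino_align_valid_demo(false, 4, 2));
	return 0;
}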
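
The xfs_zero_extent() rewrite drops a redundant unit round trip: the old code converted the disk address back into filesystem-block units (XFS_BB_TO_FSBT) and then shifted it into 512-byte basic blocks again, which is an identity for fsblock-aligned addresses, so the new code passes the xfs_fsb_to_db() and XFS_FSB_TO_BB() results straight through. A hedged userspace sketch of just the shift arithmetic, assuming 4k filesystem blocks (the AG linearisation inside xfs_fsb_to_db() is elided and all values are illustrative):

#include <assert.h>
#include <stdint.h>

#define BBSHIFT	9	/* 512-byte basic blocks, as in XFS */

int main(void)
{
	const unsigned int blocklog = 12;		/* 4k fs blocks */
	const unsigned int shift = blocklog - BBSHIFT;	/* BBs per fsblock */
	uint64_t db = 0x1000ULL << shift;	/* fsblock-aligned disk addr */
	uint64_t count_fsb = 17;

	/* old: daddr -> fsblock (truncating) -> back to 512-byte units */
	uint64_t old_start = (db >> shift) << shift;
	/* old: open-coded count_fsb << (s_blocksize_bits - 9) */
	uint64_t old_count = count_fsb << (blocklog - 9);

	/* new: the daddr is already in basic blocks; count via FSB_TO_BB */
	uint64_t new_start = db;
	uint64_t new_count = count_fsb << shift;

	/* extents start on fsblock boundaries, so behaviour is unchanged */
	assert(old_start == new_start);
	assert(old_count == new_count);
	return 0;
}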