Diffstat (limited to 'fs/xfs/xfs_trans.c'):
 fs/xfs/xfs_trans.c | 97
 1 file changed, 75 insertions(+), 22 deletions(-)
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index bdf3704dc301..30fbed27cf05 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -25,6 +25,8 @@
#include "xfs_dquot.h"
#include "xfs_icache.h"
#include "xfs_rtbitmap.h"
+#include "xfs_rtgroup.h"
+#include "xfs_sb.h"
struct kmem_cache *xfs_trans_cache;
@@ -67,7 +69,7 @@ xfs_trans_free(
struct xfs_trans *tp)
{
xfs_extent_busy_sort(&tp->t_busy);
- xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
+ xfs_extent_busy_clear(&tp->t_busy, false);
trace_xfs_trans_free(tp, _RET_IP_);
xfs_trans_clear_context(tp);
@@ -420,6 +422,8 @@ xfs_trans_mod_sb(
ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
}
tp->t_frextents_delta += delta;
+ if (xfs_has_rtgroups(mp))
+ flags &= ~XFS_TRANS_SB_DIRTY;
break;
case XFS_TRANS_SB_RES_FREXTENTS:
/*
@@ -429,6 +433,8 @@ xfs_trans_mod_sb(
*/
ASSERT(delta < 0);
tp->t_res_frextents_delta += delta;
+ if (xfs_has_rtgroups(mp))
+ flags &= ~XFS_TRANS_SB_DIRTY;
break;
case XFS_TRANS_SB_DBLOCKS:
tp->t_dblocks_delta += delta;
@@ -455,6 +461,10 @@ xfs_trans_mod_sb(
case XFS_TRANS_SB_REXTSLOG:
tp->t_rextslog_delta += delta;
break;
+ case XFS_TRANS_SB_RGCOUNT:
+ ASSERT(delta > 0);
+ tp->t_rgcount_delta += delta;
+ break;
default:
ASSERT(0);
return;
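The two hunks above stop setting XFS_TRANS_SB_DIRTY for free rt extent changes once the filesystem has rt groups, so those deltas ride the lazy counter path instead of forcing a superblock log. A minimal userspace sketch of that flag handling; the names (TRANS_SB_DIRTY, toy_trans and friends) are illustrative stand-ins, not the kernel's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRANS_SB_DIRTY  (1u << 0)       /* toy flag: transaction must log the sb */

struct toy_trans {
        int64_t         frextents_delta;        /* free rt extents to add at commit */
        unsigned int    flags;
};

/* Accumulate a free rt extent change into the transaction. */
static void toy_mod_frextents(struct toy_trans *tp, int64_t delta,
                              bool has_rtgroups)
{
        unsigned int flags = TRANS_SB_DIRTY;

        tp->frextents_delta += delta;

        /*
         * On an rtgroups filesystem the counter is lazy: log recovery can
         * recompute it from the rt bitmap, so this change alone does not
         * have to dirty the ondisk superblock.
         */
        if (has_rtgroups)
                flags &= ~TRANS_SB_DIRTY;

        tp->flags |= flags;
}

int main(void)
{
        struct toy_trans oldfs = { 0 }, newfs = { 0 };

        toy_mod_frextents(&oldfs, -8, false);   /* pre-rtgroups: sb is dirtied */
        toy_mod_frextents(&newfs, -8, true);    /* rtgroups: stays lazy */

        printf("old fs logs sb: %d, rtgroups fs logs sb: %d\n",
               !!(oldfs.flags & TRANS_SB_DIRTY), !!(newfs.flags & TRANS_SB_DIRTY));
        return 0;
}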
@@ -497,20 +507,22 @@ xfs_trans_apply_sb_deltas(
}
/*
- * Updating frextents requires careful handling because it does not
- * behave like the lazysb counters because we cannot rely on log
- * recovery in older kenels to recompute the value from the rtbitmap.
- * This means that the ondisk frextents must be consistent with the
- * rtbitmap.
+ * sb_frextents was added to the lazy sb counters when the rt groups
+ * feature was introduced. This is possible because we know that all
+ * kernels supporting rtgroups will also recompute frextents from the
+ * realtime bitmap.
+ *
+ * For older file systems, updating frextents requires careful handling
+ * because we cannot rely on log recovery in older kernels to recompute
+ * the value from the rtbitmap. This means that the ondisk frextents
+ * must be consistent with the rtbitmap.
*
* Therefore, log the frextents change to the ondisk superblock and
* update the incore superblock so that future calls to xfs_log_sb
* write the correct value ondisk.
- *
- * Don't touch m_frextents because it includes incore reservations,
- * and those are handled by the unreserve function.
*/
- if (tp->t_frextents_delta || tp->t_res_frextents_delta) {
+ if ((tp->t_frextents_delta || tp->t_res_frextents_delta) &&
+ !xfs_has_rtgroups(tp->t_mountp)) {
struct xfs_mount *mp = tp->t_mountp;
int64_t rtxdelta;
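As the reworded comment above explains, sb_frextents is only applied here, to the logged superblock, on pre-rtgroups filesystems; on rtgroups filesystems the delta is folded in later, in xfs_trans_unreserve_and_mod_sb, like any other lazy counter. A toy model of where the delta ends up on each kind of filesystem, with illustrative names only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_sb    { int64_t frextents; };        /* the logged superblock copy */
struct toy_mount { struct toy_sb sb; int64_t lazy_frextents; };

/* Route a free rt extent delta the way the transaction commit path does. */
static void toy_apply_frextents(struct toy_mount *mp, int64_t delta,
                                bool has_rtgroups)
{
        if (!has_rtgroups)
                mp->sb.frextents += delta;      /* keep the logged value exact */
        else
                mp->lazy_frextents += delta;    /* lazy counter, recomputed by recovery */
}

int main(void)
{
        struct toy_mount mp = { .sb = { .frextents = 100 } };

        toy_apply_frextents(&mp, -8, true);
        printf("logged sb = %lld, lazy counter delta = %lld\n",
               (long long)mp.sb.frextents, (long long)mp.lazy_frextents);
        return 0;
}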
@@ -536,6 +548,18 @@ xfs_trans_apply_sb_deltas(
}
if (tp->t_rextsize_delta) {
be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
+
+ /*
+ * Because the ondisk sb records rtgroup size in units of rt
+ * extents, any time we update the rt extent size we have to
+ * recompute the ondisk rtgroup block log. The incore values
+ * will be recomputed in xfs_trans_unreserve_and_mod_sb.
+ */
+ if (xfs_has_rtgroups(tp->t_mountp)) {
+ sbp->sb_rgblklog = xfs_compute_rgblklog(
+ be32_to_cpu(sbp->sb_rgextents),
+ be32_to_cpu(sbp->sb_rextsize));
+ }
whole = 1;
}
if (tp->t_rbmblocks_delta) {
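The ondisk superblock records rt group size in rt extents, so the cached rt group block log, presumably the number of bits needed to span one group's worth of blocks, has to be refreshed whenever sb_rextsize changes. A standalone guess at what such a computation looks like; the real xfs_compute_rgblklog may round or bias differently:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the rtgroup block log: bits needed to cover one
 * rt group of rgextents extents, each rextsize blocks long.
 */
static uint8_t toy_rgblklog(uint32_t rgextents, uint32_t rextsize)
{
        uint64_t rgblocks = (uint64_t)rgextents * rextsize;
        uint8_t log = 0;

        /* smallest log with 2^log >= rgblocks */
        while ((1ULL << log) < rgblocks)
                log++;
        return log;
}

int main(void)
{
        /* growing the rt extent size from 1 to 4 blocks widens the log by 2 */
        printf("1M extents x 1 block  -> rgblklog %u\n", (unsigned)toy_rgblklog(1U << 20, 1));
        printf("1M extents x 4 blocks -> rgblklog %u\n", (unsigned)toy_rgblklog(1U << 20, 4));
        return 0;
}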
@@ -554,6 +578,10 @@ xfs_trans_apply_sb_deltas(
sbp->sb_rextslog += tp->t_rextslog_delta;
whole = 1;
}
+ if (tp->t_rgcount_delta) {
+ be32_add_cpu(&sbp->sb_rgcount, tp->t_rgcount_delta);
+ whole = 1;
+ }
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
if (whole)
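sb_rgcount, like the other ondisk superblock fields updated in this function, is stored big-endian, so the host-endian delta is applied with be32_add_cpu. A userspace stand-in for that pattern, with htonl/ntohl playing the role of the cpu_to_be32/be32_to_cpu conversions:

#include <arpa/inet.h>  /* htonl/ntohl stand in for the kernel endian helpers */
#include <stdint.h>
#include <stdio.h>

/* Apply a host-endian delta to a big-endian ondisk field, be32_add_cpu style. */
static void toy_be32_add(uint32_t *be_field, int32_t delta)
{
        *be_field = htonl(ntohl(*be_field) + delta);
}

int main(void)
{
        uint32_t sb_rgcount_be = htonl(4);      /* pretend ondisk rt group count */

        toy_be32_add(&sb_rgcount_be, 2);        /* growfs added two rt groups */
        printf("rgcount is now %u\n", ntohl(sb_rgcount_be));
        return 0;
}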
@@ -618,7 +646,7 @@ xfs_trans_unreserve_and_mod_sb(
}
ASSERT(tp->t_rtx_res || tp->t_frextents_delta >= 0);
- if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
+ if (xfs_has_rtgroups(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
rtxdelta += tp->t_frextents_delta;
ASSERT(rtxdelta >= 0);
}
@@ -651,23 +679,21 @@ xfs_trans_unreserve_and_mod_sb(
mp->m_sb.sb_icount += idelta;
mp->m_sb.sb_ifree += ifreedelta;
/*
- * Do not touch sb_frextents here because we are dealing with incore
- * reservation. sb_frextents is not part of the lazy sb counters so it
- * must be consistent with the ondisk rtbitmap and must never include
- * incore reservations.
+ * Do not touch sb_frextents here because it is handled in
+ * xfs_trans_apply_sb_deltas for file systems where it isn't a lazy
+ * counter anyway.
*/
mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
mp->m_sb.sb_agcount += tp->t_agcount_delta;
mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
- mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
- if (tp->t_rextsize_delta) {
- mp->m_rtxblklog = log2_if_power2(mp->m_sb.sb_rextsize);
- mp->m_rtxblkmask = mask64_if_power2(mp->m_sb.sb_rextsize);
- }
+ if (tp->t_rextsize_delta)
+ xfs_mount_sb_set_rextsize(mp, &mp->m_sb,
+ mp->m_sb.sb_rextsize + tp->t_rextsize_delta);
mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
mp->m_sb.sb_rextents += tp->t_rextents_delta;
mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
+ mp->m_sb.sb_rgcount += tp->t_rgcount_delta;
spin_unlock(&mp->m_sb_lock);
/*
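The removed lines open-coded the rt extent size caches: a log2 and a mask that are only meaningful when the extent size is a power of two, which lets hot paths mask instead of divide. The new xfs_mount_sb_set_rextsize presumably centralizes those updates along with the rtgroups geometry mentioned in the earlier hunk. A small sketch of the power-of-two log/mask caching idea, assuming the helpers fall back to 0 for non-power-of-two sizes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_pow2(uint64_t x)
{
        return x && !(x & (x - 1));
}

/* log2(x) when x is a power of two, else 0 (assumed fallback) */
static uint8_t log2_if_pow2(uint64_t x)
{
        uint8_t log = 0;

        if (!is_pow2(x))
                return 0;
        while (x > 1) {
                x >>= 1;
                log++;
        }
        return log;
}

/* x - 1 when x is a power of two, else 0: callers can mask instead of divide */
static uint64_t mask_if_pow2(uint64_t x)
{
        return is_pow2(x) ? x - 1 : 0;
}

int main(void)
{
        uint64_t rextsize = 16;         /* rt extent size in fs blocks */
        uint64_t fsbno = 1234567;       /* an arbitrary fs block number */

        /* with a power-of-two extent size, "mod rextsize" is a cheap mask */
        printf("rextsize log = %u, offset within extent = %llu\n",
               (unsigned)log2_if_pow2(rextsize),
               (unsigned long long)(fsbno & mask_if_pow2(rextsize)));
        return 0;
}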
@@ -1262,11 +1288,26 @@ retry:
gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
if (udqp || gdqp || pdqp) {
+ xfs_filblks_t dblocks, rblocks;
unsigned int qflags = XFS_QMOPT_RES_REGBLKS;
+ bool isrt = XFS_IS_REALTIME_INODE(ip);
if (force)
qflags |= XFS_QMOPT_FORCE_RES;
+ if (isrt) {
+ error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
+ if (error)
+ goto out_cancel;
+ }
+
+ xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);
+
+ if (isrt)
+ rblocks += ip->i_delayed_blks;
+ else
+ dblocks += ip->i_delayed_blks;
+
/*
* Reserve enough quota to handle blocks on disk and reserved
* for a delayed allocation. We'll actually transfer the
@@ -1274,8 +1315,20 @@ retry:
* though that part is only semi-transactional.
*/
error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
- pdqp, ip->i_nblocks + ip->i_delayed_blks,
- 1, qflags);
+ pdqp, dblocks, 1, qflags);
+ if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
+ xfs_trans_cancel(tp);
+ xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
+ retried = true;
+ goto retry;
+ }
+ if (error)
+ goto out_cancel;
+
+ /* Do the same for realtime. */
+ qflags = XFS_QMOPT_RES_RTBLKS | (qflags & XFS_QMOPT_FORCE_RES);
+ error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
+ pdqp, rblocks, 0, qflags);
if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
xfs_trans_cancel(tp);
xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);