author      Brian Foster <bfoster@redhat.com>            2018-03-09 14:02:32 -0800
committer   Darrick J. Wong <darrick.wong@oracle.com>    2018-03-11 20:27:57 -0700
commit      0ab32086d0becee56c75a8ba21f16ac08b80f304 (patch)
tree        dc34a6a17938d1fa4797074f8903dfaf6988ea81 /fs/xfs
parent      215928633502a7296fec42614463bb49859787d6 (diff)
xfs: account only rmapbt-used blocks against rmapbt perag res
The rmapbt perag metadata reservation reserves blocks for the
reverse mapping btree (rmapbt). Since the rmapbt uses blocks from
the agfl and perag accounting is updated as blocks are allocated
from the allocation btrees, the reservation actually accounts blocks
as they are allocated to (or freed from) the agfl rather than the
rmapbt itself.
While this works for blocks that are eventually used for the rmapbt,
not all agfl blocks are destined for the rmapbt. Blocks that are
allocated to the agfl (and thus "reserved" for the rmapbt) but then
used by another structure lead to a growing inconsistency over time
between the runtime tracking of rmapbt usage vs. actual rmapbt
usage. Since the runtime tracking thinks all agfl blocks are rmapbt
blocks, it essentially believes that less future reservation is
required to satisfy the rmapbt than what is actually necessary.
The inconsistency is rectified across mount cycles because the perag
reservation is initialized based on the actual rmapbt usage at mount
time. The problem, however, is that the excessive drain of the
reservation at runtime opens a window to allocate blocks for other
purposes that might be required for the rmapbt on a subsequent
mount. This problem can be demonstrated by a simple test that runs
an allocation workload to consume agfl blocks over time and then
observes the difference in the agfl reservation requirement across an
unmount/mount cycle:
mount ...: xfs_ag_resv_init: ... resv 3193 ask 3194 len 3194
...
... : xfs_ag_resv_alloc_extent: ... resv 2957 ask 3194 len 1
umount...: xfs_ag_resv_free: ... resv 2956 ask 3194 len 0
mount ...: xfs_ag_resv_init: ... resv 3052 ask 3194 len 3194
As the above tracepoints show, the reservation requirement reduces
from 3194 blocks to 2956 blocks as the workload runs. Without any
other changes in the filesystem, the same reservation requirement
jumps from 2956 to 3052 blocks over a umount/mount cycle.
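To make the arithmetic behind that drift concrete, here is a small,
self-contained user-space model of the pre-patch scheme (purely
illustrative; the struct, field names and numbers are invented for this
sketch and are not kernel code). Every block that enters the AGFL is
charged to the rmapbt reservation, yet only some of those blocks ever
become rmapbt blocks, so re-deriving the reservation from actual usage
comes out higher than the runtime counter:

    #include <stdio.h>

    /* Toy per-AG state, loosely modelled on the real counters. */
    struct toy_perag {
        int rmapbt_resv;    /* in-core reservation remaining */
        int rmapbt_blocks;  /* blocks actually used by the rmapbt */
    };

    int main(void)
    {
        struct toy_perag pag = { .rmapbt_resv = 100 };
        int i;

        /*
         * Pre-patch: every AGFL fill is charged to the rmapbt
         * reservation, but only some AGFL blocks end up in the rmapbt.
         */
        for (i = 0; i < 30; i++) {
            pag.rmapbt_resv--;          /* charged at AGFL fill time */
            if (i % 3 == 0)
                pag.rmapbt_blocks++;    /* only 1 in 3 used by the rmapbt */
        }
        printf("runtime: resv %d, rmapbt blocks %d\n",
               pag.rmapbt_resv, pag.rmapbt_blocks);

        /*
         * "Remount": the reservation is re-derived from actual rmapbt
         * usage, so it jumps back up; the runtime accounting had
         * drained it too far.
         */
        pag.rmapbt_resv = 100 - pag.rmapbt_blocks;
        printf("remount: resv %d, rmapbt blocks %d\n",
               pag.rmapbt_resv, pag.rmapbt_blocks);
        return 0;
    }

The runtime counter undercounts the remaining requirement because it
treated all 30 AGFL fills as rmapbt blocks, while a fresh mount only
sees the 10 blocks the rmapbt actually owns.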
To address this divergence, update the RMAPBT reservation to account
only for blocks actually used by the rmapbt rather than for every
block filled into the agfl. This patch makes two high-level changes
toward that end:
1.) Reintroduce an AGFL reservation type to serve as an accounting
no-op for blocks allocated to (or freed from) the AGFL.
2.) Invoke RMAPBT usage accounting from the actual rmapbt block
allocation path rather than the AGFL allocation path.
The first change is required because agfl blocks are considered free
blocks throughout their lifetime. The perag reservation subsystem is
invoked unconditionally by the allocation subsystem, so we need a
way to tell the perag subsystem (via the allocation subsystem) to
not make any accounting changes for blocks filled into the AGFL.
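As a minimal sketch of that first change (an illustrative user-space
reduction of the xfs_ag_resv.c hunks below, not the kernel function
itself), the reservation accounting helper simply short-circuits when
the allocation only refilled the AGFL:

    /* Reservation types, mirroring the enum this patch extends. */
    enum toy_resv_type {
        TOY_RESV_NONE = 0,
        TOY_RESV_AGFL,      /* new: accounting no-op for AGFL fills */
        TOY_RESV_METADATA,
        TOY_RESV_RMAPBT,
    };

    struct toy_perag {
        int metadata_resv;
        int rmapbt_resv;
    };

    /* Charge len blocks against a reservation when an extent is allocated. */
    void toy_ag_resv_alloc_extent(struct toy_perag *pag,
                                  enum toy_resv_type type, int len)
    {
        switch (type) {
        case TOY_RESV_AGFL:
            return;             /* AGFL fills deliberately change nothing */
        case TOY_RESV_METADATA:
            pag->metadata_resv -= len;
            break;
        case TOY_RESV_RMAPBT:
            pag->rmapbt_resv -= len;
            break;
        default:
            break;              /* unreserved space: nothing to charge */
        }
    }

Because the allocator calls this hook unconditionally, the AGFL case
gives the freelist-refill path a way to say "no reservation is
involved here" without special-casing any caller.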
The second change causes the in-core RMAPBT reservation usage
accounting to remain consistent with the on-disk state at all times
and eliminates the risk of leaving the rmapbt reservation
underfilled.
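A rough sketch of the second change in the same toy style (again
illustrative only; the real patch does this through the new
xfs_ag_resv_rmapbt_alloc()/xfs_ag_resv_rmapbt_free() wrappers called
from the rmapbt block alloc/free callbacks): the reservation is
adjusted only at the point where a block verifiably becomes, or stops
being, an rmapbt block:

    struct toy_perag {
        int rmapbt_resv;    /* reservation remaining */
        int rmapbt_blocks;  /* blocks actually owned by the rmapbt */
    };

    /* The rmapbt took one block from the AGFL for its own use. */
    void toy_rmapbt_alloc_block(struct toy_perag *pag)
    {
        pag->rmapbt_blocks++;
        pag->rmapbt_resv--;     /* charge exactly one block, at point of use */
    }

    /* The rmapbt returned one block to the free space. */
    void toy_rmapbt_free_block(struct toy_perag *pag)
    {
        pag->rmapbt_blocks--;
        pag->rmapbt_resv++;
    }

With both pieces in place, rmapbt_resv + rmapbt_blocks stays constant,
so a remount that re-derives the reservation from on-disk rmapbt usage
lands on the same value the runtime accounting already holds.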
Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/libxfs/xfs_ag_resv.c     |  4
-rw-r--r--   fs/xfs/libxfs/xfs_ag_resv.h     | 31
-rw-r--r--   fs/xfs/libxfs/xfs_alloc.c       | 18
-rw-r--r--   fs/xfs/libxfs/xfs_rmap_btree.c  |  4
-rw-r--r--   fs/xfs/xfs_mount.h              |  1
5 files changed, 46 insertions, 12 deletions
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index 0ca2e680034a..03885a968de8 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -326,6 +326,8 @@ xfs_ag_resv_alloc_extent(
         trace_xfs_ag_resv_alloc_extent(pag, type, args->len);
 
         switch (type) {
+        case XFS_AG_RESV_AGFL:
+                return;
         case XFS_AG_RESV_METADATA:
         case XFS_AG_RESV_RMAPBT:
                 resv = xfs_perag_resv(pag, type);
@@ -366,6 +368,8 @@ xfs_ag_resv_free_extent(
         trace_xfs_ag_resv_free_extent(pag, type, len);
 
         switch (type) {
+        case XFS_AG_RESV_AGFL:
+                return;
         case XFS_AG_RESV_METADATA:
         case XFS_AG_RESV_RMAPBT:
                 resv = xfs_perag_resv(pag, type);
diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h
index 8d6c687deef3..938f2f96c5e8 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.h
+++ b/fs/xfs/libxfs/xfs_ag_resv.h
@@ -32,4 +32,35 @@ void xfs_ag_resv_alloc_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
 void xfs_ag_resv_free_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
                 struct xfs_trans *tp, xfs_extlen_t len);
 
+/*
+ * RMAPBT reservation accounting wrappers. Since rmapbt blocks are sourced from
+ * the AGFL, they are allocated one at a time and the reservation updates don't
+ * require a transaction.
+ */
+static inline void
+xfs_ag_resv_rmapbt_alloc(
+        struct xfs_mount        *mp,
+        xfs_agnumber_t          agno)
+{
+        struct xfs_alloc_arg    args = {0};
+        struct xfs_perag        *pag;
+
+        args.len = 1;
+        pag = xfs_perag_get(mp, agno);
+        xfs_ag_resv_alloc_extent(pag, XFS_AG_RESV_RMAPBT, &args);
+        xfs_perag_put(pag);
+}
+
+static inline void
+xfs_ag_resv_rmapbt_free(
+        struct xfs_mount        *mp,
+        xfs_agnumber_t          agno)
+{
+        struct xfs_perag        *pag;
+
+        pag = xfs_perag_get(mp, agno);
+        xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
+        xfs_perag_put(pag);
+}
+
 #endif  /* __XFS_AG_RESV_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 1dc244d15a75..3db90b707fb2 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -728,7 +728,7 @@ xfs_alloc_ag_vextent(
         ASSERT(args->len >= args->minlen);
         ASSERT(args->len <= args->maxlen);
-        ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_RMAPBT);
+        ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
         ASSERT(args->agbno % args->alignment == 0);
 
         /* if not file data, insert new block into the reverse map btree */
@@ -1581,7 +1581,6 @@ xfs_alloc_ag_vextent_small(
         int             *stat)  /* status: 0-freelist, 1-normal/none */
 {
         struct xfs_owner_info   oinfo;
-        struct xfs_perag        *pag;
         int             error;
         xfs_agblock_t   fbno;
         xfs_extlen_t    flen;
@@ -1600,7 +1599,7 @@ xfs_alloc_ag_vextent_small(
          * freelist.
          */
         else if (args->minlen == 1 && args->alignment == 1 &&
-                 args->resv != XFS_AG_RESV_RMAPBT &&
+                 args->resv != XFS_AG_RESV_AGFL &&
                  (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
                   > args->minleft)) {
                 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
@@ -1633,18 +1632,13 @@ xfs_alloc_ag_vextent_small(
                         /*
                          * If we're feeding an AGFL block to something that
                          * doesn't live in the free space, we need to clear
-                         * out the OWN_AG rmap and add the block back to
-                         * the RMAPBT per-AG reservation.
+                         * out the OWN_AG rmap.
                          */
                         xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
                         error = xfs_rmap_free(args->tp, args->agbp, args->agno,
                                         fbno, 1, &oinfo);
                         if (error)
                                 goto error0;
-                        pag = xfs_perag_get(args->mp, args->agno);
-                        xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT,
-                                        args->tp, 1);
-                        xfs_perag_put(pag);
 
                         *stat = 0;
                         return 0;
@@ -2170,7 +2164,7 @@ xfs_alloc_fix_freelist(
                 if (error)
                         goto out_agbp_relse;
                 error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1,
-                                           &targs.oinfo, XFS_AG_RESV_RMAPBT);
+                                           &targs.oinfo, XFS_AG_RESV_AGFL);
                 if (error)
                         goto out_agbp_relse;
                 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
@@ -2196,7 +2190,7 @@ xfs_alloc_fix_freelist(
         while (pag->pagf_flcount < need) {
                 targs.agbno = 0;
                 targs.maxlen = need - pag->pagf_flcount;
-                targs.resv = XFS_AG_RESV_RMAPBT;
+                targs.resv = XFS_AG_RESV_AGFL;
 
                 /* Allocate as many blocks as possible at once. */
                 error = xfs_alloc_ag_vextent(&targs);
@@ -2877,7 +2871,7 @@ xfs_free_extent(
         int                     error;
 
         ASSERT(len != 0);
-        ASSERT(type != XFS_AG_RESV_RMAPBT);
+        ASSERT(type != XFS_AG_RESV_AGFL);
 
         if (XFS_TEST_ERROR(false, mp,
                         XFS_ERRTAG_FREE_EXTENT))
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 738df3f9b5f2..8b0d0de1cd11 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -125,6 +125,8 @@ xfs_rmapbt_alloc_block(
         be32_add_cpu(&agf->agf_rmap_blocks, 1);
         xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
 
+        xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_private.a.agno);
+
         *stat = 1;
         return 0;
 }
@@ -152,6 +154,8 @@ xfs_rmapbt_free_block(
                               XFS_EXTENT_BUSY_SKIP_DISCARD);
         xfs_trans_agbtree_delta(cur->bc_tp, -1);
 
+        xfs_ag_resv_rmapbt_free(cur->bc_mp, cur->bc_private.a.agno);
+
         return 0;
 }
 
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a2cf3718bea9..1808f56decaa 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -325,6 +325,7 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
 /* per-AG block reservation data structures*/
 enum xfs_ag_resv_type {
         XFS_AG_RESV_NONE = 0,
+        XFS_AG_RESV_AGFL,
         XFS_AG_RESV_METADATA,
         XFS_AG_RESV_RMAPBT,
 };