xfs: refactor xfsaild_push loop into helper

Factor the loop body of xfsaild_push() into a separate
xfsaild_process_logitem() helper to improve readability.

This is a pure code movement with no functional change.

Signed-off-by: Yuto Ohnuki <ytohnuki@amazon.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
This commit is contained in:
Yuto Ohnuki 2026-03-10 18:38:40 +00:00 committed by Carlos Maiolino
parent 394d70b86f
commit 7cac609473
1 changed file with 69 additions and 58 deletions

View File

@ -464,6 +464,74 @@ xfs_ail_calc_push_target(
return target_lsn;
}
/*
 * Push a single AIL log item towards the on-disk log and account for the
 * result.  Bumps the per-result push statistics and tracepoints, records
 * the LSN of pushed items in ailp->ail_last_pushed_lsn, and increments the
 * caller's *stuck or *flushing counters depending on the push outcome.
 *
 * @ailp:     the AIL this item belongs to
 * @lip:      the log item to push; may be freed by the push, so every
 *            field we need is sampled before xfsaild_push_item() is called
 * @stuck:    in/out count of items that could not make progress
 *            (pinned or locked)
 * @flushing: in/out count of items already being flushed
 */
static void
xfsaild_process_logitem(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	int			*stuck,
	int			*flushing)
{
	struct xfs_mount	*mp = ailp->ail_log->l_mp;
	/* Sample everything we report on before the push can free @lip. */
	uint			type = lip->li_type;
	unsigned long		flags = lip->li_flags;
	xfs_lsn_t		item_lsn = lip->li_lsn;
	int			lock_result;

	/*
	 * Note that iop_push may unlock and reacquire the AIL lock.  We
	 * rely on the AIL cursor implementation to be able to deal with
	 * the dropped lock.
	 *
	 * The log item may have been freed by the push, so it must not
	 * be accessed or dereferenced below this line.
	 */
	lock_result = xfsaild_push_item(ailp, lip);
	switch (lock_result) {
	case XFS_ITEM_SUCCESS:
		/* Item was pushed; remember how far we got. */
		XFS_STATS_INC(mp, xs_push_ail_success);
		trace_xfs_ail_push(ailp, type, flags, item_lsn);
		ailp->ail_last_pushed_lsn = item_lsn;
		break;
	case XFS_ITEM_FLUSHING:
		/*
		 * The item or its backing buffer is already being
		 * flushed.  The typical reason for that is that an
		 * inode buffer is locked because we already pushed the
		 * updates to it as part of inode clustering.
		 *
		 * We do not want to stop flushing just because lots
		 * of items are already being flushed, but we need to
		 * re-try the flushing relatively soon if most of the
		 * AIL is being flushed.
		 */
		XFS_STATS_INC(mp, xs_push_ail_flushing);
		trace_xfs_ail_flushing(ailp, type, flags, item_lsn);
		(*flushing)++;
		ailp->ail_last_pushed_lsn = item_lsn;
		break;
	case XFS_ITEM_PINNED:
		/*
		 * Pinned items need a log force to be unpinned; request
		 * one by bumping ail_log_flush for the caller to act on.
		 */
		XFS_STATS_INC(mp, xs_push_ail_pinned);
		trace_xfs_ail_pinned(ailp, type, flags, item_lsn);
		(*stuck)++;
		ailp->ail_log_flush++;
		break;
	case XFS_ITEM_LOCKED:
		/* Someone else holds the item; count it as stuck. */
		XFS_STATS_INC(mp, xs_push_ail_locked);
		trace_xfs_ail_locked(ailp, type, flags, item_lsn);
		(*stuck)++;
		break;
	default:
		/* xfsaild_push_item() must not return anything else. */
		ASSERT(0);
		break;
	}
}
static long
xfsaild_push(
struct xfs_ail *ailp)
@ -511,68 +579,11 @@ xfsaild_push(
lsn = lip->li_lsn;
while ((XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0)) {
int lock_result;
uint type = lip->li_type;
unsigned long flags = lip->li_flags;
xfs_lsn_t item_lsn = lip->li_lsn;
if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
goto next_item;
/*
* Note that iop_push may unlock and reacquire the AIL lock. We
* rely on the AIL cursor implementation to be able to deal with
* the dropped lock.
*
* The log item may have been freed by the push, so it must not
* be accessed or dereferenced below this line.
*/
lock_result = xfsaild_push_item(ailp, lip);
switch (lock_result) {
case XFS_ITEM_SUCCESS:
XFS_STATS_INC(mp, xs_push_ail_success);
trace_xfs_ail_push(ailp, type, flags, item_lsn);
ailp->ail_last_pushed_lsn = item_lsn;
break;
case XFS_ITEM_FLUSHING:
/*
* The item or its backing buffer is already being
* flushed. The typical reason for that is that an
* inode buffer is locked because we already pushed the
* updates to it as part of inode clustering.
*
* We do not want to stop flushing just because lots
* of items are already being flushed, but we need to
* re-try the flushing relatively soon if most of the
* AIL is being flushed.
*/
XFS_STATS_INC(mp, xs_push_ail_flushing);
trace_xfs_ail_flushing(ailp, type, flags, item_lsn);
flushing++;
ailp->ail_last_pushed_lsn = item_lsn;
break;
case XFS_ITEM_PINNED:
XFS_STATS_INC(mp, xs_push_ail_pinned);
trace_xfs_ail_pinned(ailp, type, flags, item_lsn);
stuck++;
ailp->ail_log_flush++;
break;
case XFS_ITEM_LOCKED:
XFS_STATS_INC(mp, xs_push_ail_locked);
trace_xfs_ail_locked(ailp, type, flags, item_lsn);
stuck++;
break;
default:
ASSERT(0);
break;
}
xfsaild_process_logitem(ailp, lip, &stuck, &flushing);
count++;
/*