mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:04:51 +01:00
xfs: use a separate member to track space available in the GC scratch buffer
When scratch_head wraps back to 0 and scratch_tail is also 0 because no I/O has completed yet, the ring buffer could be mistaken for empty. Fix this by introducing a separate scratch_available member in struct xfs_zone_gc_data. This actually ends up simplifying the code as well. Reported-by: Chris Mason <clm@meta.com> Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com> Signed-off-by: Carlos Maiolino <cem@kernel.org>
This commit is contained in:
parent
692243cac6
commit
c17a1c0349
1 changed file with 9 additions and 16 deletions
|
|
@ -131,10 +131,13 @@ struct xfs_zone_gc_data {
|
|||
/*
|
||||
* Scratchpad to buffer GC data, organized as a ring buffer over
|
||||
* discontiguous folios. scratch_head is where the buffer is filled,
|
||||
* and scratch_tail tracks the buffer space freed.
|
||||
* scratch_tail tracks the buffer space freed, and scratch_available
|
||||
* counts the space available in the ring buffer between the head and
|
||||
* the tail.
|
||||
*/
|
||||
struct folio *scratch_folios[XFS_GC_NR_BUFS];
|
||||
unsigned int scratch_size;
|
||||
unsigned int scratch_available;
|
||||
unsigned int scratch_head;
|
||||
unsigned int scratch_tail;
|
||||
|
||||
|
|
@ -212,6 +215,7 @@ xfs_zone_gc_data_alloc(
|
|||
goto out_free_scratch;
|
||||
}
|
||||
data->scratch_size = XFS_GC_BUF_SIZE * XFS_GC_NR_BUFS;
|
||||
data->scratch_available = data->scratch_size;
|
||||
INIT_LIST_HEAD(&data->reading);
|
||||
INIT_LIST_HEAD(&data->writing);
|
||||
INIT_LIST_HEAD(&data->resetting);
|
||||
|
|
@ -574,18 +578,6 @@ xfs_zone_gc_ensure_target(
|
|||
return oz;
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
xfs_zone_gc_scratch_available(
|
||||
struct xfs_zone_gc_data *data)
|
||||
{
|
||||
if (!data->scratch_tail)
|
||||
return data->scratch_size - data->scratch_head;
|
||||
|
||||
if (!data->scratch_head)
|
||||
return data->scratch_tail;
|
||||
return (data->scratch_size - data->scratch_head) + data->scratch_tail;
|
||||
}
|
||||
|
||||
static bool
|
||||
xfs_zone_gc_space_available(
|
||||
struct xfs_zone_gc_data *data)
|
||||
|
|
@ -596,7 +588,7 @@ xfs_zone_gc_space_available(
|
|||
if (!oz)
|
||||
return false;
|
||||
return oz->oz_allocated < rtg_blocks(oz->oz_rtg) &&
|
||||
xfs_zone_gc_scratch_available(data);
|
||||
data->scratch_available;
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
@ -625,8 +617,7 @@ xfs_zone_gc_alloc_blocks(
|
|||
if (!oz)
|
||||
return NULL;
|
||||
|
||||
*count_fsb = min(*count_fsb,
|
||||
XFS_B_TO_FSB(mp, xfs_zone_gc_scratch_available(data)));
|
||||
*count_fsb = min(*count_fsb, XFS_B_TO_FSB(mp, data->scratch_available));
|
||||
|
||||
/*
|
||||
* Directly allocate GC blocks from the reserved pool.
|
||||
|
|
@ -730,6 +721,7 @@ xfs_zone_gc_start_chunk(
|
|||
bio->bi_end_io = xfs_zone_gc_end_io;
|
||||
xfs_zone_gc_add_data(chunk);
|
||||
data->scratch_head = (data->scratch_head + len) % data->scratch_size;
|
||||
data->scratch_available -= len;
|
||||
|
||||
WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
|
||||
list_add_tail(&chunk->entry, &data->reading);
|
||||
|
|
@ -862,6 +854,7 @@ xfs_zone_gc_finish_chunk(
|
|||
|
||||
data->scratch_tail =
|
||||
(data->scratch_tail + chunk->len) % data->scratch_size;
|
||||
data->scratch_available += chunk->len;
|
||||
|
||||
/*
|
||||
* Cycle through the iolock and wait for direct I/O and layouts to
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue