zram: move post-processing target allocation

Allocate post-processing target in place_pp_slot().  This simplifies
scan_slots_for_writeback() and scan_slots_for_recompress() loops because
we don't need to track pps pointer state anymore.  Previously we had to
explicitly NULL the pointer if it had been added to a post-processing bucket,
or re-use the previously allocated pointer otherwise, and make sure we didn't
leak the memory in the end.

We are also fine doing GFP_NOIO allocation, as post-processing can be
called under memory pressure, so we'd better pick as many slots as we can,
as soon as we can, and start post-processing them, possibly saving the memory.
Allocation failure there is not fatal, we will post-process whatever we
put into the buckets on previous iterations.

Link: https://lkml.kernel.org/r/20250303022425.285971-12-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Yosry Ahmed <yosry.ahmed@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Sergey Senozhatsky 2025-03-03 11:03:20 +09:00 committed by Andrew Morton
parent b0624f0b22
commit 7e1b0212d4
1 changed file with 22 additions and 28 deletions

View File

@@ -295,15 +295,24 @@ static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl)
kfree(ctl);
}
static void place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
struct zram_pp_slot *pps)
static bool place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
u32 index)
{
u32 idx;
struct zram_pp_slot *pps;
u32 bid;
idx = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
list_add(&pps->entry, &ctl->pp_buckets[idx]);
pps = kmalloc(sizeof(*pps), GFP_NOIO | __GFP_NOWARN);
if (!pps)
return false;
INIT_LIST_HEAD(&pps->entry);
pps->index = index;
bid = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
list_add(&pps->entry, &ctl->pp_buckets[bid]);
zram_set_flag(zram, pps->index, ZRAM_PP_SLOT);
return true;
}
static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
@@ -737,15 +746,8 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode,
unsigned long index,
struct zram_pp_ctl *ctl)
{
struct zram_pp_slot *pps = NULL;
for (; nr_pages != 0; index++, nr_pages--) {
if (!pps)
pps = kmalloc(sizeof(*pps), GFP_KERNEL);
if (!pps)
return -ENOMEM;
INIT_LIST_HEAD(&pps->entry);
bool ok = true;
zram_slot_lock(zram, index);
if (!zram_allocated(zram, index))
@@ -765,14 +767,13 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode,
!zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
goto next;
pps->index = index;
place_pp_slot(zram, ctl, pps);
pps = NULL;
ok = place_pp_slot(zram, ctl, index);
next:
zram_slot_unlock(zram, index);
if (!ok)
break;
}
kfree(pps);
return 0;
}
@@ -1827,16 +1828,10 @@ static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max,
struct zram_pp_ctl *ctl)
{
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
struct zram_pp_slot *pps = NULL;
unsigned long index;
for (index = 0; index < nr_pages; index++) {
if (!pps)
pps = kmalloc(sizeof(*pps), GFP_KERNEL);
if (!pps)
return -ENOMEM;
INIT_LIST_HEAD(&pps->entry);
bool ok = true;
zram_slot_lock(zram, index);
if (!zram_allocated(zram, index))
@@ -1859,14 +1854,13 @@ static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max,
if (zram_get_priority(zram, index) + 1 >= prio_max)
goto next;
pps->index = index;
place_pp_slot(zram, ctl, pps);
pps = NULL;
ok = place_pp_slot(zram, ctl, index);
next:
zram_slot_unlock(zram, index);
if (!ok)
break;
}
kfree(pps);
return 0;
}