memblock: updates for 7.0-rc1

* update tools/include/linux/mm.h to fix memblock tests compilation
 * drop redundant struct page* parameter from memblock_free_pages() and get
   struct page from the pfn
 * add underflow detection for size calculation in memtest and warn about
   underflow when CONFIG_DEBUG_VM is enabled
 -----BEGIN PGP SIGNATURE-----
 
 iQFEBAABCgAuFiEEeOVYVaWZL5900a/pOQOGJssO/ZEFAmmQIhoQHHJwcHRAa2Vy
 bmVsLm9yZwAKCRA5A4Ymyw79kWhYB/0aobkrfD4aW5Utfmzp08LdBwtfsOqEfKX6
 AdBGPdG+WB90auW4qwDupspqj2lYDpJ4QvETNP0B84ek62VEN+8YEbvcC4W70l4H
 nsrrnkTgwFGNXXxjr6tIQXu9hnC1o7eSuWhhYry4XG+JEKR3iah54JmbxcDrAEFj
 lb4BzdocDtF6J3EkOv5alaDfdwUxgA3C6Idp2mpVb4m7DMraGZMq3lm7EPYm22zb
 zo9v0nvXW9xtZfADQ6mRzp4uTjd/UAUH+YsU/u1S1f+JBN1bELXmFRf/X3CKBC6/
 AIO9FcHsfA0i1MhbeBizT9eUEFaNIRxbMAtWbfdHrQhaLWNvyPOU
 =Gz3z
 -----END PGP SIGNATURE-----

Merge tag 'memblock-v7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock updates from Mike Rapoport:

 - update tools/include/linux/mm.h to fix memblock tests compilation

 - drop redundant struct page* parameter from memblock_free_pages() and
   get struct page from the pfn

 - add underflow detection for size calculation in memtest and warn
   about underflow when CONFIG_DEBUG_VM is enabled

* tag 'memblock-v7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
  mm/memtest: add underflow detection for size calculation
  memblock: drop redundant 'struct page *' argument from memblock_free_pages()
  memblock test: include <linux/sizes.h> from tools mm.h stub
This commit is contained in:
Linus Torvalds 2026-02-14 12:39:34 -08:00
commit 787fe1d43a
6 changed files with 10 additions and 8 deletions

View File

@ -809,8 +809,7 @@ static inline void clear_zone_contiguous(struct zone *zone)
extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order);
extern void memblock_free_pages(unsigned long pfn, unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
enum meminit_context context);

View File

@ -1772,7 +1772,7 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
end = PFN_DOWN(base + size);
for (; cursor < end; cursor++) {
memblock_free_pages(pfn_to_page(cursor), cursor, 0);
memblock_free_pages(cursor, 0);
totalram_pages_inc();
}
}
@ -2217,7 +2217,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
while (start + (1UL << order) > end)
order--;
memblock_free_pages(pfn_to_page(start), start, order);
memblock_free_pages(start, order);
start += (1UL << order);
}

View File

@ -50,6 +50,8 @@ static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size
start_bad = 0;
last_bad = 0;
VM_WARN_ON_ONCE(size < start_phys_aligned - start_phys);
for (p = start; p < end; p++)
WRITE_ONCE(*p, pattern);

View File

@ -2474,9 +2474,10 @@ void *__init alloc_large_system_hash(const char *tablename,
return table;
}
void __init memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order)
void __init memblock_free_pages(unsigned long pfn, unsigned int order)
{
struct page *page = pfn_to_page(pfn);
if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
int nid = early_pfn_to_nid(pfn);

View File

@ -4,6 +4,7 @@
#include <linux/align.h>
#include <linux/mmzone.h>
#include <linux/sizes.h>
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)

View File

@ -15,8 +15,7 @@ bool mirrored_kernelcore = false;
struct page {};
void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order)
/* Intentional no-op stub for the userspace memblock tests (tools/), where
 * there is no page allocator to hand pages back to. Signature mirrors the
 * kernel's memblock_free_pages() after the 'struct page *' argument drop.
 */
void memblock_free_pages(unsigned long pfn, unsigned int order)
{
}