author | Ingo Molnar <mingo@kernel.org> | 2021-02-17 14:04:39 +0100 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2021-02-17 14:04:39 +0100 |
commit | ed3cd45f8ca873dd320ff7e6b4c1c8f83a65302c (patch) | |
tree | 783a02c1e78964654fe6d9a9c14b24bfc50e6b3b /mm/page_alloc.c | |
parent | bae4ec13640b0915e7dd86da7e65c5d085160571 (diff) | |
parent | f40ddce88593482919761f74910f42f4b84c004b (diff) | |
Merge tag 'v5.11' into sched/core, to pick up fixes & refresh the branch
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 33 |
1 file changed, 18 insertions, 15 deletions
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bdbec4c98173..519a60d5b6f7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1207,8 +1207,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
 	/* s390's use of memset() could override KASAN redzones. */
 	kasan_disable_current();
 	for (i = 0; i < numpages; i++) {
+		u8 tag = page_kasan_tag(page + i);
 		page_kasan_tag_reset(page + i);
 		clear_highpage(page + i);
+		page_kasan_tag_set(page + i, tag);
 	}
 	kasan_enable_current();
 }
@@ -2862,20 +2864,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
 	struct page *page;
 
-#ifdef CONFIG_CMA
-	/*
-	 * Balance movable allocations between regular and CMA areas by
-	 * allocating from CMA when over half of the zone's free memory
-	 * is in the CMA area.
-	 */
-	if (alloc_flags & ALLOC_CMA &&
-	    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-	    zone_page_state(zone, NR_FREE_PAGES) / 2) {
-		page = __rmqueue_cma_fallback(zone, order);
-		if (page)
-			return page;
+	if (IS_ENABLED(CONFIG_CMA)) {
+		/*
+		 * Balance movable allocations between regular and CMA areas by
+		 * allocating from CMA when over half of the zone's free memory
+		 * is in the CMA area.
+		 */
+		if (alloc_flags & ALLOC_CMA &&
+		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
+		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+			page = __rmqueue_cma_fallback(zone, order);
+			if (page)
+				goto out;
+		}
 	}
-#endif
 retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
@@ -2886,8 +2888,9 @@ retry:
 								alloc_flags))
 			goto retry;
 	}
-
-	trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+	if (page)
+		trace_mm_page_alloc_zone_locked(page, order, migratetype);
 	return page;
 }
```
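The first hunk preserves a page's KASAN tag across clear_highpage(): the tag is read with page_kasan_tag() before page_kasan_tag_reset(), the page is wiped, and the tag is written back with page_kasan_tag_set(), so later tagged accesses to the page still match. Below is a minimal userspace sketch of that save/wipe/restore idiom; struct page_state and clear_contents() are hypothetical stand-ins, not kernel APIs.

```c
#include <stdio.h>

/*
 * Hypothetical stand-in for a page with a per-page tag: a destructive
 * helper clobbers the tag as a side effect, the way the wipe in the
 * hunk above would lose the KASAN tag without the fix.
 */
struct page_state {
	unsigned char tag;
	char data[16];
};

/* Plays the role of clear_highpage(): wipes data, resets the tag. */
static void clear_contents(struct page_state *p)
{
	for (size_t i = 0; i < sizeof(p->data); i++)
		p->data[i] = 0;
	p->tag = 0xff;	/* analogous to page_kasan_tag_reset() */
}

int main(void)
{
	struct page_state p = { .tag = 0x2a, .data = "hello" };

	/* The hunk's pattern: save the tag, wipe, put the tag back. */
	unsigned char tag = p.tag;	/* like page_kasan_tag(page + i) */
	clear_contents(&p);
	p.tag = tag;			/* like page_kasan_tag_set() */

	printf("tag after clear: 0x%02x\n", p.tag);	/* prints 0x2a */
	return 0;
}
```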
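The second hunk converts the #ifdef CONFIG_CMA block to if (IS_ENABLED(CONFIG_CMA)) and routes the CMA fast path through a shared out: label, so trace_mm_page_alloc_zone_locked() fires only when a page was actually allocated, including for the CMA fallback that previously returned early without tracing. Here is a standalone sketch of the IS_ENABLED() idea using a hypothetical CONFIG_FOO flag; the real macro lives in include/linux/kconfig.h and also copes with tristate (=m) options.

```c
#include <stdio.h>

/* Hypothetical config flag: build with -DCONFIG_FOO to enable. */
#ifdef CONFIG_FOO
#define IS_ENABLED_FOO 1
#else
#define IS_ENABLED_FOO 0
#endif

static int feature_path(void)
{
	return 42;
}

int main(void)
{
	/*
	 * Unlike an #ifdef block, the disabled branch is still parsed
	 * and type-checked before being eliminated as dead code, so it
	 * cannot silently bit-rot while CONFIG_FOO is off.
	 */
	if (IS_ENABLED_FOO)
		printf("feature: %d\n", feature_path());
	else
		printf("feature disabled\n");
	return 0;
}
```

That compile-time visibility of both branches is the usual motivation for preferring IS_ENABLED() over #ifdef in kernel code.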