author     Matthew Wilcox (Oracle) <willy@infradead.org>    2021-04-29 23:01:15 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-04-30 11:20:42 -0700
commit     84172f4bb752424415756351a40f8da5714e1554
tree       daf3da208d6fa979bf4e32ca8dfe61e6b87e3fe0
parent     6e5e0f286eb0ecf12afaa3e73c321bc5bf599abb
mm/page_alloc: combine __alloc_pages and __alloc_pages_nodemask
There are only two callers of __alloc_pages() so prune the thicket of
alloc_page variants by combining the two functions together.  Current
callers of __alloc_pages() simply add an extra 'NULL' parameter and
current callers of __alloc_pages_nodemask() call __alloc_pages() instead.
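For illustration only (a sketch, not part of the patch; gfp_mask, order, nid and
nmask stand for whatever the caller already has in scope), a call site converts
as follows:

	/* Before: two entry points, with and without a nodemask. */
	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
	page = __alloc_pages(gfp_mask, order, nid);

	/* After: one entry point; callers without a nodemask pass NULL. */
	page = __alloc_pages(gfp_mask, order, nid, nmask);
	page = __alloc_pages(gfp_mask, order, nid, NULL);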
Link: https://lkml.kernel.org/r/20210225150642.2582252-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 Documentation/admin-guide/mm/transhuge.rst |  2
 include/linux/gfp.h                        | 13
 mm/hugetlb.c                               |  2
 mm/internal.h                              |  4
 mm/mempolicy.c                             |  6
 mm/migrate.c                               |  2
 mm/page_alloc.c                            |  5
 7 files changed, 13 insertions(+), 21 deletions(-)
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 3b8a336511a4..c9c37f16eef8 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -402,7 +402,7 @@ compact_fail
 	but failed.
 
 It is possible to establish how long the stalls were using the function
-tracer to record how long was spent in __alloc_pages_nodemask and
+tracer to record how long was spent in __alloc_pages() and
 using the mm_page_alloc tracepoint to identify which allocations were
 for huge pages.
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 8572a1474e16..f39de931bdf9 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -515,15 +515,8 @@ static inline int arch_make_page_accessible(struct page *page)
 }
 #endif
 
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask);
-
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
-{
-	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
-}
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask);
 
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
@@ -535,7 +528,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
 
-	return __alloc_pages(gfp_mask, order, nid);
+	return __alloc_pages(gfp_mask, order, nid, NULL);
 }
 
 /*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a86a58ef132d..6c72433bec1e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1616,7 +1616,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
-	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	page = __alloc_pages(gfp_mask, order, nid, nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else
diff --git a/mm/internal.h b/mm/internal.h
index 42e30e71554a..ef5f336f59bd 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -145,10 +145,10 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  * family of functions.
  *
  * nodemask, migratetype and highest_zoneidx are initialized only once in
- * __alloc_pages_nodemask() and then never change.
+ * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
- * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
  */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ab51132547b8..5f0d20298736 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2140,7 +2140,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 {
 	struct page *page;
 
-	page = __alloc_pages(gfp, order, nid);
+	page = __alloc_pages(gfp, order, nid, NULL);
 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
 	if (!static_branch_likely(&vm_numa_stat_key))
 		return page;
@@ -2237,7 +2237,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+	page = __alloc_pages(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
@@ -2274,7 +2274,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if (pol->mode == MPOL_INTERLEAVE)
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
-		page = __alloc_pages_nodemask(gfp, order,
+		page = __alloc_pages(gfp, order,
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));
 
diff --git a/mm/migrate.c b/mm/migrate.c
index 62b81d5257aa..47df0df8f21a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1617,7 +1617,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
+	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c565ebad02ee..fce4b9180bdb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5013,8 +5013,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask)
 {
 	struct page *page;
@@ -5076,7 +5075,7 @@ out:
 
 	return page;
 }
-EXPORT_SYMBOL(__alloc_pages_nodemask);
+EXPORT_SYMBOL(__alloc_pages);
 
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
