Mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux (synced 2025-09-16 01:26:56 +10:00)
mm/hugetlb: get rid of page_hstate()
Convert the last page_hstate() user to use folio_hstate() so page_hstate() can be safely removed.

Link: https://lkml.kernel.org/r/20230719184145.301911-1-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 89be82b4fe
commit affd26b1fb
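The replacement the message describes is mechanical: a caller that previously passed a page to page_hstate() now obtains the folio first and asks it for the hstate. Below is a minimal hypothetical sketch of that pattern; demo_huge_page_size() is illustrative and not part of this patch, while page_folio(), folio_hstate() and huge_page_size() are existing mm/hugetlb helpers.

#include <linux/hugetlb.h>
#include <linux/mm.h>

/* Illustrative only: the caller-side rewrite once page_hstate() is gone. */
static unsigned long demo_huge_page_size(struct page *page)
{
	/* old: struct hstate *h = page_hstate(page); */
	struct hstate *h = folio_hstate(page_folio(page));

	return huge_page_size(h);	/* size in bytes of this hstate's huge pages */
}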
@@ -841,11 +841,6 @@ static inline struct hstate *folio_hstate(struct folio *folio)
 	return size_to_hstate(folio_size(folio));
 }
 
-static inline struct hstate *page_hstate(struct page *page)
-{
-	return folio_hstate(page_folio(page));
-}
-
 static inline unsigned hstate_index_to_shift(unsigned index)
 {
 	return hstates[index].order + PAGE_SHIFT;
@@ -1062,11 +1057,6 @@ static inline struct hstate *folio_hstate(struct folio *folio)
 	return NULL;
 }
 
-static inline struct hstate *page_hstate(struct page *page)
-{
-	return NULL;
-}
-
 static inline struct hstate *size_to_hstate(unsigned long size)
 {
 	return NULL;
@@ -1785,10 +1785,10 @@ static void free_hpage_workfn(struct work_struct *work)
 		node = node->next;
 		page->mapping = NULL;
 		/*
-		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
-		 * is going to trigger because a previous call to
+		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
+		 * folio_hstate() is going to trigger because a previous call to
 		 * remove_hugetlb_folio() will call folio_set_compound_dtor
-		 * (folio, NULL_COMPOUND_DTOR), so do not use page_hstate()
+		 * (folio, NULL_COMPOUND_DTOR), so do not use folio_hstate()
 		 * directly.
 		 */
 		h = size_to_hstate(page_size(page));
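The comment rewritten in this hunk refers to the assertion inside folio_hstate(). At the time of this series the helper reads roughly as below (a reference sketch of the include/linux/hugetlb.h definition, not code changed by this patch):

static inline struct hstate *folio_hstate(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	return size_to_hstate(folio_size(folio));
}

Because remove_hugetlb_folio() has already set the compound destructor to NULL_COMPOUND_DTOR by the time free_hpage_workfn() runs, folio_test_hugetlb() would return false and trip that assertion, so the worker keeps resolving the hstate via size_to_hstate(page_size(page)) instead.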
@@ -79,17 +79,17 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
 		 * handle each tail page individually in migration.
 		 */
 		if (PageHuge(page) || PageTransCompound(page)) {
-			struct page *head = compound_head(page);
+			struct folio *folio = page_folio(page);
 			unsigned int skip_pages;
 
 			if (PageHuge(page)) {
-				if (!hugepage_migration_supported(page_hstate(head)))
+				if (!hugepage_migration_supported(folio_hstate(folio)))
 					return page;
-			} else if (!PageLRU(head) && !__PageMovable(head)) {
+			} else if (!folio_test_lru(folio) && !__folio_test_movable(folio)) {
 				return page;
 			}
 
-			skip_pages = compound_nr(head) - (page - head);
+			skip_pages = folio_nr_pages(folio) - folio_page_idx(folio, page);
 			pfn += skip_pages - 1;
 			continue;
 		}
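For reference, the substitutions in this last hunk follow the usual page-to-folio conversion pattern: compound_head() becomes page_folio(), the head-page tests become folio_test_lru()/__folio_test_movable(), and the tail-skip arithmetic becomes folio_nr_pages() minus folio_page_idx(), i.e. the number of pages from the current page to the end of the folio. A hypothetical fragment showing just that arithmetic (demo_pages_left_in_folio() is illustrative, not part of the patch):

#include <linux/mm.h>

/* Illustrative only: pages remaining in @page's folio, counting @page itself. */
static unsigned long demo_pages_left_in_folio(struct page *page)
{
	struct folio *folio = page_folio(page);

	/* old form: compound_nr(head) - (page - head) */
	return folio_nr_pages(folio) - folio_page_idx(folio, page);
}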