// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.
	 */
	if (is_migrate_isolate_page(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return -EBUSY;
	}

	/*
	 * FIXME: memory hotplug doesn't call shrink_slab() by itself yet,
	 * so we only check MOVABLE pages here.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
								NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		spin_unlock_irqrestore(&zone->lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (isol_flags & REPORT_FAILURE) {
		/*
		 * printk() with zone->lock held will likely trigger a
		 * lockdep splat, so defer it here.
		 */
		dump_page(unmovable, "unmovable page");
	}

	return -EBUSY;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with order >= pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage counting
	 * problem, it is possible that a free buddy page exists here.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating the page and freeing it again will get these
	 * pages merged. (See the worked example after this function.)
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		if (order >= pageblock_order && order < MAX_ORDER - 1) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (!is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page with order >= pageblock_order above,
	 * there should be no other free pages in the range, so we can avoid
	 * the costly pageblock scan that move_freepages_block() performs.
	 *
	 * We didn't actually touch any of the isolated pages, so place them
	 * at the tail of the freelist. This is an optimization for memory
	 * onlining - just-onlined memory won't immediately be considered for
	 * allocation.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
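
/*
 * Worked example for the PageBuddy() branch in unset_migratetype_isolate()
 * above (a sketch; the concrete numbers assume pageblock_order == 9 and
 * MAX_ORDER == 11, typical x86-64 values, and are not taken from this file):
 *
 *	- Pageblock A was isolated while an order-9 free page covering
 *	  exactly A sat in the buddy allocator; its order-9 buddy in the
 *	  neighbouring pageblock B is also free but was never isolated.
 *	- Merging A's page with B's is restricted while A is isolated
 *	  (the freepage counters would go wrong), so the pair stays split.
 *	- On unisolation, __isolate_free_page() takes A's page off the
 *	  MIGRATE_ISOLATE freelist; __putback_isolated_page() then frees
 *	  it through the normal buddy path, which performs the deferred
 *	  merge of A and B into an order-10 page.
 */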

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 *
 * Making the page-allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes migrate
 * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
 * isolate all pages in the range, the caller has to free all pages in the
 * range; test_pages_isolated() can be used to verify that. A typical caller
 * sequence is sketched in the comment after this function.
 *
 * There is no high-level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate,
 * set_migratetype_isolate() returns an error, and we clean up by restoring
 * the migration type on any pageblocks we may have modified before returning
 * -EBUSY to the caller. This prevents two threads from simultaneously
 * working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However
 * in some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong of a guarantee the caller needs, zone_pcp_disable/enable()
 * might be used to flush and disable pcplists before isolation and enable
 * them again after unisolation.
 *
 * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
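
/*
 * A typical caller sequence, sketched from alloc_contig_range()-style users
 * (a minimal sketch: error handling is trimmed and the migration step is
 * elided; see __alloc_contig_migrate_range() for the real migration loop):
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, 0);
 *	if (ret)
 *		return ret;
 *	... migrate any pages still in use inside [start_pfn, end_pfn) ...
 *	drain_all_pages(zone);
 *	ret = test_pages_isolated(start_pfn, end_pfn, 0);
 *	... on success the range is free and cannot be reallocated ...
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */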

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
/*
 * Test whether all pages in the range are free (i.e., isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone, and
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages. Check the
	 * migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page) {
		ret = -EBUSY;
		goto out;
	}

	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	ret = pfn < end_pfn ? -EBUSY : 0;

out:
	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return ret;
}
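
/*
 * Memory offlining is the main user of the MEMORY_OFFLINE flag: HWPoison
 * pages and driver-owned PageOffline() pages must not block the check.
 * Roughly (a sketch of the offline_pages() retry pattern, not a verbatim
 * copy; the real loop also checks for pending signals):
 *
 *	do {
 *		... migrate movable pages out of [start_pfn, end_pfn) ...
 *		ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
 *	} while (ret == -EBUSY);
 */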