diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c
index ec5b28e570c963482d18e50f28043b066a425ffc..4c476904227f953bab5c1a89c9fe1175bfcc6647 100644
--- a/arch/loongarch/kernel/kfpu.c
+++ b/arch/loongarch/kernel/kfpu.c
@@ -18,11 +18,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN;
 static DEFINE_PER_CPU(bool, in_kernel_fpu);
 static DEFINE_PER_CPU(unsigned int, euen_current);
 
+static inline void fpregs_lock(void)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_disable();
+	else
+		local_bh_disable();
+}
+
+static inline void fpregs_unlock(void)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_enable();
+	else
+		local_bh_enable();
+}
+
 void kernel_fpu_begin(void)
 {
 	unsigned int *euen_curr;
 
-	preempt_disable();
+	if (!irqs_disabled())
+		fpregs_lock();
 
 	WARN_ON(this_cpu_read(in_kernel_fpu));
 
@@ -73,7 +90,8 @@ void kernel_fpu_end(void)
 
 	this_cpu_write(in_kernel_fpu, false);
 
-	preempt_enable();
+	if (!irqs_disabled())
+		fpregs_unlock();
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_end);
 
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
index 62ddcea0aa14672cc4906f9706f0934d2735502b..aca52c42e94e8b993d63e1b930578815138f7628 100644
--- a/arch/loongarch/mm/hugetlbpage.c
+++ b/arch/loongarch/mm/hugetlbpage.c
@@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
 			pmd = pmd_offset(pud, addr);
 		}
 	}
-	return (pte_t *) pmd;
+	return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
 }
 
 int pmd_huge(pmd_t pmd)
diff --git a/mm/migrate.c b/mm/migrate.c
index 1004b1def1c2010cd0052ff095d523f7fe69ad3c..4ed470885217465945668fd108b846587c28f34a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1504,6 +1504,7 @@ struct migrate_pages_stats {
 	int nr_thp_succeeded;	/* THP migrated successfully */
 	int nr_thp_failed;	/* THP failed to be migrated */
 	int nr_thp_split;	/* THP split before migrating */
+	int nr_split;	/* Large folio (include THP) split before migrating */
 };
 
 /*
@@ -1623,6 +1624,7 @@ static int migrate_pages_batch(struct list_head *from,
 	int nr_retry_pages = 0;
 	int pass = 0;
 	bool is_thp = false;
+	bool is_large = false;
 	struct folio *folio, *folio2, *dst = NULL, *dst2;
 	int rc, rc_saved = 0, nr_pages;
 	LIST_HEAD(unmap_folios);
@@ -1638,7 +1640,8 @@ static int migrate_pages_batch(struct list_head *from,
 		nr_retry_pages = 0;
 
 		list_for_each_entry_safe(folio, folio2, from, lru) {
-			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
+			is_large = folio_test_large(folio);
+			is_thp = is_large && folio_test_pmd_mappable(folio);
 			nr_pages = folio_nr_pages(folio);
 
 			cond_resched();
@@ -1658,6 +1661,7 @@ static int migrate_pages_batch(struct list_head *from,
 				stats->nr_thp_failed++;
 				if (!try_split_folio(folio, split_folios)) {
 					stats->nr_thp_split++;
+					stats->nr_split++;
 					continue;
 				}
 				stats->nr_failed_pages += nr_pages;
@@ -1686,11 +1690,12 @@ static int migrate_pages_batch(struct list_head *from,
 				nr_failed++;
 				stats->nr_thp_failed += is_thp;
 				/* Large folio NUMA faulting doesn't split to retry. */
-				if (folio_test_large(folio) && !nosplit) {
+				if (is_large && !nosplit) {
 					int ret = try_split_folio(folio, split_folios);
 
 					if (!ret) {
 						stats->nr_thp_split += is_thp;
+						stats->nr_split += is_large;
 						break;
 					} else if (reason == MR_LONGTERM_PIN &&
 						   ret == -EAGAIN) {
@@ -1836,6 +1841,7 @@ static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
 	stats->nr_succeeded += astats.nr_succeeded;
 	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
 	stats->nr_thp_split += astats.nr_thp_split;
+	stats->nr_split += astats.nr_split;
 	if (rc < 0) {
 		stats->nr_failed_pages += astats.nr_failed_pages;
 		stats->nr_thp_failed += astats.nr_thp_failed;
@@ -1843,7 +1849,11 @@ static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
 		return rc;
 	}
 	stats->nr_thp_failed += astats.nr_thp_split;
-	nr_failed += astats.nr_thp_split;
+	/*
+	 * Do not count rc, as pages will be retried below.
+	 * Count nr_split only, since it includes nr_thp_split.
+	 */
+	nr_failed += astats.nr_split;
 	/*
 	 * Fall back to migrate all failed folios one by one synchronously. All
 	 * failed folios except split THPs will be retried, so their failure
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aa292df1c28275f2fd9b6f1d16a77ff693464856..6fee765b26d09eaafd6f38202a3c5a7f4af16d9d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4205,6 +4205,14 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	}
 
 retry:
+	/*
+	 * Deal with possible cpuset update races or zonelist updates to avoid
+	 * infinite retries.
+	 */
+	if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+	    check_retry_zonelist(zonelist_iter_cookie))
+		goto restart;
+
 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
 	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
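---

Notes on the techniques above (editorial sketches, not part of the patch):

arch/loongarch/kernel/kfpu.c: local_bh_disable() protects against both
preemption and softirqs on !RT kernels, which is what a kernel-mode FPU
region needs, because softirq code may also use the FPU. On PREEMPT_RT,
local_bh_disable() only takes a per-CPU local lock and stays preemptible,
while softirqs run in thread context, so preempt_disable() is sufficient
there; this mirrors the fpregs_lock()/fpregs_unlock() scheme x86 uses.
The irqs_disabled() guard skips the lock when interrupts are already off,
since local_bh_disable() must not be called in that context. A minimal
caller sketch, assuming kernel context (checksum_with_lsx and its body
are hypothetical, not from this patch):

	#include <linux/types.h>
	#include <asm/fpu.h>

	static void checksum_with_lsx(const void *buf, size_t len)
	{
		kernel_fpu_begin();	/* BH off, or preemption off on RT */
		/* ... an LSX/LASX-accelerated loop would go here ... */
		kernel_fpu_end();
	}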
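arch/loongarch/mm/hugetlbpage.c: huge_pte_offset() is expected to return
NULL when nothing is mapped at the address, but the old code returned a
pointer to the PMD slot even when that slot was empty, so callers could
misread an empty PMD as a populated huge PTE. pmdp_get() reads the PMD
once before the pmd_none() test. A hedged sketch of the caller contract,
assuming kernel context (walk_one_hugepage is hypothetical, and the
single-argument huge_ptep_get() form is assumed):

	#include <linux/mm.h>
	#include <linux/hugetlb.h>

	static int walk_one_hugepage(struct mm_struct *mm, unsigned long addr)
	{
		pte_t *ptep = huge_pte_offset(mm, addr, PMD_SIZE);

		if (!ptep)
			return 0;	/* hole: nothing mapped at addr */

		/* slot is populated: a present, swap or migration entry,
		 * never a stale empty PMD */
		return pte_present(huge_ptep_get(ptep));
	}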
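mm/migrate.c: migrate_pages_sync() first migrates the whole list
asynchronously, then retries the failures one by one synchronously.
Folios split during the async pass are queued again and retried, so the
async return code must not be folded into nr_failed; only the number of
splits counts, and nr_split is used because it already includes
nr_thp_split (adding both would double-count PMD-sized folios). A toy
model of this fold rule (plain C, names invented for illustration):

	#include <stdio.h>

	struct stats { int thp_split; int split; int failed; };

	static void fold_async(struct stats *s, const struct stats *a)
	{
		s->thp_split += a->thp_split;
		/* a->split already contains a->thp_split, so only the
		 * superset counter feeds failed */
		s->failed += a->split;
	}

	int main(void)
	{
		struct stats total = { 0, 0, 0 };
		struct stats async = { .thp_split = 2, .split = 5 };

		fold_async(&total, &async);
		printf("failed=%d, not %d\n", total.failed,
		       async.split + async.thp_split);	/* 5, not 7 */
		return 0;
	}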
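mm/page_alloc.c: __alloc_pages_slowpath() snapshots cpuset and zonelist
cookies when it starts; a concurrent cpuset update or memory hotplug can
invalidate them, leaving the allocator retrying forever against a stale
zonelist view in which no zone is usable. Checking the cookies at the top
of the retry loop revalidates the snapshot on every iteration and jumps
back to restart with fresh cookies instead. A standalone sketch of this
generation-cookie pattern (all names hypothetical, not the kernel's API):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_uint zonelist_gen;	/* bumped by a (not shown) updater */

	static bool cookie_stale(unsigned int cookie)
	{
		return atomic_load_explicit(&zonelist_gen,
					    memory_order_acquire) != cookie;
	}

	static bool try_alloc_pass(void) { return false; }	/* stand-in */

	void alloc_slowpath(void)
	{
		unsigned int cookie;

	restart:
		cookie = atomic_load_explicit(&zonelist_gen, memory_order_acquire);

		for (int pass = 0; pass < 16; pass++) {
			/* the moved check: revalidate before each retry */
			if (cookie_stale(cookie))
				goto restart;
			if (try_alloc_pass())
				return;
		}
	}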