Six hotfixes.  One from Miaohe Lin is considered a minor thing so it isn't
for -stable.  The remainder address pre-5.19 issues and are cc:stable.
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCYpEC8gAKCRDdBJ7gKXxA
 jlukAQDCaXF7YTBjpoaAl0zhSu+5h7CawiB6cnRlq87/uJ2S4QD/eLVX3zfxI2DX
 YcOhc5H8BOgZ8ppD80Nv9qjmyvEWzAA=
 =ZFFG
 -----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2022-05-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
 "Six hotfixes.

  The page_table_check one from Miaohe Lin is considered a minor thing
  so it isn't marked for -stable. The remainder address pre-5.19 issues
  and are cc:stable"

* tag 'mm-hotfixes-stable-2022-05-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/page_table_check: fix accessing unmapped ptep
  kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]
  mm/page_alloc: always attempt to allocate at least one page during bulk allocation
  hugetlb: fix huge_pmd_unshare address update
  zsmalloc: fix races between asynchronous zspage free and page migration
  Revert "mm/cma.c: remove redundant cma_mutex lock"
Linus Torvalds 2022-05-27 11:29:35 -07:00
commit 77fb622de1
9 changed files with 103 additions and 51 deletions

arch/s390/include/asm/kexec.h

@@ -9,6 +9,8 @@
 #ifndef _S390_KEXEC_H
 #define _S390_KEXEC_H
 
+#include <linux/module.h>
+
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/setup.h>
@@ -83,4 +85,12 @@ struct kimage_arch {
 extern const struct kexec_file_ops s390_kexec_image_ops;
 extern const struct kexec_file_ops s390_kexec_elf_ops;
 
+#ifdef CONFIG_KEXEC_FILE
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+				     Elf_Shdr *section,
+				     const Elf_Shdr *relsec,
+				     const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+#endif
 #endif /*_S390_KEXEC_H */

arch/x86/include/asm/kexec.h

@@ -186,6 +186,14 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
 extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
 #define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
 
+#ifdef CONFIG_KEXEC_FILE
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+				     Elf_Shdr *section,
+				     const Elf_Shdr *relsec,
+				     const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+#endif
 #endif
 
 typedef void crash_vmclear_fn(void);

include/linux/kexec.h

@@ -193,14 +193,6 @@ void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
 int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
 				  unsigned long buf_len);
 void *arch_kexec_kernel_image_load(struct kimage *image);
-int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
-				     Elf_Shdr *section,
-				     const Elf_Shdr *relsec,
-				     const Elf_Shdr *symtab);
-int arch_kexec_apply_relocations(struct purgatory_info *pi,
-				 Elf_Shdr *section,
-				 const Elf_Shdr *relsec,
-				 const Elf_Shdr *symtab);
 int arch_kimage_file_post_load_cleanup(struct kimage *image);
 #ifdef CONFIG_KEXEC_SIG
 int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
@@ -229,6 +221,44 @@ extern int crash_exclude_mem_range(struct crash_mem *mem,
 				   unsigned long long mend);
 extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
 				       void **addr, unsigned long *sz);
+
+#ifndef arch_kexec_apply_relocations_add
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi:		Purgatory to be relocated.
+ * @section:	Section relocations applying to.
+ * @relsec:	Section containing RELAs.
+ * @symtab:	Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+	pr_err("RELA relocation unsupported.\n");
+	return -ENOEXEC;
+}
+#endif
+
+#ifndef arch_kexec_apply_relocations
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi:		Purgatory to be relocated.
+ * @section:	Section relocations applying to.
+ * @relsec:	Section containing RELs.
+ * @symtab:	Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
+			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+	pr_err("REL relocation unsupported.\n");
+	return -ENOEXEC;
+}
+#endif
+
 #endif /* CONFIG_KEXEC_FILE */
 
 #ifdef CONFIG_KEXEC_ELF

kernel/kexec_file.c

@@ -108,40 +108,6 @@ int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
 }
 #endif
 
-/*
- * arch_kexec_apply_relocations_add - apply relocations of type RELA
- * @pi:		Purgatory to be relocated.
- * @section:	Section relocations applying to.
- * @relsec:	Section containing RELAs.
- * @symtab:	Corresponding symtab.
- *
- * Return: 0 on success, negative errno on error.
- */
-int __weak
-arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
-				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
-{
-	pr_err("RELA relocation unsupported.\n");
-	return -ENOEXEC;
-}
-
-/*
- * arch_kexec_apply_relocations - apply relocations of type REL
- * @pi:		Purgatory to be relocated.
- * @section:	Section relocations applying to.
- * @relsec:	Section containing RELs.
- * @symtab:	Corresponding symtab.
- *
- * Return: 0 on success, negative errno on error.
- */
-int __weak
-arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
-			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
-{
-	pr_err("REL relocation unsupported.\n");
-	return -ENOEXEC;
-}
-
 /*
  * Free up memory used by kernel, initrd, and command line. This is temporary
  * memory allocation which is not needed any more after these buffers have
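The three kexec hunks above all implement one pattern: the __weak default definitions removed here from kernel/kexec_file.c move into include/linux/kexec.h as static inline fallbacks guarded by #ifndef, and an architecture with a real implementation (s390, x86) declares its function and defines a same-named macro so the generic fallback is compiled out. A minimal user-space sketch of the same override idiom, with do_relocate as a made-up stand-in:

#include <stdio.h>

/* "Arch" side: provide the real function, then define a macro with the
 * same name so the generic header's #ifndef fallback is skipped. */
static int do_relocate(void) { return 0; }
#define do_relocate do_relocate

/* Generic side: compiled only when no override macro exists, mirroring
 * the #ifndef arch_kexec_apply_relocations_add block above. */
#ifndef do_relocate
static inline int do_relocate(void) { return -1; /* unsupported */ }
#endif

int main(void)
{
	printf("do_relocate() = %d\n", do_relocate());
	return 0;
}

Because the override is resolved by the preprocessor rather than by weak-symbol linking, the chosen implementation is fixed at compile time.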

mm/cma.c

@@ -37,6 +37,7 @@
 
 struct cma cma_areas[MAX_CMA_AREAS];
 unsigned cma_area_count;
+static DEFINE_MUTEX(cma_mutex);
 
 phys_addr_t cma_get_base(const struct cma *cma)
 {
@@ -468,9 +469,10 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 		spin_unlock_irq(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
+		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
 				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-
+		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
 			break;

mm/hugetlb.c

@@ -6755,7 +6755,14 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_clear(pud);
 	put_page(virt_to_page(ptep));
 	mm_dec_nr_pmds(mm);
-	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+	/*
+	 * This update of passed address optimizes loops sequentially
+	 * processing addresses in increments of huge page size (PMD_SIZE
+	 * in this case).  By clearing the pud, a PUD_SIZE area is unmapped.
+	 * Update address to the 'last page' in the cleared area so that
+	 * calling loop can move to first page past this area.
+	 */
+	*addr |= PUD_SIZE - PMD_SIZE;
 	return 1;
 }
 
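The replaced line rounded up with ALIGN(), so when *addr was already PUD-aligned the update stepped back to the huge page just before the area that pud_clear() unmapped, and the caller could re-walk addresses it had already processed; the new form always yields the last PMD-sized page inside the cleared area. A runnable sketch of the arithmetic, assuming typical x86-64 sizes (2 MiB PMD, 1 GiB PUD; HPAGE_SIZE and PMD_SIZE coincide here) purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE (2ULL << 20)	/* assumed: 2 MiB huge page */
#define PUD_SIZE (1ULL << 30)	/* assumed: 1 GiB = PMD_SIZE * 512 */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t addr = 0x40000000ULL;	/* already PUD-aligned */

	/* Old: ALIGN() leaves an aligned address unchanged, so this lands
	 * one huge page *before* the unmapped area. */
	uint64_t old = ALIGN(addr, PUD_SIZE) - PMD_SIZE;

	/* New: last PMD-sized page *inside* the PUD-sized area, so the
	 * caller's "+= PMD_SIZE" moves to the first page past it. */
	uint64_t new = addr | (PUD_SIZE - PMD_SIZE);

	printf("old: %#llx\nnew: %#llx\n",
	       (unsigned long long)old, (unsigned long long)new);
	return 0;
}

With addr = 0x40000000, the old expression gives 0x3fe00000 (outside the area) while the new one gives 0x7fe00000 (the last page inside it).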

mm/page_alloc.c

@@ -5324,8 +5324,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
 								pcp, pcp_list);
 		if (unlikely(!page)) {
-			/* Try and get at least one page */
-			if (!nr_populated)
+			/* Try and allocate at least one page */
+			if (!nr_account)
 				goto failed_irq;
 			break;
 		}
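nr_populated counts every filled slot in the caller-supplied page array, including slots that were full before the call, whereas nr_account counts only pages this call actually allocated. Bailing out on !nr_populated therefore let a call that allocated nothing return early whenever the array was partially filled, and a caller that loops until the array is full could spin forever; checking nr_account instead sends such a call through the failed_irq path, which falls back to the regular single-page allocator. A hedged sketch of such a caller (fill_page_array is hypothetical; alloc_pages_bulk_array() is the array-based wrapper around __alloc_pages_bulk()):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Refill the NULL slots of a partially populated array, looping until
 * every slot holds a page.  Before the fix, a pcplist failure could
 * return without allocating anything new, leaving this loop spinning. */
static int fill_page_array(struct page **pages, unsigned long nr)
{
	unsigned long filled;

	do {
		/* Returns the total number of populated slots; slots that
		 * are already non-NULL are skipped, not reallocated. */
		filled = alloc_pages_bulk_array(GFP_KERNEL, nr, pages);
	} while (filled < nr);

	return 0;
}

With the fix, every pass through the fallback attempts at least one allocation, so the loop makes forward progress.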

mm/page_table_check.c

@@ -234,11 +234,11 @@ void __page_table_check_pte_clear_range(struct mm_struct *mm,
 		pte_t *ptep = pte_offset_map(&pmd, addr);
 		unsigned long i;
 
-		pte_unmap(ptep);
 		for (i = 0; i < PTRS_PER_PTE; i++) {
 			__page_table_check_pte_clear(mm, addr, *ptep);
 			addr += PAGE_SIZE;
 			ptep++;
 		}
+		pte_unmap(ptep - PTRS_PER_PTE);
 	}
 }
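pte_offset_map() may create a temporary kernel mapping for the PTE page (notably with CONFIG_HIGHPTE), and pte_unmap() ends that mapping's validity, so the old ordering dereferenced *ptep PTRS_PER_PTE times after unmapping it. The fix unmaps only after the loop, passing ptep - PTRS_PER_PTE because ptep has been advanced. A minimal sketch of the map/use/unmap contract, indexing instead of advancing so the original pointer survives for the unmap (walk_ptes and the pte_val() consumer are illustrative only):

#include <linux/mm.h>
#include <linux/pgtable.h>

static void walk_ptes(pmd_t pmd, unsigned long addr)
{
	pte_t *ptep = pte_offset_map(&pmd, addr);
	unsigned long i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		(void)pte_val(ptep[i]);	/* safe: still mapped */
	pte_unmap(ptep);		/* matching unmap, after the last use */
}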

mm/zsmalloc.c

@@ -1718,11 +1718,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
  */
 static void lock_zspage(struct zspage *zspage)
 {
-	struct page *page = get_first_page(zspage);
-
-	do {
-		lock_page(page);
-	} while ((page = get_next_page(page)) != NULL);
+	struct page *curr_page, *page;
+
+	/*
+	 * Pages we haven't locked yet can be migrated off the list while we're
+	 * trying to lock them, so we need to be careful and only attempt to
+	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
+	 * may no longer belong to the zspage. This means that we may wait for
+	 * the wrong page to unlock, so we must take a reference to the page
+	 * prior to waiting for it to unlock outside migrate_read_lock().
+	 */
+	while (1) {
+		migrate_read_lock(zspage);
+		page = get_first_page(zspage);
+		if (trylock_page(page))
+			break;
+		get_page(page);
+		migrate_read_unlock(zspage);
+		wait_on_page_locked(page);
+		put_page(page);
+	}
+
+	curr_page = page;
+	while ((page = get_next_page(curr_page))) {
+		if (trylock_page(page)) {
+			curr_page = page;
+		} else {
+			get_page(page);
+			migrate_read_unlock(zspage);
+			wait_on_page_locked(page);
+			put_page(page);
+			migrate_read_lock(zspage);
+		}
+	}
+	migrate_read_unlock(zspage);
 }
 
 static int zs_init_fs_context(struct fs_context *fc)
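The rewritten lock_zspage() is an instance of a general pattern for locking every member of a collection whose membership can change underneath you: trylock under the collection lock; on contention, pin the member with a reference, drop the collection lock so the current holder can make progress, sleep until the member lock is released, and retry from the top so membership is re-validated. A user-space pthreads analogue of that skeleton (all names made up: list_lock stands in for migrate_read_lock(), the element mutex for the page lock, refs for get_page()/put_page()):

#include <pthread.h>

struct elem {
	pthread_mutex_t lock;	/* ~ the page lock */
	int refs;		/* ~ page refcount, managed under list_lock here */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~ migrate_read_lock() */

static void lock_elem(struct elem *e)
{
	for (;;) {
		pthread_mutex_lock(&list_lock);
		if (pthread_mutex_trylock(&e->lock) == 0) {
			/* Locked while membership was stable: done. */
			pthread_mutex_unlock(&list_lock);
			return;
		}
		/* Contended: pin the element, drop the collection lock,
		 * block until the holder releases, then retry so the
		 * collection is re-examined from scratch. */
		e->refs++;
		pthread_mutex_unlock(&list_lock);
		pthread_mutex_lock(&e->lock);	/* ~ wait_on_page_locked() */
		pthread_mutex_unlock(&e->lock);
		pthread_mutex_lock(&list_lock);
		e->refs--;
		pthread_mutex_unlock(&list_lock);
	}
}

The retry after waiting is what closes the race: the page observed before sleeping may have migrated to another zspage, so nothing learned before re-taking the collection lock can be trusted.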