| author | Ard Biesheuvel <ardb@kernel.org> | 2021-02-26 09:33:52 +0100 |
|---|---|---|
| committer | Ard Biesheuvel <ardb@kernel.org> | 2021-03-10 19:07:07 +0100 |
| commit | 94092d3df6c5818ae5cfed7e6ceda14c0799161d | |
| tree | 193e7f67b6d32bcccabc726e824d865168ce57bd | |
| parent | b43438b59bebed2f5ae0f935c1e4ffd90bfb02d1 | |
| download | linux-94092d3df6c5818ae5cfed7e6ceda14c0799161d.tar.gz | |
mm: HACK provide target address when migrating a PMD entry
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/huge_memory.c | 6 |
| -rw-r--r-- | mm/khugepaged.c | 2 |

2 files changed, 5 insertions, 3 deletions
```diff
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 395c75111d335..fc4b36ebd8dc4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2027,7 +2027,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 		pte_unmap(pte);
 	}
 	smp_wmb(); /* make pte visible before pmd */
-	pmd_populate(mm, pmd, pgtable);
+	//pmd_populate(mm, pmd, pgtable);
+	set_pmd_at(mm, haddr, pmd, __pmd(page_to_phys(pgtable) | PMD_TYPE_TABLE));
 }
 
 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
@@ -2193,7 +2194,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	}
 
 	smp_wmb(); /* make pte visible before pmd */
-	pmd_populate(mm, pmd, pgtable);
+	//pmd_populate(mm, pmd, pgtable);
+	set_pmd_at(mm, haddr, pmd, __pmd(page_to_phys(pgtable) | PMD_TYPE_TABLE));
 
 	if (freeze) {
 		for (i = 0; i < HPAGE_PMD_NR; i++) {
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a7d6cb912b051..368ca7ec35491 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1167,7 +1167,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		 * hugepmds and never for establishing regular pmds that
 		 * points to regular pagetables. Use pmd_populate for that
 		 */
-		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
+		set_pmd_at(mm, address, pmd, __pmd(pmd_val(_pmd) | PMD_TYPE_TABLE));
 		spin_unlock(pmd_ptl);
 		anon_vma_unlock_write(vma->anon_vma);
 		result = SCAN_FAIL;
```
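For context, the hack swaps pmd_populate(), which never sees the virtual address being mapped, for set_pmd_at(), whose signature carries the target address, and builds the table descriptor by hand (page_to_phys(pgtable) | PMD_TYPE_TABLE, matching what the arm64 pmd_populate() would install). The standalone sketch below only mirrors that shape with mock userspace types; mock_pmd_t, mock_mm, MOCK_PMD_TYPE_TABLE and the two helpers are illustrative stand-ins, not the kernel's definitions.

```c
/*
 * Mock illustration of the shape of this hack: pmd_populate() drops the
 * virtual address, while a set_pmd_at()-style call keeps it, so the arch
 * hook can see which VA range the new page table will cover.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } mock_pmd_t;     /* stand-in for pmd_t */
#define MOCK_PMD_TYPE_TABLE 0x3ULL               /* stand-in for PMD_TYPE_TABLE */

struct mock_mm { const char *name; };            /* stand-in for struct mm_struct */

/* pmd_populate()-style: only the mm, the PMD slot and the table's phys addr */
static void mock_pmd_populate(struct mock_mm *mm, mock_pmd_t *pmdp,
			      uint64_t pgtable_phys)
{
	pmdp->val = pgtable_phys | MOCK_PMD_TYPE_TABLE;
	printf("%s: slot populated, target VA unknown here\n", mm->name);
}

/* set_pmd_at()-style: the target virtual address travels with the store */
static void mock_set_pmd_at(struct mock_mm *mm, uint64_t addr,
			    mock_pmd_t *pmdp, mock_pmd_t pmd)
{
	pmdp->val = pmd.val;
	printf("%s: slot populated, covers VA 0x%llx\n",
	       mm->name, (unsigned long long)addr);
}

int main(void)
{
	struct mock_mm mm = { "demo-mm" };
	mock_pmd_t slot = { 0 };
	uint64_t pgtable_phys = 0x40001000ULL;  /* pretend page_to_phys(pgtable) */
	uint64_t haddr = 0x7f0000200000ULL;     /* pretend huge-page-aligned VA */

	mock_pmd_populate(&mm, &slot, pgtable_phys);
	/* the HACK: same descriptor value, but the address reaches the helper */
	mock_set_pmd_at(&mm, haddr, &slot,
			(mock_pmd_t){ pgtable_phys | MOCK_PMD_TYPE_TABLE });
	return 0;
}
```

The subject line marks this as a HACK, and the khugepaged hunk even replaces a call sitting under a comment that says to use pmd_populate() there; the patch reads as a prototype for plumbing the target address down to the architecture layer rather than a finished interface.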
