diff options
| author | Linus Walleij <linus.walleij@linaro.org> | 2021-04-15 17:30:08 +0200 |
|---|---|---|
| committer | Linus Walleij <linus.walleij@linaro.org> | 2021-04-15 17:32:49 +0200 |
| commit | 65ecafff4de77d2c71838246ce810f4bdc589d31 (patch) | |
| tree | 36e85383dcf929251bb4ee7dc9518b2770de1e0f | |
| parent | 5dd555f6ba29fe888124d31da04ea48eb5442f6d (diff) | |
| download | linux-integrator-65ecafff4de77d2c71838246ce810f4bdc589d31.tar.gz | |
ARM: Compile the kernel into VMALLOC
This makes it possible to compile the kernel into the
VMALLOC area (kernel virtual memory allocation pool).
We hammer down the virtual location of the kernel to
0xf1000000 and augment all translation functions to take
a special roundtrip for any kernel addresses when mapping
virtual-to-physical or physical-to-virtual addresses.
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
| -rw-r--r-- | arch/arm/Kconfig | 9 | ||||
| -rw-r--r-- | arch/arm/include/asm/memory.h | 58 | ||||
| -rw-r--r-- | arch/arm/mm/init.c | 15 | ||||
| -rw-r--r-- | arch/arm/mm/mmu.c | 39 |
4 files changed, 112 insertions, 9 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 853aab5ab327a..497ccc7ab9291 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -266,6 +266,15 @@ config NEED_MACH_MEMORY_H definitions for this platform. The need for mach/memory.h should be avoided when possible. +config ARM_KERNEL_IN_VMALLOC + bool "Compile the kernel into the VMALLOC area" + depends on !ARM_PATCH_PHYS_VIRT + help + This augments the build process so that the kernel TEXT, DATA + and BSS will be put into the VMALLOC area. This avoids a dedicated + mapping to userspace of the kernel for system calls and task + switching. + config PHYS_OFFSET hex "Physical address of main memory" if MMU depends on !ARM_PATCH_PHYS_VIRT diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 69146abfa05a5..eaef67ead1c3f 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -15,6 +15,7 @@ #include <linux/types.h> #include <linux/sizes.h> +#include <asm/page.h> #ifdef CONFIG_NEED_MACH_MEMORY_H #include <mach/memory.h> #endif @@ -27,7 +28,11 @@ * we may further offset this with TEXT_OFFSET in practice. */ #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) +#ifdef CONFIG_ARM_KERNEL_IN_VMALLOC +#define KERNEL_OFFSET (0xF1000000) +#else #define KERNEL_OFFSET (PAGE_OFFSET) +#endif #ifdef CONFIG_MMU @@ -176,9 +181,13 @@ extern unsigned long vectors_base; /* * Physical start and end address of the kernel. These addresses are * 2MB-aligned to match section mappings. + * + * The KERNEL_SECTION_SIZE is the amount of virtual memory reserved around + * the kernel, rounded down/up to 2MB sections. */ extern phys_addr_t kernel_phy_start; extern phys_addr_t kernel_phy_end; +#define KERNEL_SECTION_SIZE (kernel_phy_end - kernel_phy_start) /* * Physical vs virtual RAM address space conversion. 
These are @@ -289,22 +298,59 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) #define PHYS_OFFSET PLAT_PHYS_OFFSET #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) +#define KERNEL_PFN_OFFSET ((unsigned long)(kernel_phy_start >> PAGE_SHIFT)) static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x) { - return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; + if (!IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) { + return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; + } else { + phys_addr_t addr = (phys_addr_t)x; + + if ((addr >= KERNEL_OFFSET) && + (addr < (KERNEL_OFFSET + KERNEL_SECTION_SIZE))) + return addr - KERNEL_OFFSET + kernel_phy_start; + else + return addr - PAGE_OFFSET + PHYS_OFFSET; + } } static inline unsigned long __phys_to_virt(phys_addr_t x) { - return x - PHYS_OFFSET + PAGE_OFFSET; + /* Normally just use the 1-to-1 mapping */ + if (!IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) { + return x - PHYS_OFFSET + PAGE_OFFSET; + } else { + /* + * We need very specific handling of the kernel physical memory + * 1-to-1 map: memory allocations for the kernel will be made + * below (e.g. page table) and above (any other allocations) + * in the physical memory. 
+ */ + if (x >= kernel_phy_start && x < kernel_phy_end) + return x - kernel_phy_start + KERNEL_OFFSET; + else + return x - PHYS_OFFSET + PAGE_OFFSET; + } } - #endif -#define virt_to_pfn(kaddr) \ - ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ - PHYS_PFN_OFFSET) +static inline unsigned long virt_to_pfn(const void *kaddr) +{ + if (!IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) { + return ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + + PHYS_PFN_OFFSET); + } else { + if ((kaddr >= (void *)KERNEL_OFFSET) && + (kaddr < (void *)(KERNEL_OFFSET + KERNEL_SECTION_SIZE))) { + return ((((unsigned long)(kaddr) - KERNEL_OFFSET) >> PAGE_SHIFT) + + KERNEL_PFN_OFFSET); + } else { + return ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + + PHYS_PFN_OFFSET); + } + } +} #define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x)) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 31381c2844c42..88e1c4bd60385 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -212,8 +212,19 @@ void check_cpu_icache_size(int cpuid) void __init arm_memblock_init(const struct machine_desc *mdesc) { - /* Register the kernel text, kernel data and initrd with memblock. */ - memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START); + /* + * Register the kernel text, kernel data and initrd with memblock. + * + * When using kernel in vmalloc, we have to round up to the closest + * section size, or the temporary section mapping of the tail of the + * kernel will be overwritten by memblock allocations. This is not + * a problem with the linear kernel map, since the allocations can + * use the 1:1 map in that case. 
+ */ + if (!IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) + memblock_reserve(kernel_phy_start, KERNEL_END - KERNEL_START); + else + memblock_reserve(kernel_phy_start, KERNEL_SECTION_SIZE); arm_initrd_init(); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 70470f8e97868..8d50d3fa27b88 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -923,6 +923,9 @@ static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md, pgd = pgd_offset(mm, addr); end = addr + length; + pr_info("map physical memory 0x%08llx-0x%08llx to virtual memory 0x%08lx-0x%08lx length: 0x%08lx\n", + (long long)phys, (long long)(phys + length - 1), addr, end - 1, length); + do { unsigned long next = pgd_addr_end(addr, end); @@ -1349,8 +1352,21 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) /* * Clear page table except top pmd used by early fixmaps */ - for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE) + for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE) { + /* + * When putting the kernel into the VMALLOC area, we need to + * make sure we don't wipe out the VM mappings for the kernel. + * This would pull out the ground under our feet. This gets + * compiled out if we're not using kernel in VMALLOC. 
+ */ + if (IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) { + if ((addr >= KERNEL_OFFSET) && + (addr < (KERNEL_OFFSET + KERNEL_SECTION_SIZE))) + continue; + } + pr_info("clear PMD at 0x%08llx\n", (unsigned long long)addr); pmd_clear(pmd_off_k(addr)); + } if (__atags_pointer) { /* create a read-only mapping of the device tree */ @@ -1538,6 +1554,23 @@ static void __init map_lowmem(void) } } +void __init vm_reserve_kernel(struct map_desc *md) +{ + struct vm_struct *vm; + struct static_vm *svm; + + svm = early_alloc(sizeof(*svm)); + + vm = &svm->vm; + vm->addr = (void *)(md->virtual & PAGE_MASK); + vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); + vm->phys_addr = __pfn_to_phys(md->pfn); + vm->flags = VM_MAP | VM_ARM_STATIC_MAPPING; + vm->flags |= VM_ARM_MTYPE(md->type); + vm->caller = vm_reserve_kernel; + add_static_vm_early(svm); +} + static void __init map_kernel(void) { phys_addr_t kernel_x_start = round_down(kernel_phy_start, SECTION_SIZE); @@ -1551,6 +1584,8 @@ static void __init map_kernel(void) map.length = kernel_x_end - kernel_x_start; map.type = MT_MEMORY_RWX; create_mapping(&map); + if (IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) + vm_reserve_kernel(&map); /* If the nx part is small it may end up covered by the tail of the RWX section */ if (kernel_x_end == kernel_nx_end) @@ -1561,6 +1596,8 @@ static void __init map_kernel(void) map.length = kernel_nx_end - kernel_nx_start; map.type = MT_MEMORY_RW; create_mapping(&map); + if (IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) + vm_reserve_kernel(&map); } #ifdef CONFIG_ARM_PV_FIXUP |
