author    Linus Walleij <linus.walleij@linaro.org>  2021-02-03 14:25:04 +0100
committer Linus Walleij <linus.walleij@linaro.org>  2021-04-12 01:37:37 +0200
commit    11e0a781454290894392dc9dbdde64ba756cbdaa
tree      7f430148e1e12af16f3f7746adef8fb983fab360
parent    3d25eb2b2ab5c2df948c828e129428cd636f5d5d
-rw-r--r--  arch/arm/Kconfig               |  11
-rw-r--r--  arch/arm/include/asm/memory.h  |  58
-rw-r--r--  arch/arm/kernel/head.S         |  27
-rw-r--r--  arch/arm/mm/init.c             |  17
-rw-r--r--  arch/arm/mm/mmu.c              | 150
5 files changed, 249 insertions(+), 14 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 138248999df74..f983184f59527 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -266,6 +266,15 @@ config NEED_MACH_MEMORY_H
definitions for this platform. The need for mach/memory.h should
be avoided when possible.
+config ARM_KERNEL_IN_VMALLOC
+ bool "Compile the kernel into the VMALLOC area"
+ depends on !ARM_PATCH_PHYS_VIRT
+ help
+ This augments the build process so that the kernel TEXT, DATA and
+ BSS sections are placed in the VMALLOC area. This avoids a dedicated
+ mapping of the kernel into userspace for system calls and task
+ switching.
+
config PHYS_OFFSET
hex "Physical address of main memory" if MMU
depends on !ARM_PATCH_PHYS_VIRT
@@ -273,6 +282,7 @@ config PHYS_OFFSET
default 0x00000000 if ARCH_FOOTBRIDGE
default 0x10000000 if ARCH_OMAP1 || ARCH_RPC
default 0x20000000 if ARCH_S5PV210
+ default 0x80000000 if ARCH_VEXPRESS
default 0xc0000000 if ARCH_SA1100
help
Please provide the physical address corresponding to the
@@ -320,7 +330,6 @@ config ARCH_MULTIPLATFORM
select ARCH_SPARSEMEM_ENABLE
select ARCH_SELECT_MEMORY_MODEL
select ARM_HAS_SG_CHAIN
- select ARM_PATCH_PHYS_VIRT
select AUTO_ZRELADDR
select TIMER_OF
select COMMON_CLK
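
For illustration, a config fragment exercising the new option might look like the following. This fragment is an assumption, not part of the patch; the PHYS_OFFSET value follows the VEXPRESS default added above, and deselecting ARM_PATCH_PHYS_VIRT is possible now that ARCH_MULTIPLATFORM no longer selects it:

# Hypothetical fragment: kernel in VMALLOC on a Versatile Express
CONFIG_ARM_KERNEL_IN_VMALLOC=y
# CONFIG_ARM_PATCH_PHYS_VIRT is not set
CONFIG_PHYS_OFFSET=0x80000000
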
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e38655b916fa1..568051d49f499 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/sizes.h>
+#include <asm/page.h>
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
@@ -27,7 +28,11 @@
* we may further offset this with TEXT_OFFSET in practice.
*/
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+#ifdef CONFIG_ARM_KERNEL_IN_VMALLOC
+#define KERNEL_OFFSET (0xF1000000)
+#else
#define KERNEL_OFFSET (PAGE_OFFSET)
+#endif
#ifdef CONFIG_MMU
@@ -283,21 +288,62 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#define PHYS_OFFSET PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+extern phys_addr_t kernel_phy_start;
+extern phys_addr_t kernel_phy_end;
+#define KERNEL_SIZE (kernel_phy_end - kernel_phy_start)
+#define KERNEL_PFN_OFFSET ((unsigned long)(kernel_phy_start >> PAGE_SHIFT))
+
static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
- return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+ if (!IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) {
+ return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+ } else {
+ phys_addr_t addr = (phys_addr_t)x;
+
+ if ((addr >= KERNEL_OFFSET) &&
+ (addr < (KERNEL_OFFSET + KERNEL_SIZE)))
+ return addr - KERNEL_OFFSET + kernel_phy_start;
+ else
+ return addr - PAGE_OFFSET + PHYS_OFFSET;
+ }
}
static inline unsigned long __phys_to_virt(phys_addr_t x)
{
- return x - PHYS_OFFSET + PAGE_OFFSET;
+ /* Normally just use the 1-to-1 mapping */
+ if (!IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) {
+ return x - PHYS_OFFSET + PAGE_OFFSET;
+ } else {
+ /*
+ * We need very specific handling of the kernel physical memory
+ * 1-to-1 map: memory allocations for the kernel will be made both
+ * below it (e.g. the initial page table) and above it (any other
+ * allocations) in physical memory.
+ */
+ if (x >= kernel_phy_start && x < kernel_phy_end)
+ return x - kernel_phy_start + KERNEL_OFFSET;
+ else
+ return x - PHYS_OFFSET + PAGE_OFFSET;
+ }
}
-
#endif
-#define virt_to_pfn(kaddr) \
- ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
- PHYS_PFN_OFFSET)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ if (!IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) {
+ return ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) +
+ PHYS_PFN_OFFSET);
+ } else {
+ if ((kaddr >= (void *)KERNEL_OFFSET) &&
+ (kaddr < (void *)(KERNEL_OFFSET + KERNEL_SIZE))) {
+ return ((((unsigned long)(kaddr) - KERNEL_OFFSET) >> PAGE_SHIFT) +
+ KERNEL_PFN_OFFSET);
+ } else {
+ return ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) +
+ PHYS_PFN_OFFSET);
+ }
+ }
+}
#define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))
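
To illustrate the two translation windows the hunk above introduces, here is a minimal standalone C model. The constants (PAGE_OFFSET, PHYS_OFFSET, a 16 MiB kernel at the start of RAM aliased at KERNEL_OFFSET) are hypothetical; only the branch structure mirrors the patch:

/*
 * Minimal standalone model of the translations above; a sketch only,
 * under the hypothetical constants below.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

#define PAGE_OFFSET   0xC0000000UL
#define PHYS_OFFSET   ((phys_addr_t)0x80000000)
#define KERNEL_OFFSET 0xF1000000UL

static phys_addr_t kernel_phy_start = 0x80000000;
static phys_addr_t kernel_phy_end   = 0x81000000;
#define KERNEL_SIZE (kernel_phy_end - kernel_phy_start)

static unsigned long model_phys_to_virt(phys_addr_t x)
{
	/* Kernel window: relocate into the VMALLOC-area alias */
	if (x >= kernel_phy_start && x < kernel_phy_end)
		return x - kernel_phy_start + KERNEL_OFFSET;
	/* Everything else: the usual linear lowmem offset */
	return x - PHYS_OFFSET + PAGE_OFFSET;
}

static phys_addr_t model_virt_to_phys(unsigned long x)
{
	if (x >= KERNEL_OFFSET && x < KERNEL_OFFSET + KERNEL_SIZE)
		return (phys_addr_t)x - KERNEL_OFFSET + kernel_phy_start;
	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

int main(void)
{
	/* Kernel text round-trips through the VMALLOC window... */
	printf("0x%lx\n", model_phys_to_virt(0x80100000));  /* 0xf1100000 */
	/* ...while ordinary lowmem still uses the linear map */
	printf("0x%lx\n", model_phys_to_virt(0x82000000));  /* 0xc2000000 */
	printf("0x%llx\n",
	       (unsigned long long)model_virt_to_phys(0xf1100000UL)); /* 0x80100000 */
	return 0;
}

The point to note is that only addresses inside the kernel's physical footprint take the KERNEL_OFFSET alias; everything else still round-trips through the linear map, which is why map_lowmem() below has to chisel the kernel out of the memblock ranges.
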
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4e2daaa7636aa..483de9f446c7a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -47,6 +47,20 @@
.globl swapper_pg_dir
.equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
+ /*
+ * These need to be assigned at runtime, since the physical location
+ * of the kernel is not known until boot.
+ */
+ .pushsection .data
+ .align 2
+ .globl kernel_phy_start
+ .globl kernel_phy_end
+kernel_phy_start:
+ .long 0
+kernel_phy_end:
+ .long 0
+ .popsection
+
.macro pgtbl, rd, phys
add \rd, \phys, #TEXT_OFFSET
sub \rd, \rd, #PG_DIR_SIZE
@@ -229,16 +243,23 @@ __create_page_tables:
blo 1b
/*
- * Map our RAM from the start to the end of the kernel .bss section.
+ * The main matter: map in the kernel using section mappings, and
+ * set two variables to indicate the physical start and end of the
+ * kernel.
*/
- add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
ldr r6, =(_end - 1)
- orr r3, r8, r7
+ adr_l r5, kernel_phy_start @ _pa(kernel_phy_start)
+ str r8, [r5] @ Save physical start of kernel
+ orr r3, r8, r7 @ Add the MMU flags
add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: str r3, [r0], #1 << PMD_ORDER
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
bls 1b
+ eor r3, r3, r7 @ Remove the MMU flags
+ adr_l r5, kernel_phy_end @ _pa(kernel_phy_end)
+ str r3, [r5] @ Save physical end of kernel
#ifdef CONFIG_XIP_KERNEL
/*
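
As a sketch of what the modified loop above computes, the section-mapping arithmetic can be modelled in C. SECTION_SHIFT (20) and PMD_ORDER (2) match the classic ARM 2-level short-descriptor layout; the load address, kernel size and flag bits are hypothetical:

/*
 * C model of the section-mapping loop in __create_page_tables above;
 * a sketch only, with made-up addresses and attribute bits.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTION_SHIFT 20               /* 1 MiB section mappings */
#define PMD_ORDER     2                /* 4 bytes per PMD entry */
#define KERNEL_OFFSET 0xF1000000U

int main(void)
{
	uint32_t pgdir      = 0x80004000;  /* r4: phys base of swapper_pg_dir */
	uint32_t phys_start = 0x80100000;  /* r8: physical start of the kernel */
	uint32_t mmuflags   = 0x00000C0E;  /* r7: section attribute bits */
	uint32_t end_virt   = 0xF14FFFFF;  /* _end - 1 (virtual), hypothetical */

	/* add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER) */
	uint32_t r0 = pgdir + (KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER));
	/*
	 * add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER): r6 is not
	 * entry-aligned (the low bits of _end - 1 leak in), but since r0
	 * steps by 4 from an aligned base, the unsigned compare still stops
	 * right after the entry covering _end - 1.
	 */
	uint32_t r6 = pgdir + (end_virt >> (SECTION_SHIFT - PMD_ORDER));
	uint32_t r3 = phys_start | mmuflags;       /* orr r3, r8, r7 */

	do {
		printf("pmd@0x%08x = 0x%08x\n", r0, r3); /* str r3, [r0], #4 */
		r0 += 1 << PMD_ORDER;
		r3 += 1 << SECTION_SHIFT;  /* next 1 MiB of physical memory */
	} while (r0 <= r6);                /* cmp r0, r6; bls 1b */

	/* eor r3, r3, r7: strip the flags again to get the physical end */
	printf("kernel_phy_start=0x%08x kernel_phy_end=0x%08x\n",
	       phys_start, r3 ^ mmuflags);
	return 0;
}

The eor works because the flag bits were orr'ed into r3 once and the 1 MiB increments never touch them, so toggling them clears them again.
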
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 31381c2844c42..16ee429a6667a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -212,8 +212,21 @@ void check_cpu_icache_size(int cpuid)
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
- /* Register the kernel text, kernel data and initrd with memblock. */
- memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);
+ /*
+ * Register the kernel text, kernel data and initrd with memblock.
+ *
+ * When using kernel in vmalloc, we have to round up to the closest
+ * section size, or the temporary section mapping of the tail of the
+ * kernel will be overwritten by memblock allocations. This is not
+ * a problem with the linear kernel map, since the allocations can
+ * use the 1:1 map in that case.
+ */
+ if (!IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC))
+ memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);
+ else
+ memblock_reserve(__pa(KERNEL_START),
+ round_up(KERNEL_END - KERNEL_START,
+ SECTION_SIZE));
arm_initrd_init();
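
A quick standalone model of the reservation rounding above; round_up() here matches the power-of-two form used in the kernel, and the kernel size is made up:

/*
 * Standalone model of the memblock_reserve() rounding above;
 * a sketch only, with a hypothetical kernel size.
 */
#include <stdio.h>

#define SECTION_SIZE 0x00100000UL  /* 1 MiB sections */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long kernel_size = 0x00654321; /* KERNEL_END - KERNEL_START */

	/*
	 * 0x00654321 rounds up to 0x00700000: the whole tail section stays
	 * reserved, so memblock allocations cannot land under the temporary
	 * section mapping covering the end of the kernel.
	 */
	printf("reserve 0x%08lx bytes\n", round_up(kernel_size, SECTION_SIZE));
	return 0;
}
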
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c06ebfbc48c4a..4711c5a8af267 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -924,6 +924,9 @@ static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
pgd = pgd_offset(mm, addr);
end = addr + length;
+ pr_info("map physical memory 0x%08llx-0x%08llx to virtual memory 0x%08lx-0x%08lx length: 0x%08lx\n",
+ (long long)phys, (long long)(phys + length - 1), addr, end - 1, length);
+
do {
unsigned long next = pgd_addr_end(addr, end);
@@ -1350,8 +1353,21 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
/*
* Clear page table except top pmd used by early fixmaps
*/
- for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
+ for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE) {
+ /*
+ * When putting the kernel into the VMALLOC area, we need to
+ * make sure we don't wipe out the VM mappings for the kernel.
+ * This would pull out the ground under our feet. This gets
+ * compiled out if we're not using kernel in VMALLOC.
+ */
+ if (IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) {
+ /* FIXME: assumes kernel ends on an even PMD */
+ if ((addr >= KERNEL_OFFSET) && (addr < (KERNEL_OFFSET + KERNEL_SIZE)))
+ continue;
+ }
+ pr_info("clear PMD at 0x%08x\n", addr);
pmd_clear(pmd_off_k(addr));
+ }
if (__atags_pointer) {
/* create a read-only mapping of the device tree */
@@ -1468,12 +1484,76 @@ static void __init map_lowmem(void)
for_each_mem_range(i, &start, &end) {
struct map_desc map;
+ pr_info("map lowmem start: 0x%08llx, end: 0x%08llx\n", (long long)start, (long long)end);
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
break;
- if (end < kernel_x_start) {
+ if (IS_ENABLED(CONFIG_ARM_KERNEL_IN_VMALLOC)) {
+ /*
+ * If our kernel image is in the VMALLOC area we need to remove the kernel
+ * physical memory from lowmem since the kernel will be mapped separately.
+ * The kernel will typically be at the very start of lowmem.
+ *
+ * If the memblock contains the kernel, we have to chisel out
+ * the kernel memory from it and map each part separately. We get 6
+ * different theoretical cases:
+ *
+ * +--------+ +--------+
+ * +-- start --+ +--------+ | Kernel | | Kernel |
+ * | | | Kernel | | case 2 | | case 5 |
+ * | | | case 1 | +--------+ | | +--------+
+ * | Memory | +--------+ | | | Kernel |
+ * | range | +--------+ | | | case 6 |
+ * | | | Kernel | +--------+ | | +--------+
+ * | | | case 3 | | Kernel | | |
+ * +-- end ----+ +--------+ | case 4 | | |
+ * +--------+ +--------+
+ */
+
+ /* Case 5: kernel covers range, don't map anything, should be rare */
+ if ((start > kernel_phy_start) && (end < kernel_phy_end))
+ break;
+
+ /* Cases where the kernel is starting inside the range */
+ if ((kernel_phy_start >= start) && (kernel_phy_start <= end)) {
+ /* Case 6: kernel is embedded in the range, we need two mappings */
+ if ((start < kernel_phy_start) && (end > kernel_phy_end)) {
+ /* Map memory below the kernel */
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = kernel_phy_start - start;
+ map.type = MT_MEMORY_RWX; // FIXME: RW?
+ create_mapping(&map);
+ /* Map memory above the kernel */
+ map.pfn = __phys_to_pfn(kernel_phy_end);
+ map.virtual = __phys_to_virt(kernel_phy_end);
+ map.length = end - kernel_phy_end;
+ map.type = MT_MEMORY_RWX; // FIXME: RW?
+ create_mapping(&map);
+ break;
+ }
+ /* Case 1: kernel and range start at the same address, should be common */
+ if (kernel_phy_start == start)
+ start = kernel_phy_end;
+ /* Case 3: kernel and range end at the same address, should be rare */
+ if (kernel_phy_end == end)
+ end = kernel_phy_start;
+ } else if ((kernel_phy_start < start) && (kernel_phy_end > start) && (kernel_phy_end < end)) {
+ /* Case 2: kernel ends inside range, starts below it */
+ start = kernel_phy_end;
+ } else if ((kernel_phy_start > start) && (kernel_phy_start < end) && (kernel_phy_end > end)) {
+ /* Case 4: kernel starts inside range, ends above it */
+ end = kernel_phy_start;
+ }
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY_RWX; // FIXME: RW?
+ create_mapping(&map);
+ break;
+ } else if (end < kernel_x_start) {
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
@@ -1517,6 +1597,65 @@ static void __init map_lowmem(void)
}
}
+#ifdef CONFIG_ARM_KERNEL_IN_VMALLOC
+/*
+ * FIXME: do we want to use the same method for all variants just without the
+ * vm reservation when not using kernel in vmalloc?
+ */
+void __init vm_reserve_kernel(struct map_desc *md)
+{
+ struct vm_struct *vm;
+ struct static_vm *svm;
+
+ svm = early_alloc(sizeof(*svm));
+
+ vm = &svm->vm;
+ vm->addr = (void *)(md->virtual & PAGE_MASK);
+ vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+ vm->phys_addr = __pfn_to_phys(md->pfn);
+ vm->flags = VM_MAP | VM_ARM_STATIC_MAPPING;
+ vm->flags |= VM_ARM_MTYPE(md->type);
+ vm->caller = vm_reserve_kernel;
+ add_static_vm_early(svm);
+}
+
+static void __init map_kernel(void)
+{
+ phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
+ phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ phys_addr_t kernel_nx_start = kernel_x_end;
+ phys_addr_t kernel_nx_end = round_up(__pa(KERNEL_END), SECTION_SIZE);
+ struct map_desc map;
+
+ map.pfn = __phys_to_pfn(kernel_x_start);
+ /* Relies on __phys_to_virt working for kernel addresses in VMALLOC */
+ map.virtual = __phys_to_virt(kernel_x_start);
+ map.length = kernel_x_end - kernel_x_start;
+ map.type = MT_MEMORY_RWX;
+ create_mapping(&map);
+ vm_reserve_kernel(&map);
+
+ /* If the nx part is small it may end up covered by the tail of the RWX section */
+ if (kernel_x_end == kernel_nx_end)
+ return;
+
+ map.pfn = __phys_to_pfn(kernel_nx_start);
+ /* Relies on __phys_to_virt working for kernel addresses in VMALLOC */
+ map.virtual = __phys_to_virt(kernel_nx_start);
+ map.length = kernel_nx_end - kernel_nx_start;
+ map.type = MT_MEMORY_RW;
+ create_mapping(&map);
+ vm_reserve_kernel(&map);
+}
+#else
+/*
+ * When not mapping the kernel in VMALLOC, the kernel is mapped while mapping lowmem.
+ */
+static void __init map_kernel(void)
+{
+}
+#endif /* CONFIG_ARM_KERNEL_IN_VMALLOC */
+
#ifdef CONFIG_ARM_PV_FIXUP
typedef void pgtables_remap(long long offset, unsigned long pgd);
pgtables_remap lpae_pgtables_remap_asm;
@@ -1647,9 +1786,16 @@ void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
+ pr_info("physical kernel memory: 0x%08x-0x%08x\n",
+ kernel_phy_start, kernel_phy_end);
+
prepare_page_table();
map_lowmem();
memblock_set_current_limit(arm_lowmem_limit);
+ pr_info("lowmem limit is %08llx\n", (long long)arm_lowmem_limit);
+ /* After this point early_alloc(), i.e. the memblock allocator, can be used */
+ map_kernel();
+ pr_info("mapped kernel\n");
dma_contiguous_remap();
early_fixmap_shutdown();
devicemaps_init(mdesc);
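
To make the six cases in map_lowmem() concrete, here is a condensed standalone model of the chiselling logic. map_range() stands in for create_mapping(), all addresses are hypothetical, and degenerate ranges are simply skipped rather than mapped:

/*
 * Condensed standalone model of the range chiselling in map_lowmem()
 * above; a sketch only, with hypothetical kernel and range addresses.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

static const phys_addr_t kstart = 0x80100000; /* kernel_phy_start */
static const phys_addr_t kend   = 0x80800000; /* kernel_phy_end */

static void map_range(phys_addr_t start, phys_addr_t end)
{
	if (start < end)
		printf("map 0x%08llx-0x%08llx\n",
		       (unsigned long long)start, (unsigned long long)end);
}

static void map_lowmem_range(phys_addr_t start, phys_addr_t end)
{
	/* Case 5: kernel covers the whole range: map nothing */
	if (start > kstart && end < kend)
		return;
	/* Case 6: kernel embedded in the range: map both sides */
	if (start < kstart && end > kend) {
		map_range(start, kstart);
		map_range(kend, end);
		return;
	}
	/* Case 1: kernel and range start together: trim the head */
	if (kstart == start)
		start = kend;
	/* Case 3: kernel and range end together: trim the tail */
	if (kend == end)
		end = kstart;
	/* Case 2: kernel overlaps the start of the range */
	if (kstart < start && kend > start && kend < end)
		start = kend;
	/* Case 4: kernel overlaps the end of the range */
	if (kstart > start && kstart < end && kend > end)
		end = kstart;
	map_range(start, end);
}

int main(void)
{
	map_lowmem_range(0x80000000, 0x90000000); /* case 6: two mappings */
	map_lowmem_range(0x80100000, 0x90000000); /* case 1: head trimmed  */
	map_lowmem_range(0x80400000, 0x80600000); /* case 5: nothing       */
	return 0;
}
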