author		Catalin Marinas <catalin.marinas@arm.com>	2016-07-01 16:53:00 +0100
committer	Sami Tolvanen <samitolvanen@google.com>		2016-09-29 10:52:56 -0700
commit		1911d36b27ba58ee18592df25b7ee636d4d4c41d (patch)
tree		6e06a501afe513dff8ead40bb1b66cf9a69aa7a9	/arch/arm64/kernel/head.S
parent		3b66929169de053042d47e482dd5748794756153 (diff)
FROMLIST: arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1
This patch adds the uaccess macros/functions to disable access to user
space by setting TTBR0_EL1 to a reserved zeroed page. Since the value
written to TTBR0_EL1 must be a physical address, for simplicity this
patch introduces a reserved_ttbr0 page at a constant offset from
swapper_pg_dir. The uaccess_disable code uses the ttbr1_el1 value
adjusted by the reserved_ttbr0 offset.

Enabling access to user space is done by restoring TTBR0_EL1 with the
value from the struct thread_info ttbr0 variable. Interrupts must be
disabled during the uaccess_ttbr0_enable code to ensure the atomicity
of the thread_info.ttbr0 read and the TTBR0_EL1 write.

This patch also moves the get_thread_info asm macro from entry.S to
assembler.h for reuse in the uaccess_ttbr0_* macros.

Cc: Will Deacon <will.deacon@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Change-Id: Idf09a870b8612dce23215bce90d88781f0c0c3aa
(cherry picked from commit 940d37234182d2675ab8ab46084840212d735018)
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
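As a rough illustration of the mechanism the message describes (this is not code from the patch; the macro names, the TSK_TI_TTBR0 thread_info offset symbol and the exact instruction sequence are assumptions), the disable/enable helpers could look roughly like this in arm64 assembly:

	/*
	 * Sketch only: plausible shape of the TTBR0_EL1-based uaccess
	 * helpers described above. RESERVED_TTBR0_SIZE and TSK_TI_TTBR0
	 * are assumed symbol names, not taken from this diff.
	 */
	.macro	__uaccess_ttbr0_disable, tmp1
	mrs	\tmp1, ttbr1_el1		// physical address of swapper_pg_dir
	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 sits right after it
	msr	ttbr0_el1, \tmp1		// point TTBR0_EL1 at the zeroed page
	isb
	.endm

	.macro	__uaccess_ttbr0_enable, tmp1
	get_thread_info \tmp1			// macro moved to assembler.h by this patch
	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// per-thread saved TTBR0_EL1 value (assumed offset)
	msr	ttbr0_el1, \tmp1		// restore the user page tables
	isb
	.endm

Callers would keep interrupts masked around the enable sequence so that the thread_info.ttbr0 read and the TTBR0_EL1 write stay atomic, as the message notes.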
Diffstat (limited to 'arch/arm64/kernel/head.S')
-rw-r--r--	arch/arm64/kernel/head.S	| 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 54fd1e2590a2..9bfa58fea8ce 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -321,14 +321,14 @@ __create_page_tables:
 	 * dirty cache lines being evicted.
 	 */
 	mov	x0, x25
-	add	x1, x26, #SWAPPER_DIR_SIZE
+	add	x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	bl	__inval_cache_range
 
 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	mov	x0, x25
-	add	x6, x26, #SWAPPER_DIR_SIZE
+	add	x6, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
@@ -406,7 +406,7 @@ __create_page_tables:
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	mov	x0, x25
-	add	x1, x26, #SWAPPER_DIR_SIZE
+	add	x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	dmb	sy
 	bl	__inval_cache_range
 
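The hunks above grow every range that starts at swapper_pg_dir (x26) by RESERVED_TTBR0_SIZE, since the reserved zeroed TTBR0 page is placed immediately after swapper_pg_dir and must be covered by the same cache invalidation and zeroing. A plausible definition of that constant, assuming it lives in asm/kernel-pgtable.h and is gated on a software-PAN config symbol (both assumptions, not shown in this diff):

/*
 * Sketch only: assumed definition, e.g. in arch/arm64/include/asm/kernel-pgtable.h.
 * One extra page after swapper_pg_dir when TTBR0-based uaccess switching
 * is enabled, nothing otherwise, so head.S can add RESERVED_TTBR0_SIZE
 * unconditionally.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
#define RESERVED_TTBR0_SIZE	(PAGE_SIZE)
#else
#define RESERVED_TTBR0_SIZE	(0)
#endif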