| field | value | details |
|---|---|---|
| author | Catalin Marinas <catalin.marinas@arm.com> | 2016-07-01 16:53:00 +0100 |
| committer | Amit Pundir <amit.pundir@linaro.org> | 2016-10-12 17:34:22 +0530 |
| commit | fb82efd29dd555a19ee378a16ad6fabbb4fcc951 (patch) | |
| tree | 82129b1322b1bb4d035172d994ae510aab804522 | /arch/arm64/kernel/head.S |
| parent | bb8653ae4be48db54ab7e2a7959e71c8469ab7f5 (diff) | |
FROMLIST: arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1
This patch adds the uaccess macros/functions to disable access to user
space by setting TTBR0_EL1 to a reserved zeroed page. Since the value
written to TTBR0_EL1 must be a physical address, for simplicity this
patch introduces a reserved_ttbr0 page at a constant offset from
swapper_pg_dir. The uaccess_disable code uses the ttbr1_el1 value
adjusted by the reserved_ttbr0 offset.
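The uaccess macros themselves live outside head.S and are therefore not shown in the diff below, but the disable path described above can be sketched roughly as follows. This is an illustrative sketch, not the patch's exact code: the macro name, the temporary-register argument and the use of SWAPPER_DIR_SIZE as the reserved_ttbr0 offset are assumptions based on the commit message.

```asm
	// Sketch: point TTBR0_EL1 at the zeroed reserved_ttbr0 page.
	// reserved_ttbr0 is assumed to sit right after swapper_pg_dir, so its
	// physical address is the TTBR1_EL1 value plus SWAPPER_DIR_SIZE.
	.macro	__uaccess_ttbr0_disable, tmp1
	mrs	\tmp1, ttbr1_el1			// physical address of swapper_pg_dir
	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE		// constant offset to reserved_ttbr0
	msr	ttbr0_el1, \tmp1			// user-space accesses now fault
	isb
	.endm
```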
Enabling access to user space is done by restoring TTBR0_EL1 with the value
from the ttbr0 variable in struct thread_info. Interrupts must be disabled
during the uaccess_ttbr0_enable code to ensure the atomicity of the
thread_info.ttbr0 read and the TTBR0_EL1 write.

This patch also moves the get_thread_info asm macro from entry.S to
assembler.h for reuse in the uaccess_ttbr0_* macros.
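A matching enable path, again only a hedged sketch rather than the patch's actual code (TI_TTBR0 is an assumed asm-offsets constant), reloads the per-thread value saved in thread_info.ttbr0; the caller keeps interrupts masked around it, and get_thread_info is usable here precisely because the patch moves it into assembler.h:

```asm
	// Sketch: restore the per-thread TTBR0_EL1 from thread_info.ttbr0.
	// Interrupts must already be disabled so the thread_info read and the
	// TTBR0_EL1 write cannot be separated by a context switch.
	.macro	__uaccess_ttbr0_enable, tmp1
	get_thread_info	\tmp1				// macro moved from entry.S to assembler.h
	ldr	\tmp1, [\tmp1, #TI_TTBR0]		// TI_TTBR0: assumed asm-offsets constant
	msr	ttbr0_el1, \tmp1			// re-enable user-space access
	isb
	.endm
```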
Cc: Will Deacon <will.deacon@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Change-Id: Idf09a870b8612dce23215bce90d88781f0c0c3aa
(cherry picked from commit 940d37234182d2675ab8ab46084840212d735018)
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Diffstat (limited to 'arch/arm64/kernel/head.S')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm64/kernel/head.S | 6 |

1 file changed, 3 insertions(+), 3 deletions(-)
```diff
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 488708687fda..4467bc9c9aa9 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -321,14 +321,14 @@ __create_page_tables:
 	 * dirty cache lines being evicted.
 	 */
 	mov	x0, x25
-	add	x1, x26, #SWAPPER_DIR_SIZE
+	add	x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	bl	__inval_cache_range
 
 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	mov	x0, x25
-	add	x6, x26, #SWAPPER_DIR_SIZE
+	add	x6, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
@@ -407,7 +407,7 @@ __create_page_tables:
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	mov	x0, x25
-	add	x1, x26, #SWAPPER_DIR_SIZE
+	add	x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	dmb	sy
 	bl	__inval_cache_range
```
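RESERVED_TTBR0_SIZE is introduced outside head.S, so its definition is not part of this diff. A plausible sketch, consistent with the single reserved zeroed page described in the commit message, is shown below; the header location and the CONFIG_ARM64_SW_TTBR0_PAN guard are assumptions, not taken from the patch itself:

```c
/*
 * Sketch only: one extra page after swapper_pg_dir for reserved_ttbr0,
 * reserved only when the TTBR0-based PAN emulation is configured in.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
#define RESERVED_TTBR0_SIZE	(PAGE_SIZE)
#else
#define RESERVED_TTBR0_SIZE	(0)
#endif
```

With such a definition, the three hunks above simply extend the cache-invalidation range and the zeroing loop so that the reserved page is invalidated and cleared together with the idmap and swapper page tables.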
