From a8d13b36486220225ca5f9f64195762c6f643ebf Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Mon, 22 Apr 2019 00:20:24 +1000
Subject: powerpc/asm: Add a patch_site macro & helpers for patching instructions

commit 06d0bbc6d0f56dacac3a79900e9a9a0d5972d818 upstream.

Add a macro and some helper C functions for patching single asm
instructions.

The gas macro means we can do something like:

  1:	nop
	patch_site 1b, patch__foo

This is less visually distracting than defining a GLOBAL symbol at 1,
and also doesn't pollute the symbol table, which can confuse e.g. perf.

These are obviously similar to our existing feature sections, but are
not automatically patched based on CPU/MMU features; rather, they are
designed to be manually patched by C code at some arbitrary point.

Signed-off-by: Michael Ellerman
Signed-off-by: Greg Kroah-Hartman
---
 arch/powerpc/lib/code-patching.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'arch/powerpc/lib/code-patching.c')

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index d5edbeb8eb82..2ce6159d8983 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -32,6 +32,22 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags)
 	return patch_instruction(addr, create_branch(addr, target, flags));
 }
 
+int patch_branch_site(s32 *site, unsigned long target, int flags)
+{
+	unsigned int *addr;
+
+	addr = (unsigned int *)((unsigned long)site + *site);
+	return patch_instruction(addr, create_branch(addr, target, flags));
+}
+
+int patch_instruction_site(s32 *site, unsigned int instr)
+{
+	unsigned int *addr;
+
+	addr = (unsigned int *)((unsigned long)site + *site);
+	return patch_instruction(addr, instr);
+}
+
 unsigned int create_branch(const unsigned int *addr, unsigned long target,
 			   int flags)
 {
--
cgit v1.2.3
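As a rough illustration of what the new helpers compute (a standalone userspace sketch, not kernel code; the insns array, the main() driver, and the offset setup are made up for demonstration): a patch site is an s32 that stores the self-relative offset from the site variable to the instruction it names, and patch_instruction_site()/patch_branch_site() recover the instruction's address by adding that offset back onto the site's own address.

```c
/* Standalone sketch of the patch-site address arithmetic (userspace, not kernel code). */
#include <stdint.h>
#include <stdio.h>

static uint32_t insns[4];	/* stand-in for a few instruction words */
static int32_t patch__foo;	/* stand-in for a patch_site entry */

int main(void)
{
	/* What "patch_site 1b, patch__foo" effectively records: the offset from
	 * the site variable to the instruction at label 1 (assumes the two
	 * objects are within +/- 2 GB of each other, as in the kernel image). */
	patch__foo = (int32_t)((uintptr_t)&insns[2] - (uintptr_t)&patch__foo);

	/* The same recovery step the helpers perform:
	 * addr = (unsigned int *)((unsigned long)site + *site) */
	uint32_t *addr = (uint32_t *)((uintptr_t)&patch__foo + patch__foo);

	printf("site resolves to insns[2]? %s\n", addr == &insns[2] ? "yes" : "no");
	return 0;
}
```

Storing a relative offset rather than an absolute pointer keeps each site to 4 bytes and needs no relocation processing, which is why the real sites live happily in a dedicated section until C code decides to patch them.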
From 7fe905d0973eee071ebc4861cd6d23138116cd9e Mon Sep 17 00:00:00 2001
From: Michael Neuling
Date: Mon, 22 Apr 2019 00:20:29 +1000
Subject: powerpc: Avoid code patching freed init sections

commit 51c3c62b58b357e8d35e4cc32f7b4ec907426fe3 upstream.

This stops us from doing code patching in init sections after they've
been freed.

In this chain:

  kvm_guest_init()
    -> kvm_use_magic_page()
      -> fault_in_pages_readable()
        -> __get_user()
          -> __get_user_nocheck()
            -> barrier_nospec();

We have a code patching location at barrier_nospec() and
kvm_guest_init() is an init function. This whole chain gets inlined,
so when we free the init section (and hence kvm_guest_init()), this
code goes away and hence should no longer be patched.

We've seen this as userspace memory corruption when using a memory
checker while doing partition migration testing on PowerVM (this
starts the code patching post migration via
/sys/kernel/mobility/migration). In theory, it could also happen when
using /sys/kernel/debug/powerpc/barrier_nospec.

Signed-off-by: Michael Neuling
Reviewed-by: Nicholas Piggin
Reviewed-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Signed-off-by: Greg Kroah-Hartman
---
 arch/powerpc/lib/code-patching.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'arch/powerpc/lib/code-patching.c')

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 2ce6159d8983..570c06a00db6 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -14,12 +14,25 @@
 #include
 #include
 #include
+#include
+#include
 
+static inline bool is_init(unsigned int *addr)
+{
+	return addr >= (unsigned int *)__init_begin && addr < (unsigned int *)__init_end;
+}
+
 
 int patch_instruction(unsigned int *addr, unsigned int instr)
 {
 	int err;
 
+	/* Make sure we aren't patching a freed init section */
+	if (init_mem_is_free && is_init(addr)) {
+		pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+		return 0;
+	}
+
 	__put_user_size(instr, addr, 4, err);
 	if (err)
 		return err;
--
cgit v1.2.3


From 5f3cf6b8221107302107068063e7fa7304f6639a Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 1 Oct 2018 12:21:10 +0000
Subject: powerpc/lib: fix book3s/32 boot failure due to code patching

commit b45ba4a51cde29b2939365ef0c07ad34c8321789 upstream.

Commit 51c3c62b58b3 ("powerpc: Avoid code patching freed init
sections") accesses the 'init_mem_is_free' flag too early, before the
kernel is relocated. This provokes an early boot failure (before the
console is active).

As it is not necessary to do this verification that early, this patch
moves the test into patch_instruction() instead of
__patch_instruction().

This modification also has the advantage of avoiding unnecessary
remappings.

Fixes: 51c3c62b58b3 ("powerpc: Avoid code patching freed init sections")
Cc: stable@vger.kernel.org # 4.13+
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Signed-off-by: Greg Kroah-Hartman
---
 arch/powerpc/lib/code-patching.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/lib/code-patching.c')

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 570c06a00db6..31d31a10f71f 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -28,7 +28,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
 	int err;
 
 	/* Make sure we aren't patching a freed init section */
-	if (init_mem_is_free && is_init(addr)) {
+	if (*PTRRELOC(&init_mem_is_free) && is_init(addr)) {
 		pr_debug("Skipping init section patching addr: 0x%px\n", addr);
 		return 0;
 	}
--
cgit v1.2.3
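To make the effect of the last two patches concrete, here is a minimal standalone sketch (userspace, not the kernel implementation; image, init_begin, init_end, and the main() driver are stand-ins for the kernel's text and the linker-provided __init_begin/__init_end symbols): once init memory has been flagged as freed, a patch request that targets the old init range is skipped instead of scribbling over whatever now occupies that memory.

```c
/* Standalone sketch of the "don't patch freed init text" guard (userspace, not kernel code). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t image[8];                 /* stand-in for kernel text */
static uint32_t *init_begin = &image[4];  /* stand-in for __init_begin */
static uint32_t *init_end   = &image[8];  /* stand-in for __init_end */
static bool init_mem_is_free;             /* set once "free_initmem()" has run */

static bool is_init(uint32_t *addr)
{
	return addr >= init_begin && addr < init_end;
}

static int patch_instruction(uint32_t *addr, uint32_t instr)
{
	/* Mirrors the check added above: refuse to touch freed init text. */
	if (init_mem_is_free && is_init(addr)) {
		printf("skipping freed init addr %p\n", (void *)addr);
		return 0;
	}
	*addr = instr;
	return 0;
}

int main(void)
{
	patch_instruction(&image[5], 0x48000000);	/* init mem still live: patched */
	init_mem_is_free = true;			/* init sections have been freed */
	patch_instruction(&image[5], 0x48000000);	/* now silently skipped */
	patch_instruction(&image[1], 0x48000000);	/* core text: still patched */
	return 0;
}
```

The third patch does not change this logic; it only reads the flag through PTRRELOC(), so the test also works on book3s/32 before the kernel has been relocated during early boot.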