Diffstat (limited to 'arch/arm64/kernel')
30 files changed, 292 insertions, 1227 deletions
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index f6e0269de4c7..9f7794c5743f 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -30,7 +30,6 @@ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ ../../arm/kernel/opcodes.o arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o -arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_debug.o \ perf_trace_counters.o \ @@ -44,8 +43,6 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o arm64-obj-$(CONFIG_PCI) += pci.o arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o arm64-obj-$(CONFIG_ACPI) += acpi.o -arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o -arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o obj-y += $(arm64-obj-y) vdso/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c deleted file mode 100644 index 4b1e5a7a98da..000000000000 --- a/arch/arm64/kernel/acpi_parking_protocol.c +++ /dev/null @@ -1,153 +0,0 @@ -/* - * ARM64 ACPI Parking Protocol implementation - * - * Authors: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> - * Mark Salter <msalter@redhat.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ -#include <linux/acpi.h> -#include <linux/types.h> - -#include <asm/cpu_ops.h> - -struct cpu_mailbox_entry { - phys_addr_t mailbox_addr; - u8 version; - u8 gic_cpu_id; -}; - -static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS]; - -void __init acpi_set_mailbox_entry(int cpu, - struct acpi_madt_generic_interrupt *p) -{ - struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; - - cpu_entry->mailbox_addr = p->parked_address; - cpu_entry->version = p->parking_version; - cpu_entry->gic_cpu_id = p->cpu_interface_number; -} - -bool acpi_parking_protocol_valid(int cpu) -{ - struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; - - return cpu_entry->mailbox_addr && cpu_entry->version; -} - -static int acpi_parking_protocol_cpu_init(unsigned int cpu) -{ - pr_debug("%s: ACPI parked addr=%llx\n", __func__, - cpu_mailbox_entries[cpu].mailbox_addr); - - return 0; -} - -static int acpi_parking_protocol_cpu_prepare(unsigned int cpu) -{ - return 0; -} - -struct parking_protocol_mailbox { - __le32 cpu_id; - __le32 reserved; - __le64 entry_point; -}; - -static int acpi_parking_protocol_cpu_boot(unsigned int cpu) -{ - struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; - struct parking_protocol_mailbox __iomem *mailbox; - __le32 cpu_id; - - /* - * Map mailbox memory with attribute device nGnRE (ie ioremap - - * this deviates from the parking protocol specifications since - * the mailboxes are required to be mapped nGnRnE; the attribute - * discrepancy is harmless insofar as the protocol specification - * is concerned). 
- * If the mailbox is mistakenly allocated in the linear mapping - * by FW ioremap will fail since the mapping will be prevented - * by the kernel (it clashes with the linear mapping attributes - * specifications). - */ - mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox)); - if (!mailbox) - return -EIO; - - cpu_id = readl_relaxed(&mailbox->cpu_id); - /* - * Check if firmware has set-up the mailbox entry properly - * before kickstarting the respective cpu. - */ - if (cpu_id != ~0U) { - iounmap(mailbox); - return -ENXIO; - } - - /* - * We write the entry point and cpu id as LE regardless of the - * native endianness of the kernel. Therefore, any boot-loaders - * that read this address need to convert this address to the - * Boot-Loader's endianness before jumping. - */ - writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point); - writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id); - - arch_send_wakeup_ipi_mask(cpumask_of(cpu)); - - iounmap(mailbox); - - return 0; -} - -static void acpi_parking_protocol_cpu_postboot(void) -{ - int cpu = smp_processor_id(); - struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; - struct parking_protocol_mailbox __iomem *mailbox; - __le64 entry_point; - - /* - * Map mailbox memory with attribute device nGnRE (ie ioremap - - * this deviates from the parking protocol specifications since - * the mailboxes are required to be mapped nGnRnE; the attribute - * discrepancy is harmless insofar as the protocol specification - * is concerned). - * If the mailbox is mistakenly allocated in the linear mapping - * by FW ioremap will fail since the mapping will be prevented - * by the kernel (it clashes with the linear mapping attributes - * specifications). - */ - mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox)); - if (!mailbox) - return; - - entry_point = readl_relaxed(&mailbox->entry_point); - /* - * Check if firmware has cleared the entry_point as expected - * by the protocol specification. 
- */ - WARN_ON(entry_point); - - iounmap(mailbox); -} - -const struct cpu_operations acpi_parking_protocol_ops = { - .name = "parking-protocol", - .cpu_init = acpi_parking_protocol_cpu_init, - .cpu_prepare = acpi_parking_protocol_cpu_prepare, - .cpu_boot = acpi_parking_protocol_cpu_boot, - .cpu_postboot = acpi_parking_protocol_cpu_postboot -}; diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index e6cb1dc63a2a..47a8caf28bcc 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -158,3 +158,9 @@ void apply_alternatives(void *start, size_t length) __apply_alternatives(®ion); } + +void free_alternatives_memory(void) +{ + free_reserved_area(__alt_instructions, __alt_instructions_end, + 0, "alternatives"); +} diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index c37202c0c838..937f5e58a4d3 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -62,7 +62,7 @@ struct insn_emulation { }; static LIST_HEAD(insn_emulation); -static int nr_insn_emulated __initdata; +static int nr_insn_emulated; static DEFINE_RAW_SPINLOCK(insn_emulation_lock); static void register_emulation_hooks(struct insn_emulation_ops *ops) @@ -173,7 +173,7 @@ static int update_insn_emulation_mode(struct insn_emulation *insn, return ret; } -static void __init register_insn_emulation(struct insn_emulation_ops *ops) +static void register_insn_emulation(struct insn_emulation_ops *ops) { unsigned long flags; struct insn_emulation *insn; @@ -237,7 +237,7 @@ static struct ctl_table ctl_abi[] = { { } }; -static void __init register_insn_emulation_sysctl(struct ctl_table *table) +static void register_insn_emulation_sysctl(struct ctl_table *table) { unsigned long flags; int i = 0; @@ -297,8 +297,11 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table) "4: mov %w0, %w5\n" \ " b 3b\n" \ " .popsection" \ - _ASM_EXTABLE(0b, 4b) \ - _ASM_EXTABLE(1b, 4b) \ + " .pushsection __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .quad 0b, 4b\n" \ + " .quad 1b, 4b\n" \ + " .popsection\n" \ ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ CONFIG_ARM64_PAN) \ : "=&r" (res), "+r" (data), "=&r" (temp) \ diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 1555520bbb74..34aa4b3b47e9 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -21,12 +21,26 @@ #include <asm/cputype.h> #include <asm/cpufeature.h> +#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) +#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) +#define MIDR_THUNDERX MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) +#define MIDR_KRYO2XX_SILVER \ + MIDR_CPU_PART(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER) + +#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ + MIDR_ARCHITECTURE_MASK) + static bool __maybe_unused is_affected_midr_range(const struct arm64_cpu_capabilities *entry) { - return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model, - entry->midr_range_min, - entry->midr_range_max); + u32 midr = read_cpuid_id(); + + if ((midr & CPU_MODEL_MASK) != entry->midr_model) + return false; + + midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK; + + return (midr >= entry->midr_range_min && midr <= entry->midr_range_max); } #define MIDR_RANGE(model, min, max) \ diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index c7cfb8fe06f9..b6bd7d447768 100644 --- 
a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -25,30 +25,19 @@ #include <asm/smp_plat.h> extern const struct cpu_operations smp_spin_table_ops; -extern const struct cpu_operations acpi_parking_protocol_ops; extern const struct cpu_operations cpu_psci_ops; const struct cpu_operations *cpu_ops[NR_CPUS]; -static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = { +static const struct cpu_operations *supported_cpu_ops[] __initconst = { &smp_spin_table_ops, &cpu_psci_ops, NULL, }; -static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = { -#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL - &acpi_parking_protocol_ops, -#endif - &cpu_psci_ops, - NULL, -}; - static const struct cpu_operations * __init cpu_get_ops(const char *name) { - const struct cpu_operations **ops; - - ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops; + const struct cpu_operations **ops = supported_cpu_ops; while (*ops) { if (!strcmp(name, (*ops)->name)) @@ -86,16 +75,8 @@ static const char *__init cpu_read_enable_method(int cpu) } } else { enable_method = acpi_get_enable_method(cpu); - if (!enable_method) { - /* - * In ACPI systems the boot CPU does not require - * checking the enable method since for some - * boot protocol (ie parking protocol) it need not - * be initialized. Don't warn spuriously. - */ - if (cpu != 0) - pr_err("Unsupported ACPI enable-method\n"); - } + if (!enable_method) + pr_err("Unsupported ACPI enable-method\n"); } return enable_method; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 7566cad9fa1d..0669c63281ea 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -67,10 +67,6 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); .width = 0, \ } -/* meta feature for alternatives */ -static bool __maybe_unused -cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry); - static struct arm64_ftr_bits ftr_id_aa64isar0[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0), @@ -127,11 +123,6 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0), - ARM64_FTR_END, -}; - static struct arm64_ftr_bits ftr_ctr[] = { U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0), @@ -293,7 +284,6 @@ static struct arm64_ftr_reg arm64_ftr_regs[] = { /* Op1 = 0, CRn = 0, CRm = 7 */ ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1), - ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2), /* Op1 = 3, CRn = 0, CRm = 0 */ ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr), @@ -418,7 +408,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); - init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); @@ -528,8 +517,6 @@ void update_cpu_features(int cpu, info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0); taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu, info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1); - taint |= 
check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu, - info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2); /* * EL3 is not our concern. @@ -634,18 +621,6 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry) return has_sre; } -static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry) -{ - u32 midr = read_cpuid_id(); - u32 rv_min, rv_max; - - /* Cavium ThunderX pass 1.x and 2.x */ - rv_min = 0; - rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK; - - return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max); -} - static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -676,28 +651,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .min_field_value = 2, }, #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ - { - .desc = "Software prefetching using PRFM", - .capability = ARM64_HAS_NO_HW_PREFETCH, - .matches = has_no_hw_prefetch, - }, -#ifdef CONFIG_ARM64_UAO - { - .desc = "User Access Override", - .capability = ARM64_HAS_UAO, - .matches = has_cpuid_feature, - .sys_reg = SYS_ID_AA64MMFR2_EL1, - .field_pos = ID_AA64MMFR2_UAO_SHIFT, - .min_field_value = 1, - .enable = cpu_enable_uao, - }, -#endif /* CONFIG_ARM64_UAO */ -#ifdef CONFIG_ARM64_PAN - { - .capability = ARM64_ALT_PAN_NOT_UAO, - .matches = cpufeature_pan_not_uao, - }, -#endif /* CONFIG_ARM64_PAN */ {}, }; @@ -731,7 +684,7 @@ static const struct arm64_cpu_capabilities arm64_hwcaps[] = { {}, }; -static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap) +static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap) { switch (cap->hwcap_type) { case CAP_HWCAP: @@ -776,12 +729,12 @@ static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities * return rc; } -static void __init setup_cpu_hwcaps(void) +static void setup_cpu_hwcaps(void) { int i; const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps; - for (i = 0; hwcaps[i].matches; i++) + for (i = 0; hwcaps[i].desc; i++) if (hwcaps[i].matches(&hwcaps[i])) cap_set_hwcap(&hwcaps[i]); } @@ -791,11 +744,11 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, { int i; - for (i = 0; caps[i].matches; i++) { + for (i = 0; caps[i].desc; i++) { if (!caps[i].matches(&caps[i])) continue; - if (!cpus_have_cap(caps[i].capability) && caps[i].desc) + if (!cpus_have_cap(caps[i].capability)) pr_info("%s %s\n", info, caps[i].desc); cpus_set_cap(caps[i].capability); } @@ -805,12 +758,11 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, * Run through the enabled capabilities and enable() it on all active * CPUs */ -static void __init -enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) +static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) { int i; - for (i = 0; caps[i].matches; i++) + for (i = 0; caps[i].desc; i++) if (caps[i].enable && cpus_have_cap(caps[i].capability)) on_each_cpu(caps[i].enable, NULL, true); } @@ -838,36 +790,35 @@ static inline void set_sys_caps_initialised(void) static u64 __raw_read_system_reg(u32 sys_id) { switch (sys_id) { - case SYS_ID_PFR0_EL1: return read_cpuid(SYS_ID_PFR0_EL1); - case SYS_ID_PFR1_EL1: return read_cpuid(SYS_ID_PFR1_EL1); - case SYS_ID_DFR0_EL1: return read_cpuid(SYS_ID_DFR0_EL1); - case SYS_ID_MMFR0_EL1: return read_cpuid(SYS_ID_MMFR0_EL1); - case SYS_ID_MMFR1_EL1: return read_cpuid(SYS_ID_MMFR1_EL1); - case SYS_ID_MMFR2_EL1: return read_cpuid(SYS_ID_MMFR2_EL1); - case SYS_ID_MMFR3_EL1: return 
read_cpuid(SYS_ID_MMFR3_EL1); - case SYS_ID_ISAR0_EL1: return read_cpuid(SYS_ID_ISAR0_EL1); - case SYS_ID_ISAR1_EL1: return read_cpuid(SYS_ID_ISAR1_EL1); - case SYS_ID_ISAR2_EL1: return read_cpuid(SYS_ID_ISAR2_EL1); - case SYS_ID_ISAR3_EL1: return read_cpuid(SYS_ID_ISAR3_EL1); - case SYS_ID_ISAR4_EL1: return read_cpuid(SYS_ID_ISAR4_EL1); - case SYS_ID_ISAR5_EL1: return read_cpuid(SYS_ID_ISAR4_EL1); - case SYS_MVFR0_EL1: return read_cpuid(SYS_MVFR0_EL1); - case SYS_MVFR1_EL1: return read_cpuid(SYS_MVFR1_EL1); - case SYS_MVFR2_EL1: return read_cpuid(SYS_MVFR2_EL1); - - case SYS_ID_AA64PFR0_EL1: return read_cpuid(SYS_ID_AA64PFR0_EL1); - case SYS_ID_AA64PFR1_EL1: return read_cpuid(SYS_ID_AA64PFR0_EL1); - case SYS_ID_AA64DFR0_EL1: return read_cpuid(SYS_ID_AA64DFR0_EL1); - case SYS_ID_AA64DFR1_EL1: return read_cpuid(SYS_ID_AA64DFR0_EL1); - case SYS_ID_AA64MMFR0_EL1: return read_cpuid(SYS_ID_AA64MMFR0_EL1); - case SYS_ID_AA64MMFR1_EL1: return read_cpuid(SYS_ID_AA64MMFR1_EL1); - case SYS_ID_AA64MMFR2_EL1: return read_cpuid(SYS_ID_AA64MMFR2_EL1); - case SYS_ID_AA64ISAR0_EL1: return read_cpuid(SYS_ID_AA64ISAR0_EL1); - case SYS_ID_AA64ISAR1_EL1: return read_cpuid(SYS_ID_AA64ISAR1_EL1); - - case SYS_CNTFRQ_EL0: return read_cpuid(SYS_CNTFRQ_EL0); - case SYS_CTR_EL0: return read_cpuid(SYS_CTR_EL0); - case SYS_DCZID_EL0: return read_cpuid(SYS_DCZID_EL0); + case SYS_ID_PFR0_EL1: return (u64)read_cpuid(ID_PFR0_EL1); + case SYS_ID_PFR1_EL1: return (u64)read_cpuid(ID_PFR1_EL1); + case SYS_ID_DFR0_EL1: return (u64)read_cpuid(ID_DFR0_EL1); + case SYS_ID_MMFR0_EL1: return (u64)read_cpuid(ID_MMFR0_EL1); + case SYS_ID_MMFR1_EL1: return (u64)read_cpuid(ID_MMFR1_EL1); + case SYS_ID_MMFR2_EL1: return (u64)read_cpuid(ID_MMFR2_EL1); + case SYS_ID_MMFR3_EL1: return (u64)read_cpuid(ID_MMFR3_EL1); + case SYS_ID_ISAR0_EL1: return (u64)read_cpuid(ID_ISAR0_EL1); + case SYS_ID_ISAR1_EL1: return (u64)read_cpuid(ID_ISAR1_EL1); + case SYS_ID_ISAR2_EL1: return (u64)read_cpuid(ID_ISAR2_EL1); + case SYS_ID_ISAR3_EL1: return (u64)read_cpuid(ID_ISAR3_EL1); + case SYS_ID_ISAR4_EL1: return (u64)read_cpuid(ID_ISAR4_EL1); + case SYS_ID_ISAR5_EL1: return (u64)read_cpuid(ID_ISAR4_EL1); + case SYS_MVFR0_EL1: return (u64)read_cpuid(MVFR0_EL1); + case SYS_MVFR1_EL1: return (u64)read_cpuid(MVFR1_EL1); + case SYS_MVFR2_EL1: return (u64)read_cpuid(MVFR2_EL1); + + case SYS_ID_AA64PFR0_EL1: return (u64)read_cpuid(ID_AA64PFR0_EL1); + case SYS_ID_AA64PFR1_EL1: return (u64)read_cpuid(ID_AA64PFR0_EL1); + case SYS_ID_AA64DFR0_EL1: return (u64)read_cpuid(ID_AA64DFR0_EL1); + case SYS_ID_AA64DFR1_EL1: return (u64)read_cpuid(ID_AA64DFR0_EL1); + case SYS_ID_AA64MMFR0_EL1: return (u64)read_cpuid(ID_AA64MMFR0_EL1); + case SYS_ID_AA64MMFR1_EL1: return (u64)read_cpuid(ID_AA64MMFR1_EL1); + case SYS_ID_AA64ISAR0_EL1: return (u64)read_cpuid(ID_AA64ISAR0_EL1); + case SYS_ID_AA64ISAR1_EL1: return (u64)read_cpuid(ID_AA64ISAR1_EL1); + + case SYS_CNTFRQ_EL0: return (u64)read_cpuid(CNTFRQ_EL0); + case SYS_CTR_EL0: return (u64)read_cpuid(CTR_EL0); + case SYS_DCZID_EL0: return (u64)read_cpuid(DCZID_EL0); default: BUG(); return 0; @@ -917,7 +868,7 @@ void verify_local_cpu_capabilities(void) return; caps = arm64_features; - for (i = 0; caps[i].matches; i++) { + for (i = 0; caps[i].desc; i++) { if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg) continue; /* @@ -930,7 +881,7 @@ void verify_local_cpu_capabilities(void) caps[i].enable(NULL); } - for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) { + for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) { if 
(!cpus_have_hwcap(&caps[i])) continue; if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) @@ -946,7 +897,7 @@ static inline void set_sys_caps_initialised(void) #endif /* CONFIG_HOTPLUG_CPU */ -static void __init setup_feature_capabilities(void) +static void setup_feature_capabilities(void) { update_cpu_capabilities(arm64_features, "detected feature:"); enable_cpu_capabilities(arm64_features); @@ -976,9 +927,3 @@ void __init setup_cpu_features(void) pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n", L1_CACHE_BYTES, cls); } - -static bool __maybe_unused -cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry) -{ - return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO)); -} diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 48281e4a719f..3691553f218e 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -211,41 +211,40 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) { info->reg_cntfrq = arch_timer_get_cntfrq(); info->reg_ctr = read_cpuid_cachetype(); - info->reg_dczid = read_cpuid(SYS_DCZID_EL0); + info->reg_dczid = read_cpuid(DCZID_EL0); info->reg_midr = read_cpuid_id(); - info->reg_id_aa64dfr0 = read_cpuid(SYS_ID_AA64DFR0_EL1); - info->reg_id_aa64dfr1 = read_cpuid(SYS_ID_AA64DFR1_EL1); - info->reg_id_aa64isar0 = read_cpuid(SYS_ID_AA64ISAR0_EL1); - info->reg_id_aa64isar1 = read_cpuid(SYS_ID_AA64ISAR1_EL1); + info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1); + info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1); + info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); + info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); /* * Explicitly mask out 16KB granule since we donot * want to support it */ - info->reg_id_aa64mmfr0 = read_cpuid(SYS_ID_AA64MMFR0_EL1) & + info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1) & (~MMFR0_EL1_16KGRAN_MASK); - info->reg_id_aa64mmfr1 = read_cpuid(SYS_ID_AA64MMFR1_EL1); - info->reg_id_aa64mmfr2 = read_cpuid(SYS_ID_AA64MMFR2_EL1); - info->reg_id_aa64pfr0 = read_cpuid(SYS_ID_AA64PFR0_EL1); - info->reg_id_aa64pfr1 = read_cpuid(SYS_ID_AA64PFR1_EL1); - - info->reg_id_dfr0 = read_cpuid(SYS_ID_DFR0_EL1); - info->reg_id_isar0 = read_cpuid(SYS_ID_ISAR0_EL1); - info->reg_id_isar1 = read_cpuid(SYS_ID_ISAR1_EL1); - info->reg_id_isar2 = read_cpuid(SYS_ID_ISAR2_EL1); - info->reg_id_isar3 = read_cpuid(SYS_ID_ISAR3_EL1); - info->reg_id_isar4 = read_cpuid(SYS_ID_ISAR4_EL1); - info->reg_id_isar5 = read_cpuid(SYS_ID_ISAR5_EL1); - info->reg_id_mmfr0 = read_cpuid(SYS_ID_MMFR0_EL1); - info->reg_id_mmfr1 = read_cpuid(SYS_ID_MMFR1_EL1); - info->reg_id_mmfr2 = read_cpuid(SYS_ID_MMFR2_EL1); - info->reg_id_mmfr3 = read_cpuid(SYS_ID_MMFR3_EL1); - info->reg_id_pfr0 = read_cpuid(SYS_ID_PFR0_EL1); - info->reg_id_pfr1 = read_cpuid(SYS_ID_PFR1_EL1); - - info->reg_mvfr0 = read_cpuid(SYS_MVFR0_EL1); - info->reg_mvfr1 = read_cpuid(SYS_MVFR1_EL1); - info->reg_mvfr2 = read_cpuid(SYS_MVFR2_EL1); + info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); + info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); + info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); + + info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1); + info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); + info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); + info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); + info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1); + info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1); + info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1); + info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1); + 
info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1); + info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1); + info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); + info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); + info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); + + info->reg_mvfr0 = read_cpuid(MVFR0_EL1); + info->reg_mvfr1 = read_cpuid(MVFR1_EL1); + info->reg_mvfr2 = read_cpuid(MVFR2_EL1); cpuinfo_detect_icache_policy(info); diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index f82036e02485..a773db92908b 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -61,7 +61,7 @@ ENTRY(entry) */ mov x20, x0 // DTB address ldr x0, [sp, #16] // relocated _text address - movz x21, #:abs_g0:stext_offset + ldr x21, =stext_offset add x21, x0, x21 /* diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index b67e70c34888..e3131b39fbf2 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -27,7 +27,6 @@ #include <asm/cpufeature.h> #include <asm/errno.h> #include <asm/esr.h> -#include <asm/irq.h> #include <asm/thread_info.h> #include <asm/unistd.h> @@ -89,12 +88,9 @@ .if \el == 0 mrs x21, sp_el0 - mov tsk, sp - and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear, + get_thread_info tsk // Ensure MDSCR_EL1.SS is clear, ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug disable_step_tsk x19, x20 // exceptions when scheduling. - - mov x29, xzr // fp pointed to user-space .else add x21, sp, #S_FRAME_SIZE .endif @@ -112,13 +108,6 @@ .endif /* - * Set sp_el0 to current thread_info. - */ - .if \el == 0 - msr sp_el0, tsk - .endif - - /* * Registers that may be useful after this macro is invoked: * * x21 - aborted SP @@ -175,44 +164,8 @@ alternative_endif .endm .macro get_thread_info, rd - mrs \rd, sp_el0 - .endm - - .macro irq_stack_entry - mov x19, sp // preserve the original sp - - /* - * Compare sp with the current thread_info, if the top - * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and - * should switch to the irq stack. - */ - and x25, x19, #~(THREAD_SIZE - 1) - cmp x25, tsk - b.ne 9998f - - this_cpu_ptr irq_stack, x25, x26 - mov x26, #IRQ_STACK_START_SP - add x26, x25, x26 - - /* switch to the irq stack */ - mov sp, x26 - - /* - * Add a dummy stack frame, this non-standard format is fixed up - * by unwind_frame() - */ - stp x29, x19, [sp, #-16]! - mov x29, sp - -9998: - .endm - - /* - * x19 should be preserved between irq_stack_entry and - * irq_stack_exit. - */ - .macro irq_stack_exit - mov sp, x19 + mov \rd, sp + and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack .endm /* @@ -230,11 +183,10 @@ tsk .req x28 // current thread_info * Interrupt handling. */ .macro irq_handler - ldr_l x1, handle_arch_irq + adrp x1, handle_arch_irq + ldr x1, [x1, #:lo12:handle_arch_irq] mov x0, sp - irq_stack_entry blr x1 - irq_stack_exit .endm .text @@ -406,10 +358,10 @@ el1_irq: bl trace_hardirqs_off #endif - get_thread_info tsk irq_handler #ifdef CONFIG_PREEMPT + get_thread_info tsk ldr w24, [tsk, #TI_PREEMPT] // get preempt count cbnz w24, 1f // preempt count != 0 ldr x0, [tsk, #TI_FLAGS] // get flags @@ -674,8 +626,6 @@ ENTRY(cpu_switch_to) mov v15.16b, v15.16b #endif mov sp, x9 - and x9, x9, #~(THREAD_SIZE - 1) - msr sp_el0, x9 ret ENDPROC(cpu_switch_to) @@ -703,14 +653,14 @@ ret_fast_syscall_trace: work_pending: tbnz x1, #TIF_NEED_RESCHED, work_resched /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */ + ldr x2, [sp, #S_PSTATE] mov x0, sp // 'regs' + tst x2, #PSR_MODE_MASK // user mode regs? 
+ b.ne no_work_pending // returning to kernel enable_irq // enable interrupts for do_notify_resume() bl do_notify_resume b ret_to_user work_resched: -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off // the IRQs are off here, inform the tracing code -#endif bl schedule /* @@ -722,6 +672,7 @@ ret_to_user: and x2, x1, #_TIF_WORK_MASK cbnz x2, work_pending enable_step_tsk x1, x2 +no_work_pending: kernel_exit 0 ENDPROC(ret_to_user) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index acc1afd5c749..4c46c54a3ad7 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -289,7 +289,7 @@ static struct notifier_block fpsimd_cpu_pm_notifier_block = { .notifier_call = fpsimd_cpu_pm_notifier, }; -static void __init fpsimd_pm_init(void) +static void fpsimd_pm_init(void) { cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block); } diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index ebecf9aa33d1..c851be795080 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -29,11 +29,12 @@ static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, /* * Note: - * We are paranoid about modifying text, as if a bug were to happen, it - * could cause us to read or write to someplace that could cause harm. - * Carefully read and modify the code with aarch64_insn_*() which uses - * probe_kernel_*(), and make sure what we read is what we expected it - * to be before modifying it. + * Due to modules and __init, code can disappear and change, + * we need to protect against faulting as well as code changing. + * We do this by aarch64_insn_*() which use the probe_kernel_*(). + * + * No lock is held here because all the modifications are run + * through stop_machine(). */ if (validate) { if (aarch64_insn_read((void *)pc, &replaced)) @@ -92,11 +93,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, return ftrace_modify_code(pc, old, new, true); } -void arch_ftrace_update_code(int command) -{ - ftrace_modify_all_code(command); -} - int __init ftrace_dyn_arch_init(void) { return 0; @@ -129,20 +125,23 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, * on other archs. It's unlikely on AArch64. */ old = *parent; + *parent = return_hooker; trace.func = self_addr; trace.depth = current->curr_ret_stack + 1; /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) + if (!ftrace_graph_entry(&trace)) { + *parent = old; return; + } err = ftrace_push_return_trace(old, self_addr, &trace.depth, frame_pointer); - if (err == -EBUSY) + if (err == -EBUSY) { + *parent = old; return; - else - *parent = return_hooker; + } } #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a88a15447c3b..b685257926f0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -29,7 +29,6 @@ #include <asm/asm-offsets.h> #include <asm/cache.h> #include <asm/cputype.h> -#include <asm/elf.h> #include <asm/kernel-pgtable.h> #include <asm/memory.h> #include <asm/pgtable-hwdef.h> @@ -68,11 +67,12 @@ * in the entry routines. */ __HEAD -_head: + /* * DO NOT MODIFY. Image header expected by Linux boot-loaders. */ #ifdef CONFIG_EFI +efi_head: /* * This add instruction has no meaningful effect except that * its opcode forms the magic "MZ" signature required by UEFI. 
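
For the "MZ" comment above: the instruction in question in this kernel is add x13, x18, #0x16, which assembles to 0x91005a4d; stored little-endian in the image, its first two bytes are 0x4d 0x5a, i.e. ASCII "MZ", the PE/COFF magic UEFI expects at offset 0. A small check of that encoding (assuming a little-endian host; the constants follow the A64 ADD-immediate format):

/* Show how "add x13, x18, #0x16" yields the "MZ" PE magic (sketch). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        /* A64 ADD (immediate), 64-bit: base 0x91000000, imm12=0x16, Rn=x18, Rd=x13 */
        uint32_t insn = 0x91000000u | (0x16u << 10) | (18u << 5) | 13u;
        unsigned char bytes[4];

        memcpy(bytes, &insn, sizeof(insn));     /* image byte order on LE */
        printf("0x%08x -> %c%c\n", (unsigned)insn, bytes[0], bytes[1]);
        return 0;
}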
@@ -83,9 +83,9 @@ _head: b stext // branch to kernel start, magic .long 0 // reserved #endif - le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian - le64sym _kernel_size_le // Effective size of kernel image, little-endian - le64sym _kernel_flags_le // Informative flags, little-endian + .quad _kernel_offset_le // Image load offset from start of RAM, little-endian + .quad _kernel_size_le // Effective size of kernel image, little-endian + .quad _kernel_flags_le // Informative flags, little-endian .quad 0 // reserved .quad 0 // reserved .quad 0 // reserved @@ -94,14 +94,14 @@ _head: .byte 0x4d .byte 0x64 #ifdef CONFIG_EFI - .long pe_header - _head // Offset to the PE header. + .long pe_header - efi_head // Offset to the PE header. #else .word 0 // reserved #endif #ifdef CONFIG_EFI .globl __efistub_stext_offset - .set __efistub_stext_offset, stext - _head + .set __efistub_stext_offset, stext - efi_head .align 3 pe_header: .ascii "PE" @@ -124,7 +124,7 @@ optional_header: .long _end - stext // SizeOfCode .long 0 // SizeOfInitializedData .long 0 // SizeOfUninitializedData - .long __efistub_entry - _head // AddressOfEntryPoint + .long __efistub_entry - efi_head // AddressOfEntryPoint .long __efistub_stext_offset // BaseOfCode extra_header_fields: @@ -139,7 +139,7 @@ extra_header_fields: .short 0 // MinorSubsystemVersion .long 0 // Win32VersionValue - .long _end - _head // SizeOfImage + .long _end - efi_head // SizeOfImage // Everything before the kernel image is considered part of the header .long __efistub_stext_offset // SizeOfHeaders @@ -210,7 +210,6 @@ section_table: ENTRY(stext) bl preserve_boot_args bl el2_setup // Drop to EL1, w20=cpu_boot_mode - mov x23, xzr // KASLR offset, defaults to 0 adrp x24, __PHYS_OFFSET bl set_cpu_boot_mode_flag bl __create_page_tables // x25=TTBR0, x26=TTBR1 @@ -220,13 +219,11 @@ ENTRY(stext) * On return, the CPU will be ready for the MMU to be turned on and * the TCR will have been set. */ - ldr x27, 0f // address to jump to after + ldr x27, =__mmap_switched // address to jump to after // MMU has been enabled adr_l lr, __enable_mmu // return (PIC) address b __cpu_setup // initialise processor ENDPROC(stext) - .align 3 -0: .quad __mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR /* * Preserve the arguments passed by the bootloader in x0 .. x3 @@ -314,7 +311,7 @@ ENDPROC(preserve_boot_args) __create_page_tables: adrp x25, idmap_pg_dir adrp x26, swapper_pg_dir - mov x28, lr + mov x27, lr /* * Invalidate the idmap and swapper page tables to avoid potential @@ -392,11 +389,9 @@ __create_page_tables: * Map the kernel image (starting with PHYS_OFFSET). 
*/ mov x0, x26 // swapper_pg_dir - ldr x5, =KIMAGE_VADDR - add x5, x5, x23 // add KASLR displacement + mov x5, #PAGE_OFFSET create_pgd_entry x0, x5, x3, x6 - ldr w6, kernel_img_size - add x6, x6, x5 + ldr x6, =KERNEL_END // __va(KERNEL_END) mov x3, x24 // phys offset create_block_map x0, x7, x3, x5, x6 @@ -410,11 +405,9 @@ __create_page_tables: dmb sy bl __inval_cache_range - ret x28 + mov lr, x27 + ret ENDPROC(__create_page_tables) - -kernel_img_size: - .long _end - (_head - TEXT_OFFSET) .ltorg /* @@ -422,81 +415,21 @@ kernel_img_size: */ .set initial_sp, init_thread_union + THREAD_START_SP __mmap_switched: - mov x28, lr // preserve LR - adr_l x8, vectors // load VBAR_EL1 with virtual - msr vbar_el1, x8 // vector table address - isb + adr_l x6, __bss_start + adr_l x7, __bss_stop - // Clear BSS - adr_l x0, __bss_start - mov x1, xzr - adr_l x2, __bss_stop - sub x2, x2, x0 - bl __pi_memset - dsb ishst // Make zero page visible to PTW - -#ifdef CONFIG_RELOCATABLE - - /* - * Iterate over each entry in the relocation table, and apply the - * relocations in place. - */ - adr_l x8, __dynsym_start // start of symbol table - adr_l x9, __reloc_start // start of reloc table - adr_l x10, __reloc_end // end of reloc table - -0: cmp x9, x10 +1: cmp x6, x7 b.hs 2f - ldp x11, x12, [x9], #24 - ldr x13, [x9, #-8] - cmp w12, #R_AARCH64_RELATIVE - b.ne 1f - add x13, x13, x23 // relocate - str x13, [x11, x23] - b 0b - -1: cmp w12, #R_AARCH64_ABS64 - b.ne 0b - add x12, x12, x12, lsl #1 // symtab offset: 24x top word - add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word - ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx - ldr x15, [x12, #8] // Elf64_Sym::st_value - cmp w14, #-0xf // SHN_ABS (0xfff1) ? - add x14, x15, x23 // relocate - csel x15, x14, x15, ne - add x15, x13, x15 - str x15, [x11, x23] - b 0b - -2: adr_l x8, kimage_vaddr // make relocated kimage_vaddr - dc cvac, x8 // value visible to secondaries - dsb sy // with MMU off -#endif - + str xzr, [x6], #8 // Clear BSS + b 1b +2: adr_l sp, initial_sp, x4 - mov x4, sp - and x4, x4, #~(THREAD_SIZE - 1) - msr sp_el0, x4 // Save thread_info str_l x21, __fdt_pointer, x5 // Save FDT pointer - - ldr_l x4, kimage_vaddr // Save the offset between - sub x4, x4, x24 // the kernel virtual and - str_l x4, kimage_voffset, x5 // physical mappings - + str_l x24, memstart_addr, x6 // Save PHYS_OFFSET mov x29, #0 #ifdef CONFIG_KASAN bl kasan_early_init #endif -#ifdef CONFIG_RANDOMIZE_BASE - cbnz x23, 0f // already running randomized? - mov x0, x21 // pass FDT address in x0 - bl kaslr_early_init // parse FDT for KASLR options - cbz x0, 0f // KASLR disabled? just proceed - mov x23, x0 // record KASLR offset - ret x28 // we must enable KASLR, return - // to __enable_mmu() -0: -#endif b start_kernel ENDPROC(__mmap_switched) @@ -505,10 +438,6 @@ ENDPROC(__mmap_switched) * hotplug and needs to have the same protections as the text region */ .section ".text","ax" - -ENTRY(kimage_vaddr) - .quad _text - TEXT_OFFSET - /* * If we're fortunate enough to boot at EL2, ensure that the world is * sane before dropping to EL1. 
@@ -674,22 +603,14 @@ ENTRY(secondary_startup) adrp x26, swapper_pg_dir bl __cpu_setup // initialise processor - ldr x8, kimage_vaddr - ldr w9, 0f - sub x27, x8, w9, sxtw // address to jump to after enabling the MMU + ldr x21, =secondary_data + ldr x27, =__secondary_switched // address to jump to after enabling the MMU b __enable_mmu ENDPROC(secondary_startup) -0: .long (_text - TEXT_OFFSET) - __secondary_switched ENTRY(__secondary_switched) - adr_l x5, vectors - msr vbar_el1, x5 - isb - - ldr_l x0, secondary_data // get secondary_data.stack + ldr x0, [x21] // get secondary_data.stack mov sp, x0 - and x0, x0, #~(THREAD_SIZE - 1) - msr sp_el0, x0 // save thread_info mov x29, #0 b secondary_start_kernel ENDPROC(__secondary_switched) @@ -707,11 +628,12 @@ ENDPROC(__secondary_switched) */ .section ".idmap.text", "ax" __enable_mmu: - mrs x18, sctlr_el1 // preserve old SCTLR_EL1 value mrs x1, ID_AA64MMFR0_EL1 ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED b.ne __no_granule_support + ldr x5, =vectors + msr vbar_el1, x5 msr ttbr0_el1, x25 // load TTBR0 msr ttbr1_el1, x26 // load TTBR1 isb @@ -725,26 +647,6 @@ __enable_mmu: ic iallu dsb nsh isb -#ifdef CONFIG_RANDOMIZE_BASE - mov x19, x0 // preserve new SCTLR_EL1 value - blr x27 - - /* - * If we return here, we have a KASLR displacement in x23 which we need - * to take into account by discarding the current kernel mapping and - * creating a new one. - */ - msr sctlr_el1, x18 // disable the MMU - isb - bl __create_page_tables // recreate kernel mapping - - msr sctlr_el1, x19 // re-enable the MMU - isb - ic iallu // flush instructions fetched - dsb nsh // via old mapping - isb - add x27, x27, x23 // relocated __mmap_switched -#endif br x27 ENDPROC(__enable_mmu) diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index db1bf57948f1..bc2abb8b1599 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -26,40 +26,31 @@ * There aren't any ELF relocations we can use to endian-swap values known only * at link time (e.g. the subtraction of two symbol addresses), so we must get * the linker to endian-swap certain values before emitting them. - * - * Note that, in order for this to work when building the ELF64 PIE executable - * (for KASLR), these values should not be referenced via R_AARCH64_ABS64 - * relocations, since these are fixed up at runtime rather than at build time - * when PIE is in effect. So we need to split them up in 32-bit high and low - * words. 
*/ #ifdef CONFIG_CPU_BIG_ENDIAN -#define DATA_LE32(data) \ - ((((data) & 0x000000ff) << 24) | \ - (((data) & 0x0000ff00) << 8) | \ - (((data) & 0x00ff0000) >> 8) | \ - (((data) & 0xff000000) >> 24)) +#define DATA_LE64(data) \ + ((((data) & 0x00000000000000ff) << 56) | \ + (((data) & 0x000000000000ff00) << 40) | \ + (((data) & 0x0000000000ff0000) << 24) | \ + (((data) & 0x00000000ff000000) << 8) | \ + (((data) & 0x000000ff00000000) >> 8) | \ + (((data) & 0x0000ff0000000000) >> 24) | \ + (((data) & 0x00ff000000000000) >> 40) | \ + (((data) & 0xff00000000000000) >> 56)) #else -#define DATA_LE32(data) ((data) & 0xffffffff) +#define DATA_LE64(data) ((data) & 0xffffffffffffffff) #endif -#define DEFINE_IMAGE_LE64(sym, data) \ - sym##_lo32 = DATA_LE32((data) & 0xffffffff); \ - sym##_hi32 = DATA_LE32((data) >> 32) - #ifdef CONFIG_CPU_BIG_ENDIAN -#define __HEAD_FLAG_BE 1 +#define __HEAD_FLAG_BE 1 #else -#define __HEAD_FLAG_BE 0 +#define __HEAD_FLAG_BE 0 #endif -#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2) - -#define __HEAD_FLAG_PHYS_BASE 1 +#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2) -#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \ - (__HEAD_FLAG_PAGE_SIZE << 1) | \ - (__HEAD_FLAG_PHYS_BASE << 3)) +#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \ + (__HEAD_FLAG_PAGE_SIZE << 1)) /* * These will output as part of the Image header, which should be little-endian @@ -67,23 +58,13 @@ * endian swapped in head.S, all are done here for consistency. */ #define HEAD_SYMBOLS \ - DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text); \ - DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \ - DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS); + _kernel_size_le = DATA_LE64(_end - _text); \ + _kernel_offset_le = DATA_LE64(TEXT_OFFSET); \ + _kernel_flags_le = DATA_LE64(__HEAD_FLAGS); #ifdef CONFIG_EFI /* - * Prevent the symbol aliases below from being emitted into the kallsyms - * table, by forcing them to be absolute symbols (which are conveniently - * ignored by scripts/kallsyms) rather than section relative symbols. - * The distinction is only relevant for partial linking, and only for symbols - * that are defined within a section declaration (which is not the case for - * the definitions below) so the resulting values will be identical. - */ -#define KALLSYMS_HIDE(sym) ABSOLUTE(sym) - -/* * The EFI stub has its own symbol namespace prefixed by __efistub_, to * isolate it from the kernel proper. The following symbols are legally * accessed by the stub, so provide some aliases to make them accessible. @@ -92,25 +73,25 @@ * linked at. 
The routines below are all implemented in assembler in a * position independent manner */ -__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp); -__efistub_memchr = KALLSYMS_HIDE(__pi_memchr); -__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy); -__efistub_memmove = KALLSYMS_HIDE(__pi_memmove); -__efistub_memset = KALLSYMS_HIDE(__pi_memset); -__efistub_strlen = KALLSYMS_HIDE(__pi_strlen); -__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp); -__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp); -__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area); +__efistub_memcmp = __pi_memcmp; +__efistub_memchr = __pi_memchr; +__efistub_memcpy = __pi_memcpy; +__efistub_memmove = __pi_memmove; +__efistub_memset = __pi_memset; +__efistub_strlen = __pi_strlen; +__efistub_strcmp = __pi_strcmp; +__efistub_strncmp = __pi_strncmp; +__efistub___flush_dcache_area = __pi___flush_dcache_area; #ifdef CONFIG_KASAN -__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy); -__efistub___memmove = KALLSYMS_HIDE(__pi_memmove); -__efistub___memset = KALLSYMS_HIDE(__pi_memset); +__efistub___memcpy = __pi_memcpy; +__efistub___memmove = __pi_memmove; +__efistub___memset = __pi_memset; #endif -__efistub__text = KALLSYMS_HIDE(_text); -__efistub__end = KALLSYMS_HIDE(_end); -__efistub__edata = KALLSYMS_HIDE(_edata); +__efistub__text = _text; +__efistub__end = _end; +__efistub__edata = _edata; #endif diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 2386b26c0712..9f17ec071ee0 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -30,9 +30,6 @@ unsigned long irq_err_count; -/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */ -DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16); - int arch_show_interrupts(struct seq_file *p, int prec) { show_ipi_list(p, prec); diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c deleted file mode 100644 index 582983920054..000000000000 --- a/arch/arm64/kernel/kaslr.c +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/crc32.h> -#include <linux/init.h> -#include <linux/libfdt.h> -#include <linux/mm_types.h> -#include <linux/sched.h> -#include <linux/types.h> - -#include <asm/fixmap.h> -#include <asm/kernel-pgtable.h> -#include <asm/memory.h> -#include <asm/mmu.h> -#include <asm/pgtable.h> -#include <asm/sections.h> - -u64 __read_mostly module_alloc_base; -u16 __initdata memstart_offset_seed; - -static __init u64 get_kaslr_seed(void *fdt) -{ - int node, len; - u64 *prop; - u64 ret; - - node = fdt_path_offset(fdt, "/chosen"); - if (node < 0) - return 0; - - prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len); - if (!prop || len != sizeof(u64)) - return 0; - - ret = fdt64_to_cpu(*prop); - *prop = 0; - return ret; -} - -static __init const u8 *get_cmdline(void *fdt) -{ - static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; - - if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) { - int node; - const u8 *prop; - - node = fdt_path_offset(fdt, "/chosen"); - if (node < 0) - goto out; - - prop = fdt_getprop(fdt, node, "bootargs", NULL); - if (!prop) - goto out; - return prop; - } -out: - return default_cmdline; -} - -extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, - pgprot_t prot); - -/* - * This routine will be executed with the kernel mapped at its default virtual - * address, and if it returns successfully, the kernel will be remapped, and - * start_kernel() will be executed from a randomized virtual offset. The - * relocation will result in all absolute references (e.g., static variables - * containing function pointers) to be reinitialized, and zero-initialized - * .bss variables will be reset to 0. - */ -u64 __init kaslr_early_init(u64 dt_phys) -{ - void *fdt; - u64 seed, offset, mask, module_range; - const u8 *cmdline, *str; - int size; - - /* - * Set a reasonable default for module_alloc_base in case - * we end up running with module randomization disabled. - */ - module_alloc_base = (u64)_etext - MODULES_VSIZE; - - /* - * Try to map the FDT early. If this fails, we simply bail, - * and proceed with KASLR disabled. We will make another - * attempt at mapping the FDT in setup_machine() - */ - early_fixmap_init(); - fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); - if (!fdt) - return 0; - - /* - * Retrieve (and wipe) the seed from the FDT - */ - seed = get_kaslr_seed(fdt); - if (!seed) - return 0; - - /* - * Check if 'nokaslr' appears on the command line, and - * return 0 if that is the case. - */ - cmdline = get_cmdline(fdt); - str = strstr(cmdline, "nokaslr"); - if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) - return 0; - - /* - * OK, so we are proceeding with KASLR enabled. Calculate a suitable - * kernel image offset from the seed. Let's place the kernel in the - * lower half of the VMALLOC area (VA_BITS - 2). - * Even if we could randomize at page granularity for 16k and 64k pages, - * let's always round to 2 MB so we don't interfere with the ability to - * map using contiguous PTEs - */ - mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1); - offset = seed & mask; - - /* use the top 16 bits to randomize the linear region */ - memstart_offset_seed = seed >> 48; - - /* - * The kernel Image should not extend across a 1GB/32MB/512MB alignment - * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this - * happens, increase the KASLR offset by the size of the kernel image. 
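
The offset computation in the removed kaslr_early_init() above reduces the seed to a 2 MiB-aligned displacement within the lower half of the vmalloc space and keeps the top 16 bits for linear-region randomization. A short worked sketch of that arithmetic, assuming VA_BITS=48 and an arbitrary example seed (both are assumptions for illustration):

/* Worked example of the removed kaslr_early_init() offset math (sketch). */
#include <stdio.h>
#include <stdint.h>

#define VA_BITS 48
#define SZ_2M   0x200000ULL

int main(void)
{
        uint64_t seed = 0x1234abcd5678ef90ULL;  /* example value only */

        /* lower half of the VMALLOC area, rounded to 2 MiB granules */
        uint64_t mask = ((1ULL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
        uint64_t offset = seed & mask;
        uint16_t memstart_offset_seed = seed >> 48;

        printf("mask   = 0x%016llx\n", (unsigned long long)mask);
        printf("offset = 0x%016llx\n", (unsigned long long)offset);
        printf("linear region seed = 0x%04x\n", (unsigned)memstart_offset_seed);
        return 0;
}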
- */ - if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) != - (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT)) - offset = (offset + (u64)(_end - _text)) & mask; - - if (IS_ENABLED(CONFIG_KASAN)) - /* - * KASAN does not expect the module region to intersect the - * vmalloc region, since shadow memory is allocated for each - * module at load time, whereas the vmalloc region is shadowed - * by KASAN zero pages. So keep modules out of the vmalloc - * region if KASAN is enabled. - */ - return offset; - - if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { - /* - * Randomize the module region independently from the core - * kernel. This prevents modules from leaking any information - * about the address of the kernel itself, but results in - * branches between modules and the core kernel that are - * resolved via PLTs. (Branches between modules will be - * resolved normally.) - */ - module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE; - module_alloc_base = VMALLOC_START; - } else { - /* - * Randomize the module region by setting module_alloc_base to - * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE, - * _stext) . This guarantees that the resulting region still - * covers [_stext, _etext], and that all relative branches can - * be resolved without veneers. - */ - module_range = MODULES_VSIZE - (u64)(_etext - _stext); - module_alloc_base = (u64)_etext + offset - MODULES_VSIZE; - } - - /* use the lower 21 bits to randomize the base of the module region */ - module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; - module_alloc_base &= PAGE_MASK; - - return offset; -} diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c deleted file mode 100644 index 1ce90d8450ae..000000000000 --- a/arch/arm64/kernel/module-plts.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (C) 2014-2016 Linaro Ltd. <ard.biesheuvel@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/elf.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sort.h> - -struct plt_entry { - /* - * A program that conforms to the AArch64 Procedure Call Standard - * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or - * IP1 (x17) may be inserted at any branch instruction that is - * exposed to a relocation that supports long branches. Since that - * is exactly what we are dealing with here, we are free to use x16 - * as a scratch register in the PLT veneers. - */ - __le32 mov0; /* movn x16, #0x.... */ - __le32 mov1; /* movk x16, #0x...., lsl #16 */ - __le32 mov2; /* movk x16, #0x...., lsl #32 */ - __le32 br; /* br x16 */ -}; - -u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela, - Elf64_Sym *sym) -{ - struct plt_entry *plt = (struct plt_entry *)mod->arch.plt->sh_addr; - int i = mod->arch.plt_num_entries; - u64 val = sym->st_value + rela->r_addend; - - /* - * We only emit PLT entries against undefined (SHN_UNDEF) symbols, - * which are listed in the ELF symtab section, but without a type - * or a size. 
- * So, similar to how the module loader uses the Elf64_Sym::st_value - * field to store the resolved addresses of undefined symbols, let's - * borrow the Elf64_Sym::st_size field (whose value is never used by - * the module loader, even for symbols that are defined) to record - * the address of a symbol's associated PLT entry as we emit it for a - * zero addend relocation (which is the only kind we have to deal with - * in practice). This allows us to find duplicates without having to - * go through the table every time. - */ - if (rela->r_addend == 0 && sym->st_size != 0) { - BUG_ON(sym->st_size < (u64)plt || sym->st_size >= (u64)&plt[i]); - return sym->st_size; - } - - mod->arch.plt_num_entries++; - BUG_ON(mod->arch.plt_num_entries > mod->arch.plt_max_entries); - - /* - * MOVK/MOVN/MOVZ opcode: - * +--------+------------+--------+-----------+-------------+---------+ - * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] | - * +--------+------------+--------+-----------+-------------+---------+ - * - * Rd := 0x10 (x16) - * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32) - * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ) - * sf := 1 (64-bit variant) - */ - plt[i] = (struct plt_entry){ - cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5), - cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5), - cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5), - cpu_to_le32(0xd61f0200) - }; - - if (rela->r_addend == 0) - sym->st_size = (u64)&plt[i]; - - return (u64)&plt[i]; -} - -#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b)) - -static int cmp_rela(const void *a, const void *b) -{ - const Elf64_Rela *x = a, *y = b; - int i; - - /* sort by type, symbol index and addend */ - i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info)); - if (i == 0) - i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info)); - if (i == 0) - i = cmp_3way(x->r_addend, y->r_addend); - return i; -} - -static bool duplicate_rel(const Elf64_Rela *rela, int num) -{ - /* - * Entries are sorted by type, symbol index and addend. That means - * that, if a duplicate entry exists, it must be in the preceding - * slot. - */ - return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0; -} - -static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num) -{ - unsigned int ret = 0; - Elf64_Sym *s; - int i; - - for (i = 0; i < num; i++) { - switch (ELF64_R_TYPE(rela[i].r_info)) { - case R_AARCH64_JUMP26: - case R_AARCH64_CALL26: - /* - * We only have to consider branch targets that resolve - * to undefined symbols. This is not simply a heuristic, - * it is a fundamental limitation, since the PLT itself - * is part of the module, and needs to be within 128 MB - * as well, so modules can never grow beyond that limit. - */ - s = syms + ELF64_R_SYM(rela[i].r_info); - if (s->st_shndx != SHN_UNDEF) - break; - - /* - * Jump relocations with non-zero addends against - * undefined symbols are supported by the ELF spec, but - * do not occur in practice (e.g., 'jump n bytes past - * the entry point of undefined function symbol f'). - * So we need to support them, but there is no need to - * take them into consideration when trying to optimize - * this code. So let's only check for duplicates when - * the addend is zero: this allows us to record the PLT - * entry address in the symbol table itself, rather than - * having to search the list for duplicates each time we - * emit one. 
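
The MOVN/MOVK opcode table above is easier to follow with a concrete value. The sketch below packs an arbitrary example target address into the four-instruction veneer (movn/movk/movk/br x16) using the same constants as the removed module_emit_plt_entry(); the helper name and the address are illustrative only:

/* Build one PLT veneer as in the removed module-plts.c (sketch). */
#include <stdio.h>
#include <stdint.h>

struct plt_entry {
        uint32_t mov0;  /* movn x16, #0x....          */
        uint32_t mov1;  /* movk x16, #0x...., lsl #16 */
        uint32_t mov2;  /* movk x16, #0x...., lsl #32 */
        uint32_t br;    /* br   x16                   */
};

static struct plt_entry make_plt(uint64_t val)
{
        struct plt_entry e = {
                .mov0 = (uint32_t)(0x92800010u | ((~val        & 0xffff) << 5)),
                .mov1 = (uint32_t)(0xf2a00010u | (((val >> 16) & 0xffff) << 5)),
                .mov2 = (uint32_t)(0xf2c00010u | (((val >> 32) & 0xffff) << 5)),
                .br   = 0xd61f0200u,
        };
        return e;
}

int main(void)
{
        /* example branch target only; any kernel VA with the top 16 bits set works */
        struct plt_entry e = make_plt(0xffffffc000123456ULL);

        printf("%08x %08x %08x %08x\n", e.mov0, e.mov1, e.mov2, e.br);
        return 0;
}

The MOVN of the inverted low 16 bits leaves the upper bits of x16 all ones, which is why only three move instructions are needed for a kernel-space target.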
- */ - if (rela[i].r_addend != 0 || !duplicate_rel(rela, i)) - ret++; - break; - } - } - return ret; -} - -int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, - char *secstrings, struct module *mod) -{ - unsigned long plt_max_entries = 0; - Elf64_Sym *syms = NULL; - int i; - - /* - * Find the empty .plt section so we can expand it to store the PLT - * entries. Record the symtab address as well. - */ - for (i = 0; i < ehdr->e_shnum; i++) { - if (strcmp(".plt", secstrings + sechdrs[i].sh_name) == 0) - mod->arch.plt = sechdrs + i; - else if (sechdrs[i].sh_type == SHT_SYMTAB) - syms = (Elf64_Sym *)sechdrs[i].sh_addr; - } - - if (!mod->arch.plt) { - pr_err("%s: module PLT section missing\n", mod->name); - return -ENOEXEC; - } - if (!syms) { - pr_err("%s: module symtab section missing\n", mod->name); - return -ENOEXEC; - } - - for (i = 0; i < ehdr->e_shnum; i++) { - Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset; - int numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela); - Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info; - - if (sechdrs[i].sh_type != SHT_RELA) - continue; - - /* ignore relocations that operate on non-exec sections */ - if (!(dstsec->sh_flags & SHF_EXECINSTR)) - continue; - - /* sort by type, symbol index and addend */ - sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL); - - plt_max_entries += count_plts(syms, rels, numrels); - } - - mod->arch.plt->sh_type = SHT_NOBITS; - mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; - mod->arch.plt->sh_addralign = L1_CACHE_BYTES; - mod->arch.plt->sh_size = plt_max_entries * sizeof(struct plt_entry); - mod->arch.plt_num_entries = 0; - mod->arch.plt_max_entries = plt_max_entries; - return 0; -} diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 7f316982ce00..f4bc779e62e8 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -30,30 +30,17 @@ #include <asm/insn.h> #include <asm/sections.h> +#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX +#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16 + void *module_alloc(unsigned long size) { void *p; - p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base, - module_alloc_base + MODULES_VSIZE, + p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, __builtin_return_address(0)); - if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && - !IS_ENABLED(CONFIG_KASAN)) - /* - * KASAN can only deal with module allocations being served - * from the reserved module region, since the remainder of - * the vmalloc region is already backed by zero shadow pages, - * and punching holes into it is non-trivial. Since the module - * region is not randomized when KASAN is enabled, it is even - * less likely that the module region gets exhausted, so we - * can simply omit this fallback in that case. 
- */ - p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, - VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, - NUMA_NO_NODE, __builtin_return_address(0)); - if (p && (kasan_module_alloc(p, size) < 0)) { vfree(p); return NULL; @@ -88,18 +75,15 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val) static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) { + u64 imm_mask = (1 << len) - 1; s64 sval = do_reloc(op, place, val); switch (len) { case 16: *(s16 *)place = sval; - if (sval < S16_MIN || sval > U16_MAX) - return -ERANGE; break; case 32: *(s32 *)place = sval; - if (sval < S32_MIN || sval > U32_MAX) - return -ERANGE; break; case 64: *(s64 *)place = sval; @@ -108,23 +92,34 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) pr_err("Invalid length (%d) for data relocation\n", len); return 0; } + + /* + * Extract the upper value bits (including the sign bit) and + * shift them to bit 0. + */ + sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1); + + /* + * Overflow has occurred if the value is not representable in + * len bits (i.e the bottom len bits are not sign-extended and + * the top bits are not all zero). + */ + if ((u64)(sval + 1) > 2) + return -ERANGE; + return 0; } -enum aarch64_insn_movw_imm_type { - AARCH64_INSN_IMM_MOVNZ, - AARCH64_INSN_IMM_MOVKZ, -}; - static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, - int lsb, enum aarch64_insn_movw_imm_type imm_type) + int lsb, enum aarch64_insn_imm_type imm_type) { - u64 imm; + u64 imm, limit = 0; s64 sval; u32 insn = le32_to_cpu(*(u32 *)place); sval = do_reloc(op, place, val); - imm = sval >> lsb; + sval >>= lsb; + imm = sval & 0xffff; if (imm_type == AARCH64_INSN_IMM_MOVNZ) { /* @@ -133,7 +128,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, * immediate is less than zero. */ insn &= ~(3 << 29); - if (sval >= 0) { + if ((s64)imm >= 0) { /* >=0: Set the instruction to MOVZ (opcode 10b). */ insn |= 2 << 29; } else { @@ -145,13 +140,29 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, */ imm = ~imm; } + imm_type = AARCH64_INSN_IMM_MOVK; } /* Update the instruction with the new encoding. */ - insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); + insn = aarch64_insn_encode_immediate(imm_type, insn, imm); *(u32 *)place = cpu_to_le32(insn); - if (imm > U16_MAX) + /* Shift out the immediate field. */ + sval >>= 16; + + /* + * For unsigned immediates, the overflow check is straightforward. + * For signed immediates, the sign bit is actually the bit past the + * most significant bit of the field. + * The AARCH64_INSN_IMM_16 immediate type is unsigned. + */ + if (imm_type != AARCH64_INSN_IMM_16) { + sval++; + limit++; + } + + /* Check the upper bits depending on the sign of the immediate. 
*/ + if ((u64)sval > limit) return -ERANGE; return 0; @@ -256,25 +267,25 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, overflow_check = false; case R_AARCH64_MOVW_UABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, - AARCH64_INSN_IMM_MOVKZ); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G1_NC: overflow_check = false; case R_AARCH64_MOVW_UABS_G1: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, - AARCH64_INSN_IMM_MOVKZ); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G2_NC: overflow_check = false; case R_AARCH64_MOVW_UABS_G2: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, - AARCH64_INSN_IMM_MOVKZ); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G3: /* We're using the top bits so we can't overflow. */ overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48, - AARCH64_INSN_IMM_MOVKZ); + AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_SABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, @@ -291,7 +302,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_AARCH64_MOVW_PREL_G0_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, - AARCH64_INSN_IMM_MOVKZ); + AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G0: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, @@ -300,7 +311,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_AARCH64_MOVW_PREL_G1_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, - AARCH64_INSN_IMM_MOVKZ); + AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G1: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, @@ -309,7 +320,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_AARCH64_MOVW_PREL_G2_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, - AARCH64_INSN_IMM_MOVKZ); + AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G2: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, @@ -377,13 +388,6 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_AARCH64_CALL26: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26, AARCH64_INSN_IMM_26); - - if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && - ovf == -ERANGE) { - val = module_emit_plt_entry(me, &rel[i], sym); - ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, - 26, AARCH64_INSN_IMM_26); - } break; default: diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds deleted file mode 100644 index 8949f6c6f729..000000000000 --- a/arch/arm64/kernel/module.lds +++ /dev/null @@ -1,3 +0,0 @@ -SECTIONS { - .plt (NOLOAD) : { BYTE(0) } -} diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index ff4665462a02..3aa74830cc69 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -164,11 +164,8 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, frame.fp = regs->regs[29]; frame.sp = regs->sp; frame.pc = regs->pc; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = current->curr_ret_stack; -#endif - walk_stackframe(current, &frame, callchain_trace, entry); + walk_stackframe(&frame, callchain_trace, entry); } unsigned long perf_instruction_pointer(struct pt_regs *regs) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 496e2e33bbec..129fb3f8c322 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -46,7 +46,6 @@ #include <linux/notifier.h> #include <trace/events/power.h> -#include <asm/alternative.h> #include <asm/compat.h> #include <asm/cacheflush.h> #include <asm/fpsimd.h> @@ -351,9 +350,6 @@ int copy_thread(unsigned long clone_flags, 
unsigned long stack_start, } else { memset(childregs, 0, sizeof(struct pt_regs)); childregs->pstate = PSR_MODE_EL1h; - if (IS_ENABLED(CONFIG_ARM64_UAO) && - cpus_have_cap(ARM64_HAS_UAO)) - childregs->pstate |= PSR_UAO_BIT; p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x20 = stk_sz; } @@ -382,17 +378,6 @@ static void tls_thread_switch(struct task_struct *next) : : "r" (tpidr), "r" (tpidrro)); } -/* Restore the UAO state depending on next's addr_limit */ -static void uao_thread_switch(struct task_struct *next) -{ - if (IS_ENABLED(CONFIG_ARM64_UAO)) { - if (task_thread_info(next)->addr_limit == KERNEL_DS) - asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO)); - else - asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO)); - } -} - /* * Thread switching. */ @@ -405,7 +390,6 @@ struct task_struct *__switch_to(struct task_struct *prev, tls_thread_switch(next); hw_breakpoint_thread_switch(next); contextidr_thread_switch(next); - uao_thread_switch(next); /* * Complete any pending TLB or cache maintenance on this CPU in case @@ -430,14 +414,11 @@ unsigned long get_wchan(struct task_struct *p) frame.fp = thread_saved_fp(p); frame.sp = thread_saved_sp(p); frame.pc = thread_saved_pc(p); -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = p->curr_ret_stack; -#endif stack_page = (unsigned long)task_stack_page(p); do { if (frame.sp < stack_page || frame.sp >= stack_page + THREAD_SIZE || - unwind_frame(p, &frame)) + unwind_frame(&frame)) return 0; if (!in_sched_functions(frame.pc)) return frame.pc; diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index 1718706fde83..6c4fd2810ecb 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c @@ -43,11 +43,8 @@ void *return_address(unsigned int level) frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_stack_pointer; frame.pc = (unsigned long)return_address; /* dummy */ -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = current->curr_ret_stack; -#endif - walk_stackframe(current, &frame, save_return_addr, &data); + walk_stackframe(&frame, save_return_addr, &data); if (!data.level) return data.addr; diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index d1e01e6498bb..cd4eb7ee618c 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -63,7 +63,6 @@ #include <asm/memblock.h> #include <asm/efi.h> #include <asm/xen/hypervisor.h> -#include <asm/mmu_context.h> unsigned int boot_reason; EXPORT_SYMBOL(boot_reason); @@ -328,12 +327,6 @@ void __init setup_arch(char **cmdline_p) */ local_async_enable(); - /* - * TTBR0 is only used for the identity mapping at this stage. Make it - * point to zero page to avoid speculatively fetching new entries. - */ - cpu_uninstall_idmap(); - efi_init(); arm64_memblock_init(); @@ -409,32 +402,3 @@ void arch_setup_pdev_archdata(struct platform_device *pdev) pdev->archdata.dma_mask = DMA_BIT_MASK(32); pdev->dev.dma_mask = &pdev->archdata.dma_mask; } - -/* - * Dump out kernel offset information on panic. 
- */ -static int dump_kernel_offset(struct notifier_block *self, unsigned long v, - void *p) -{ - u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR; - - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) { - pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n", - kaslr_offset, KIMAGE_VADDR); - } else { - pr_emerg("Kernel Offset: disabled\n"); - } - return 0; -} - -static struct notifier_block kernel_offset_notifier = { - .notifier_call = dump_kernel_offset -}; - -static int __init register_kernel_offset_dumper(void) -{ - atomic_notifier_chain_register(&panic_notifier_list, - &kernel_offset_notifier); - return 0; -} -__initcall(register_kernel_offset_dumper); diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index e33fe33876ab..f586f7c875e2 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -173,9 +173,6 @@ ENTRY(cpu_resume) /* load physical address of identity map page table in x1 */ adrp x1, idmap_pg_dir mov sp, x2 - /* save thread_info */ - and x2, x2, #~(THREAD_SIZE - 1) - msr sp_el0, x2 /* * cpu_do_resume expects x0 to contain context physical address * pointer and x1 to contain physical address of 1:1 page tables diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index b2f5631c3785..08e78e47be95 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -154,7 +154,9 @@ asmlinkage void secondary_start_kernel(void) * TTBR0 is only used for the identity mapping at this stage. Make it * point to zero page to avoid speculatively fetching new entries. */ - cpu_uninstall_idmap(); + cpu_set_reserved_ttbr0(); + local_flush_tlb_all(); + cpu_set_default_tcr_t0sz(); preempt_disable(); trace_hardirqs_off(); @@ -457,17 +459,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) /* map the logical cpu id to cpu MPIDR */ cpu_logical_map(cpu_count) = hwid; - /* - * Set-up the ACPI parking protocol cpu entries - * while initializing the cpu_logical_map to - * avoid parsing MADT entries multiple times for - * nothing (ie a valid cpu_logical_map entry should - * contain a valid parking protocol data set to - * initialize the cpu if the parking protocol is - * the only available enable method). 
- */ - acpi_set_mailbox_entry(cpu_count, processor); - cpu_count++; } @@ -710,12 +701,10 @@ void arch_send_call_function_single_ipi(int cpu) smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC); } -#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL void arch_send_wakeup_ipi_mask(const struct cpumask *mask) { - smp_cross_call(mask, IPI_WAKEUP); + smp_cross_call_common(mask, IPI_WAKEUP); } -#endif #ifdef CONFIG_IRQ_WORK void arch_irq_work_raise(void) @@ -866,17 +855,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs) break; #endif - case IPI_CPU_BACKTRACE: - ipi_cpu_backtrace(cpu, regs); + case IPI_WAKEUP: break; -#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL - case IPI_WAKEUP: - WARN_ONCE(!acpi_parking_protocol_valid(cpu), - "CPU%u: Wake-up IPI outside the ACPI parking protocol\n", - cpu); + case IPI_CPU_BACKTRACE: + ipi_cpu_backtrace(cpu, regs); break; -#endif default: pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 85aea381fbf6..665c7fedc65b 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -18,11 +18,9 @@ #include <linux/kasan.h> #include <linux/kernel.h> #include <linux/export.h> -#include <linux/ftrace.h> #include <linux/sched.h> #include <linux/stacktrace.h> -#include <asm/irq.h> #include <asm/stacktrace.h> /* @@ -38,29 +36,15 @@ * ldp x29, x30, [sp] * add sp, sp, #0x10 */ -int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) +int notrace unwind_frame(struct stackframe *frame) { unsigned long high, low; unsigned long fp = frame->fp; - unsigned long irq_stack_ptr; - - /* - * Switching between stacks is valid when tracing current and in - * non-preemptible context. - */ - if (tsk == current && !preemptible()) - irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id()); - else - irq_stack_ptr = 0; low = frame->sp; - /* irq stacks are not THREAD_SIZE aligned */ - if (on_irq_stack(frame->sp, raw_smp_processor_id())) - high = irq_stack_ptr; - else - high = ALIGN(low, THREAD_SIZE) - 0x20; + high = ALIGN(low, THREAD_SIZE); - if (fp < low || fp > high || fp & 0xf) + if (fp < low || fp > high - 0x18 || fp & 0xf) return -EINVAL; kasan_disable_current(); @@ -69,55 +53,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->fp = *(unsigned long *)(fp); frame->pc = *(unsigned long *)(fp + 8); -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - if (tsk && tsk->ret_stack && - (frame->pc == (unsigned long)return_to_handler)) { - /* - * This is a case where function graph tracer has - * modified a return address (LR) in a stack frame - * to hook a function return. - * So replace it to an original value. - */ - frame->pc = tsk->ret_stack[frame->graph--].ret; - } -#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ - - /* - * Check whether we are going to walk through from interrupt stack - * to task stack. - * If we reach the end of the stack - and its an interrupt stack, - * unpack the dummy frame to find the original elr. - * - * Check the frame->fp we read from the bottom of the irq_stack, - * and the original task stack pointer are both in current->stack. 
- */ - if (frame->sp == irq_stack_ptr) { - struct pt_regs *irq_args; - unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr); - - if (object_is_on_stack((void *)orig_sp) && - object_is_on_stack((void *)frame->fp)) { - frame->sp = orig_sp; - - /* orig_sp is the saved pt_regs, find the elr */ - irq_args = (struct pt_regs *)orig_sp; - frame->pc = irq_args->pc; - } else { - /* - * This frame has a non-standard format, and we - * didn't fix it, because the data looked wrong. - * Refuse to output this frame. - */ - return -EINVAL; - } - } - kasan_enable_current(); return 0; } -void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, +void notrace walk_stackframe(struct stackframe *frame, int (*fn)(struct stackframe *, void *), void *data) { while (1) { @@ -125,7 +66,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, if (fn(frame, data)) break; - ret = unwind_frame(tsk, frame); + ret = unwind_frame(frame); if (ret < 0) break; } @@ -176,11 +117,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) frame.sp = current_stack_pointer; frame.pc = (unsigned long)save_stack_trace_tsk; } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = tsk->curr_ret_stack; -#endif - walk_stackframe(tsk, &frame, save_trace, &data); + walk_stackframe(&frame, save_trace, &data); if (trace->nr_entries < trace->max_entries) trace->entries[trace->nr_entries++] = ULONG_MAX; } diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 66055392f445..1095aa483a1c 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -60,6 +60,7 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) */ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { + struct mm_struct *mm = current->active_mm; int ret; unsigned long flags; @@ -86,11 +87,22 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) ret = __cpu_suspend_enter(arg, fn); if (ret == 0) { /* - * We are resuming from reset with the idmap active in TTBR0_EL1. - * We must uninstall the idmap and restore the expected MMU - * state before we can possibly return to userspace. + * We are resuming from reset with TTBR0_EL1 set to the + * idmap to enable the MMU; set the TTBR0 to the reserved + * page tables to prevent speculative TLB allocations, flush + * the local tlb and set the default tcr_el1.t0sz so that + * the TTBR0 address space set-up is properly restored. + * If the current active_mm != &init_mm we entered cpu_suspend + * with mappings in TTBR0 that must be restored, so we switch + * them back to complete the address space configuration + * restoration before returning. 
*/ - cpu_uninstall_idmap(); + cpu_set_reserved_ttbr0(); + local_flush_tlb_all(); + cpu_set_default_tcr_t0sz(); + + if (mm != &init_mm) + cpu_switch_mm(mm->pgd, mm); /* * Restore per-cpu offset before any kernel diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index 59779699a1a4..13339b6ffc1a 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c @@ -52,11 +52,8 @@ unsigned long profile_pc(struct pt_regs *regs) frame.fp = regs->regs[29]; frame.sp = regs->sp; frame.pc = regs->pc; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = -1; /* no task info */ -#endif do { - int ret = unwind_frame(NULL, &frame); + int ret = unwind_frame(&frame); if (ret < 0) return 0; } while (in_lock_functions(frame.pc)); diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 48b75ece4c17..7d54c168d19c 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -150,24 +150,17 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) { struct stackframe frame; - unsigned long irq_stack_ptr; - int skip; - - /* - * Switching between stacks is valid when tracing current and in - * non-preemptible context. - */ - if (tsk == current && !preemptible()) - irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id()); - else - irq_stack_ptr = 0; pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); if (!tsk) tsk = current; - if (tsk == current) { + if (regs) { + frame.fp = regs->regs[29]; + frame.sp = regs->sp; + frame.pc = regs->pc; + } else if (tsk == current) { frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_stack_pointer; frame.pc = (unsigned long)dump_backtrace; @@ -179,49 +172,21 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) frame.sp = thread_saved_sp(tsk); frame.pc = thread_saved_pc(tsk); } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = tsk->curr_ret_stack; -#endif - skip = !!regs; - printk("Call trace:\n"); + pr_emerg("Call trace:\n"); while (1) { unsigned long where = frame.pc; unsigned long stack; int ret; - /* skip until specified stack frame */ - if (!skip) { - dump_backtrace_entry(where); - } else if (frame.fp == regs->regs[29]) { - skip = 0; - /* - * Mostly, this is the case where this function is - * called in panic/abort. As exception handler's - * stack frame does not contain the corresponding pc - * at which an exception has taken place, use regs->pc - * instead. - */ - dump_backtrace_entry(regs->pc); - } - ret = unwind_frame(tsk, &frame); + dump_backtrace_entry(where); + ret = unwind_frame(&frame); if (ret < 0) break; stack = frame.sp; - if (in_exception_text(where)) { - /* - * If we switched to the irq_stack before calling this - * exception handler, then the pt_regs will be on the - * task stack. The easiest way to tell is if the large - * pt_regs would overlap with the end of the irq_stack. 
- */ - if (stack < irq_stack_ptr && - (stack + sizeof(struct pt_regs)) > irq_stack_ptr) - stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr); - + if (in_exception_text(where)) dump_mem("", "Exception stack", stack, stack + sizeof(struct pt_regs), false); - } } } @@ -537,22 +502,22 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) void __pte_error(const char *file, int line, unsigned long val) { - pr_err("%s:%d: bad pte %016lx.\n", file, line, val); + pr_crit("%s:%d: bad pte %016lx.\n", file, line, val); } void __pmd_error(const char *file, int line, unsigned long val) { - pr_err("%s:%d: bad pmd %016lx.\n", file, line, val); + pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val); } void __pud_error(const char *file, int line, unsigned long val) { - pr_err("%s:%d: bad pud %016lx.\n", file, line, val); + pr_crit("%s:%d: bad pud %016lx.\n", file, line, val); } void __pgd_error(const char *file, int line, unsigned long val) { - pr_err("%s:%d: bad pgd %016lx.\n", file, line, val); + pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val); } /* GENERIC_BUG traps */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 6b562a318f84..b3f48316f888 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -88,16 +88,15 @@ SECTIONS EXIT_CALL *(.discard) *(.discard.*) - *(.interp .dynamic) } - . = KIMAGE_VADDR + TEXT_OFFSET; + . = PAGE_OFFSET + TEXT_OFFSET; .head.text : { _text = .; HEAD_TEXT } - ALIGN_DEBUG_RO_MIN(PAGE_SIZE) + ALIGN_DEBUG_RO .text : { /* Real text segment */ _stext = .; /* Text and read-only data */ __exception_text_start = .; @@ -115,12 +114,14 @@ SECTIONS *(.got) /* Global offset table */ } + ALIGN_DEBUG_RO RO_DATA(PAGE_SIZE) EXCEPTION_TABLE(8) NOTES + ALIGN_DEBUG_RO + _etext = .; /* End of text and rodata section */ ALIGN_DEBUG_RO_MIN(PAGE_SIZE) - _etext = .; /* End of text and rodata section */ __init_begin = .; INIT_TEXT_SECTION(8) @@ -128,6 +129,7 @@ SECTIONS ARM_EXIT_KEEP(EXIT_TEXT) } + ALIGN_DEBUG_RO_MIN(16) .init.data : { INIT_DATA INIT_SETUP(16) @@ -142,6 +144,9 @@ SECTIONS PERCPU_SECTION(L1_CACHE_BYTES) + . = ALIGN(PAGE_SIZE); + __init_end = .; + . = ALIGN(4); .altinstructions : { __alt_instructions = .; @@ -151,25 +156,8 @@ SECTIONS .altinstr_replacement : { *(.altinstr_replacement) } - .rela : ALIGN(8) { - __reloc_start = .; - *(.rela .rela*) - __reloc_end = .; - } - .dynsym : ALIGN(8) { - __dynsym_start = .; - *(.dynsym) - } - .dynstr : { - *(.dynstr) - } - .hash : { - *(.hash) - } . = ALIGN(PAGE_SIZE); - __init_end = .; - _data = .; _sdata = .; RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) @@ -203,4 +191,4 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, /* * If padding is applied before .head.text, virt<->phys conversions will fail. */ -ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned") +ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned") |
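
Editor's note: a standalone, user-space sketch (not kernel code) of the overflow test that the module.c hunk above restores in reloc_data(). The helper name fits_in_bits() and the test values are mine; the sketch assumes two's-complement arithmetic right shifts (as the kernel code does) and len < 64. It only illustrates why the restored check rejects precisely the values that fit in len bits neither as a signed nor as an unsigned quantity.

/*
 * After writing the low `len` bits, reloc_data() shifts the upper bits of
 * the relocated value (including the sign bit) down to bit 0. The result
 * must be 0 (non-negative, fits in len-1 bits), 1 (unsigned value using
 * bit len-1) or -1 (negative, properly sign-extended); anything else is
 * an overflow, hence the `(u64)(sval + 1) > 2` test.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool fits_in_bits(int64_t sval, int len)	/* len in 1..63 here */
{
	uint64_t imm_mask = (1ULL << len) - 1;

	/* Keep bits [63:len-1] and shift them down to bit 0 (arithmetic). */
	sval = (int64_t)(sval & ~(imm_mask >> 1)) >> (len - 1);

	return (uint64_t)(sval + 1) <= 2;
}

int main(void)
{
	printf("%d\n", fits_in_bits(0x7fff, 16));	/* 1: fits as s16 */
	printf("%d\n", fits_in_bits(0xffff, 16));	/* 1: fits as u16 */
	printf("%d\n", fits_in_bits(-0x8000, 16));	/* 1: fits as s16 */
	printf("%d\n", fits_in_bits(0x10000, 16));	/* 0: overflows   */
	return 0;
}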
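
Editor's note: the R_AARCH64_MOVW_UABS_G0..G3 cases above each patch one 16-bit slice of an absolute value, (val >> lsb) & 0xffff, into the immediate field of a MOVZ/MOVK instruction (the _NC variants skip the range check). The user-space sketch below is purely illustrative; the constant and the printed sequence are hypothetical, and the output shows one possible MOVZ-plus-MOVK sequence that such a relocation group fills in.

/*
 * Print the four 16-bit group immediates of a 64-bit value, i.e. what the
 * G0..G3 relocations would patch into a movz/movk sequence that rebuilds
 * the value at run time.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0xffff000008123456ULL;	/* arbitrary example address */
	const char *insn = "movz";

	for (int lsb = 0; lsb < 64; lsb += 16) {
		unsigned int imm = (val >> lsb) & 0xffff;	/* Gn immediate */

		printf("%s x0, #0x%04x, lsl #%d\n", insn, imm, lsb);
		insn = "movk";	/* only the first write may be a MOVZ */
	}
	return 0;
}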
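
Editor's note: with the IRQ-stack and function-graph-tracer bookkeeping removed, unwind_frame() in the stacktrace.c hunk above is back to walking bare AArch64 frame records: the frame pointer addresses a 16-byte pair holding the caller's frame pointer at offset 0 and the saved return address at offset 8. The sketch below walks a fabricated chain of such records so it runs anywhere; the struct name and the placeholder addresses are mine, not kernel code.

#include <stdint.h>
#include <stdio.h>

struct frame_record {
	uint64_t fp;	/* caller's frame record */
	uint64_t pc;	/* saved link register, i.e. the return address */
};

int main(void)
{
	/* Fabricated three-deep call chain: leaf -> mid -> outer -> 0. */
	struct frame_record outer = { .fp = 0,                .pc = 0x40001000 };
	struct frame_record mid   = { .fp = (uint64_t)&outer, .pc = 0x40002000 };
	struct frame_record leaf  = { .fp = (uint64_t)&mid,   .pc = 0x40003000 };

	uint64_t fp = (uint64_t)&leaf;

	while (fp) {
		/* Mirrors: frame->fp = *(fp); frame->pc = *(fp + 8); */
		uint64_t next_fp = *(uint64_t *)fp;
		uint64_t pc      = *(uint64_t *)(fp + 8);

		printf("pc %#llx\n", (unsigned long long)pc);
		fp = next_fp;
	}
	return 0;
}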
