Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/devtree.c          9
-rw-r--r--  arch/arm/kernel/irq.c             11
-rw-r--r--  arch/arm/kernel/perf_event_v7.c   15
-rw-r--r--  arch/arm/kernel/psci_smp.c         4
-rw-r--r--  arch/arm/kernel/setup.c           18
-rw-r--r--  arch/arm/kernel/smp.c             67
-rw-r--r--  arch/arm/kernel/stacktrace.c       5
-rw-r--r--  arch/arm/kernel/topology.c       282
-rw-r--r--  arch/arm/kernel/traps.c           37
9 files changed, 379 insertions, 69 deletions
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 65addcbf5b30..e94422d0405c 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -88,6 +88,7 @@ void __init arm_dt_init_cpu_maps(void)
for_each_child_of_node(cpus, cpu) {
u32 hwid;
+ const __be32 *cell;
if (of_node_cmp(cpu->type, "cpu"))
continue;
@@ -98,13 +99,13 @@ void __init arm_dt_init_cpu_maps(void)
* properties is considered invalid to build the
* cpu_logical_map.
*/
- if (of_property_read_u32(cpu, "reg", &hwid)) {
- pr_debug(" * %s missing reg property\n",
- cpu->full_name);
+ cell = of_get_property(cpu, "reg", NULL);
+ if (!cell) {
+ pr_err("%s: missing reg property\n", cpu->full_name);
of_node_put(cpu);
return;
}
-
+ hwid = of_read_number(cell, of_n_addr_cells(cpu));
/*
* 8 MSBs must be set to 0 in the DT since the reg property
* defines the MPIDR[23:0].
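[Note: of_property_read_u32() always reads a single cell, while the length of a cpu node's reg follows the #address-cells of /cpus; the of_get_property() + of_read_number() pairing above honors that. A minimal sketch of the new pattern, with illustrative values not taken from this patch:

	const __be32 *cell = of_get_property(cpu, "reg", NULL);
	u64 hwid;

	/* of_read_number() folds of_n_addr_cells(cpu) big-endian cells
	 * into one value, so reg = <0x0 0x100> under #address-cells = <2>
	 * still yields hwid 0x100 rather than the first cell's 0x0. */
	if (cell)
		hwid = of_read_number(cell, of_n_addr_cells(cpu));
]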
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 1d45320ee125..f56a831de043 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -37,6 +37,7 @@
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
+#include <linux/cpumask.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-uniphier.h>
@@ -127,6 +128,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
const struct cpumask *affinity = irq_data_get_affinity_mask(d);
struct irq_chip *c;
bool ret = false;
+ struct cpumask available_cpus;
/*
* If this is a per-CPU interrupt, or the affinity does not
@@ -135,8 +137,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
return false;
+ cpumask_copy(&available_cpus, affinity);
+ cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask);
+ affinity = &available_cpus;
+
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
+ cpumask_andnot(&available_cpus, cpu_online_mask,
+ cpu_isolated_mask);
+ if (cpumask_empty(affinity))
+ affinity = cpu_online_mask;
ret = true;
}
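[Note: the migration target selection above now has three tiers. A hedged restatement as a standalone helper (cpu_isolated_mask is assumed to exist in this tree; it is not part of mainline, and the helper name is hypothetical):

	static const struct cpumask *pick_migration_target(
			const struct cpumask *affinity,
			struct cpumask *scratch)
	{
		/* 1. Prefer the IRQ's affinity minus isolated CPUs. */
		cpumask_andnot(scratch, affinity, cpu_isolated_mask);
		if (cpumask_any_and(scratch, cpu_online_mask) < nr_cpu_ids)
			return scratch;

		/* 2. Fall back to online, non-isolated CPUs. */
		cpumask_andnot(scratch, cpu_online_mask, cpu_isolated_mask);
		if (!cpumask_empty(scratch))
			return scratch;

		/* 3. Last resort: any online CPU. */
		return cpu_online_mask;
	}
]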
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 126dc679b230..0938911e4df0 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -600,6 +600,11 @@ static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
#define ARMV7_EXCLUDE_USER (1 << 30)
#define ARMV7_INCLUDE_HYP (1 << 27)
+/*
+ * Secure debug enable reg
+ */
+#define ARMV7_SDER_SUNIDEN BIT(1) /* Permit non-invasive debug */
+
static inline u32 armv7_pmnc_read(void)
{
u32 val;
@@ -982,7 +987,13 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
static void armv7pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
- u32 idx, nb_cnt = cpu_pmu->num_events;
+ u32 idx, nb_cnt = cpu_pmu->num_events, val;
+
+ if (cpu_pmu->secure_access) {
+ asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
+ val |= ARMV7_SDER_SUNIDEN;
+ asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
+ }
/* The counter and interrupt enable registers are unknown at reset. */
for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
@@ -991,7 +1002,7 @@ static void armv7pmu_reset(void *info)
}
/* Initialize & Reset PMNC: C and P bits */
- armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+ armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static int armv7_a8_map_event(struct perf_event *event)
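[Note: two behavioural points in the reset path above: SDER is only writable from the secure state, hence the cpu_pmu->secure_access gate, and the PMNC write is now a read-modify-write so control bits programmed earlier survive the counter reset. The SDER sequence as a helper sketch (helper name hypothetical):

	static inline void armv7_sder_enable_suniden(void)
	{
		u32 val;

		/* SDER, the Secure Debug Enable Register (CP15 c1, c1, 1). */
		asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
		val |= ARMV7_SDER_SUNIDEN;	/* permit non-invasive debug */
		asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
	}
]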
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 9d479b2ea40d..aaf7a2561427 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -98,12 +98,12 @@ int psci_cpu_kill(unsigned int cpu)
for (i = 0; i < 10; i++) {
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
- pr_info("CPU%d killed.\n", cpu);
+ pr_debug("CPU%d killed.\n", cpu);
return 1;
}
msleep(10);
- pr_info("Retrying again to check for CPU kill\n");
+ pr_debug("Retrying again to check for CPU kill\n");
}
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index bf63b4693457..cc1e5ae35dfb 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -111,6 +111,15 @@ unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);
+char* (*arch_read_hardware_id)(void);
+EXPORT_SYMBOL(arch_read_hardware_id);
+
+unsigned int boot_reason;
+EXPORT_SYMBOL(boot_reason);
+
+unsigned int cold_boot;
+EXPORT_SYMBOL(cold_boot);
+
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
@@ -932,6 +941,8 @@ void __init hyp_mode_check(void)
#endif
}
+void __init __weak init_random_pool(void) { }
+
void __init setup_arch(char **cmdline_p)
{
const struct machine_desc *mdesc;
@@ -1011,6 +1022,8 @@ void __init setup_arch(char **cmdline_p)
if (mdesc->init_early)
mdesc->init_early();
+
+ init_random_pool();
}
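[Note: init_random_pool() is declared __weak above so a platform can override it before setup_arch() invokes it. A hypothetical override; get_fw_random_seed() is made up for illustration, only add_device_randomness() is a real API:

	/* Hypothetical platform code seeding the pool from firmware. */
	void __init init_random_pool(void)
	{
		u32 seed = get_fw_random_seed();	/* not a real API */

		add_device_randomness(&seed, sizeof(seed));
	}
]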
@@ -1135,7 +1148,10 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
}
- seq_printf(m, "Hardware\t: %s\n", machine_name);
+ if (!arch_read_hardware_id)
+ seq_printf(m, "Hardware\t: %s\n", machine_name);
+ else
+ seq_printf(m, "Hardware\t: %s\n", arch_read_hardware_id());
seq_printf(m, "Revision\t: %04x\n", system_rev);
seq_printf(m, "Serial\t\t: %s\n", system_serial);
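[Note: with the hook above, the "Hardware" line of /proc/cpuinfo can be supplied at runtime instead of from machine_name. A hypothetical SoC driver wiring it up; names and the string are illustrative:

	static char *example_soc_hardware_id(void)
	{
		return "Example SoC v1";	/* shown in /proc/cpuinfo */
	}

	static int __init example_soc_init(void)
	{
		arch_read_hardware_id = example_soc_hardware_id;
		return 0;
	}
	early_initcall(example_soc_init);
]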
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b26361355dae..a1e4ff99d1bc 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -384,12 +384,12 @@ asmlinkage void secondary_start_kernel(void)
if (smp_ops.smp_secondary_init)
smp_ops.smp_secondary_init(cpu);
+ smp_store_cpu_info(cpu);
+
notify_cpu_starting(cpu);
calibrate_delay();
- smp_store_cpu_info(cpu);
-
/*
* OK, now it's safe to let the boot CPU continue. Wait for
* the CPU migration code to notice that the CPU is online
@@ -462,6 +462,18 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+DEFINE_PER_CPU(bool, pending_ipi);
+
+static void smp_cross_call_common(const struct cpumask *cpumask,
+ unsigned int func)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, cpumask)
+ per_cpu(pending_ipi, cpu) = true;
+
+ __smp_cross_call(cpumask, func);
+}
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
@@ -469,6 +481,21 @@ void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
__smp_cross_call = fn;
}
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call_common(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call_common(mask, IPI_WAKEUP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -483,6 +510,11 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, target)
+ per_cpu(pending_ipi, cpu) = true;
+
trace_ipi_raise(target, ipi_types[ipinr]);
__smp_cross_call(target, ipinr);
}
@@ -513,21 +545,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
return sum;
}
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_WAKEUP);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
@@ -558,7 +575,7 @@ static void ipi_cpu_stop(unsigned int cpu)
raw_spin_unlock(&stop_lock);
}
- set_cpu_online(cpu, false);
+ set_cpu_active(cpu, false);
local_fiq_disable();
local_irq_disable();
@@ -660,12 +677,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
if ((unsigned)ipinr < NR_IPI)
trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+
+ per_cpu(pending_ipi, cpu) = false;
+
set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
- smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+ BUG_ON(cpu_is_offline(cpu));
+ smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
@@ -676,14 +697,14 @@ void smp_send_stop(void)
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
if (!cpumask_empty(&mask))
- smp_cross_call(&mask, IPI_CPU_STOP);
+ smp_cross_call_common(&mask, IPI_CPU_STOP);
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
- while (num_online_cpus() > 1 && timeout--)
+ while (num_active_cpus() > 1 && timeout--)
udelay(1);
- if (num_online_cpus() > 1)
+ if (num_active_cpus() > 1)
pr_warn("SMP: failed to stop secondary CPUs\n");
}
@@ -758,7 +779,7 @@ static void raise_nmi(cpumask_t *mask)
if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
nmi_cpu_backtrace(NULL);
- smp_cross_call(mask, IPI_CPU_BACKTRACE);
+ smp_cross_call_common(mask, IPI_CPU_BACKTRACE);
}
void arch_trigger_all_cpu_backtrace(bool include_self)
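[Note: all IPI senders above are funnelled through smp_cross_call_common(), so pending_ipi is set before the hardware kick and cleared at the tail of handle_IPI(). A hypothetical consumer, not part of this diff, could be a low-power path that refuses deep idle while a kick is in flight:

	DECLARE_PER_CPU(bool, pending_ipi);

	static bool cpu_ipi_in_flight(int cpu)
	{
		/* True between smp_cross_call_common() and handle_IPI(). */
		return per_cpu(pending_ipi, cpu);
	}
]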
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 92b72375c4c7..5964c77c593d 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -1,4 +1,5 @@
#include <linux/export.h>
+#include <linux/kasan.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
@@ -35,11 +36,15 @@ int notrace unwind_frame(struct stackframe *frame)
if (fp < low + 12 || fp > high - 4)
return -EINVAL;
+ kasan_disable_current();
+
/* restore the registers from the stack frame */
frame->fp = *(unsigned long *)(fp - 12);
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
+ kasan_enable_current();
+
return 0;
}
#endif
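[Note: the three loads above walk a stack frame whose contents KASAN has not tracked, so checking is suspended around them. Elsewhere the kernel handles the same problem per access with READ_ONCE_NOCHECK(); a sketch of that alternative form:

	/* Equivalent per-load form using the no-check accessor. */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
	frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
]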
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 4f2c51ef162d..d7533f0e227b 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -58,6 +58,151 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
per_cpu(cpu_scale, cpu) = capacity;
}
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ of_node_put(cpu_node);
+ return cpu;
+ }
+ }
+
+ pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+
+ of_node_put(cpu_node);
+ return -EINVAL;
+}
+
+static int __init parse_core(struct device_node *core, int cluster_id,
+ int core_id)
+{
+ char name[10];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else {
+ pr_err("%s: Can't get CPU for thread\n",
+ t->full_name);
+ of_node_put(t);
+ return -EINVAL;
+ }
+ of_node_put(t);
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%s: Core has both threads and CPU\n",
+ core->full_name);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf) {
+ pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+ char name[10];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ int core_id = 0;
+ int i, ret;
+ static int cluster_id __initdata;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ leaf = false;
+ ret = parse_cluster(c, depth + 1);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%s: cpu-map children should be clusters\n",
+ c->full_name);
+ of_node_put(c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, cluster_id, core_id++);
+ } else {
+ pr_err("%s: Non-leaf cluster with core %s\n",
+ cluster->full_name, name);
+ ret = -EINVAL;
+ }
+
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%s: empty cluster\n", cluster->full_name);
+
+ if (leaf)
+ cluster_id++;
+
+ return 0;
+}
+
+static DEFINE_PER_CPU(unsigned long, cpu_efficiency) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_get_cpu_efficiency(int cpu)
+{
+ return per_cpu(cpu_efficiency, cpu);
+}
+
#ifdef CONFIG_OF
struct cpu_efficiency {
const char *compatible;
@@ -93,21 +238,48 @@ static unsigned long middle_capacity = 1;
* 'average' CPU is of middle capacity. Also see the comments near
* table_efficiency[] and update_cpu_capacity().
*/
-static void __init parse_dt_topology(void)
+static int __init parse_dt_topology(void)
{
const struct cpu_efficiency *cpu_eff;
- struct device_node *cn = NULL;
+ struct device_node *cn = NULL, *map;
unsigned long min_capacity = ULONG_MAX;
unsigned long max_capacity = 0;
unsigned long capacity = 0;
- int cpu = 0;
+ int cpu = 0, ret = 0;
__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
GFP_NOWAIT);
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ map = of_get_child_by_name(cn, "cpu-map");
+ if (!map)
+ goto out;
+
+ ret = parse_cluster(map, 0);
+ if (ret != 0)
+ goto out_map;
+
+ /*
+ * Check that all cores are in the topology; the SMP code will
+ * only mark cores described in the DT as possible.
+ */
+ for_each_possible_cpu(cpu)
+ if (cpu_topology[cpu].cluster_id == -1)
+ ret = -EINVAL;
+
for_each_possible_cpu(cpu) {
const u32 *rate;
int len;
+ u32 efficiency;
/* too early to use cpu->of_node */
cn = of_get_cpu_node(cpu, NULL);
@@ -116,12 +288,26 @@ static void __init parse_dt_topology(void)
continue;
}
- for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
- if (of_device_is_compatible(cn, cpu_eff->compatible))
- break;
+ /*
+ * The CPU efficiency value passed from the device tree
+ * overrides the value defined in the table_efficiency[]
+ */
+ if (of_property_read_u32(cn, "efficiency", &efficiency) < 0) {
+
+ for (cpu_eff = table_efficiency;
+ cpu_eff->compatible; cpu_eff++)
+ if (of_device_is_compatible(cn,
+ cpu_eff->compatible))
+ break;

- if (cpu_eff->compatible == NULL)
- continue;
+ if (cpu_eff->compatible == NULL)
+ continue;
+
+ efficiency = cpu_eff->efficiency;
+ }
+
+ per_cpu(cpu_efficiency, cpu) = efficiency;
rate = of_get_property(cn, "clock-frequency", &len);
if (!rate || len != 4) {
@@ -130,7 +316,7 @@ static void __init parse_dt_topology(void)
continue;
}
- capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+ capacity = ((be32_to_cpup(rate)) >> 20) * efficiency;
/* Save min capacity of the system */
if (capacity < min_capacity)
@@ -156,7 +342,11 @@ static void __init parse_dt_topology(void)
else
middle_capacity = ((max_capacity / 3)
>> (SCHED_CAPACITY_SHIFT-1)) + 1;
-
+out_map:
+ of_node_put(map);
+out:
+ of_node_put(cn);
+ return ret;
}
static const struct sched_group_energy * const cpu_core_energy(int cpu);
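[Note: parse_cluster()/parse_core() above walk a /cpus/cpu-map node whose children are cluster%d nodes containing core%d nodes, optionally containing thread%d nodes, each leaf holding a cpu phandle. Once parsing succeeds every possible CPU has a cluster_id and core_id, and the per-cpu efficiency is exported. A hedged consumer sketch; the cputopo_arm fields and arch_get_cpu_efficiency() are introduced by this patch, the function name is hypothetical:

	static void __init dump_parsed_topology(void)
	{
		unsigned int cpu;

		for_each_possible_cpu(cpu)
			pr_info("CPU%u: cluster %d, core %d, efficiency %lu\n",
				cpu, cpu_topology[cpu].cluster_id,
				cpu_topology[cpu].core_id,
				arch_get_cpu_efficiency(cpu));
	}
]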
@@ -182,7 +372,7 @@ static void update_cpu_capacity(unsigned int cpu)
}
#else
-static inline void parse_dt_topology(void) {}
+static inline int parse_dt_topology(void) { return 0; }
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif
@@ -215,7 +405,7 @@ static void update_siblings_masks(unsigned int cpuid)
for_each_possible_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
- if (cpuid_topo->socket_id != cpu_topo->socket_id)
+ if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
@@ -242,9 +432,8 @@ void store_cpu_topology(unsigned int cpuid)
struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
unsigned int mpidr;
- /* If the cpu topology has been already set, just return */
if (cpuid_topo->core_id != -1)
- return;
+ goto topology_populated;
mpidr = read_cpuid_mpidr();
@@ -259,12 +448,12 @@ void store_cpu_topology(unsigned int cpuid)
/* core performance interdependency */
cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
} else {
/* largely independent cores */
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
} else {
/*
@@ -274,17 +463,17 @@ void store_cpu_topology(unsigned int cpuid)
*/
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = 0;
- cpuid_topo->socket_id = -1;
+ cpuid_topo->cluster_id = -1;
}
- update_siblings_masks(cpuid);
-
- update_cpu_capacity(cpuid);
-
- pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+ pr_info("CPU%u: thread %d, cpu %d, cluster %d, mpidr %x\n",
cpuid, cpu_topology[cpuid].thread_id,
cpu_topology[cpuid].core_id,
- cpu_topology[cpuid].socket_id, mpidr);
+ cpu_topology[cpuid].cluster_id, mpidr);
+
+topology_populated:
+ update_siblings_masks(cpuid);
+ update_cpu_capacity(cpuid);
}
/*
@@ -397,14 +586,14 @@ static struct sched_group_energy energy_core_a15 = {
static inline
const struct sched_group_energy * const cpu_cluster_energy(int cpu)
{
- return cpu_topology[cpu].socket_id ? &energy_cluster_a7 :
+ return cpu_topology[cpu].cluster_id ? &energy_cluster_a7 :
&energy_cluster_a15;
}
static inline
const struct sched_group_energy * const cpu_core_energy(int cpu)
{
- return cpu_topology[cpu].socket_id ? &energy_core_a7 :
+ return cpu_topology[cpu].cluster_id ? &energy_core_a7 :
&energy_core_a15;
}
@@ -422,29 +611,50 @@ static struct sched_domain_topology_level arm_topology[] = {
{ NULL, },
};
-/*
- * init_cpu_topology is called at boot when only one cpu is running
- * which prevent simultaneous write access to cpu_topology array
- */
-void __init init_cpu_topology(void)
+static void __init reset_cpu_topology(void)
{
unsigned int cpu;
- /* init core mask and capacity */
for_each_possible_cpu(cpu) {
- struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
+ struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
cpu_topo->thread_id = -1;
- cpu_topo->core_id = -1;
- cpu_topo->socket_id = -1;
+ cpu_topo->core_id = -1;
+ cpu_topo->cluster_id = -1;
+
cpumask_clear(&cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
+ }
+}
+
+static void __init reset_cpu_capacity(void)
+{
+ unsigned int cpu;
+ for_each_possible_cpu(cpu)
set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
- }
+}
+
+/*
+ * init_cpu_topology is called at boot when only one cpu is running
+ * which prevent simultaneous write access to cpu_topology array
+ */
+void __init init_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ /* init core mask and capacity */
+ reset_cpu_topology();
+ reset_cpu_capacity();
smp_wmb();
- parse_dt_topology();
+ if (parse_dt_topology()) {
+ reset_cpu_topology();
+ reset_cpu_capacity();
+ }
+
+ for_each_possible_cpu(cpu)
+ update_siblings_masks(cpu);
/* Set scheduler topology descriptor */
set_sched_topology(arm_topology);
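[Note: when no usable cpu-map is found, store_cpu_topology() still derives the hierarchy from MPIDR, as the renamed cluster_id fields show. The decoding for the non-multithreaded multiprocessing case, restated as a sketch:

	u32 mpidr = read_cpuid_mpidr();

	/* Affinity level 0 numbers the core, level 1 the cluster. */
	int core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
]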
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bc698383e822..cdefc69c656b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -28,6 +28,7 @@
#include <linux/irq.h>
#include <linux/atomic.h>
+#include <asm/arch_timer.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/unistd.h>
@@ -697,6 +698,42 @@ late_initcall(arm_mrc_hook_init);
#endif
+static int get_pct_trap(struct pt_regs *regs, unsigned int instr)
+{
+ u64 cntpct;
+ unsigned int res;
+ int rd = (instr >> 12) & 0xF;
+ int rn = (instr >> 16) & 0xF;
+
+ res = arm_check_condition(instr, regs->ARM_cpsr);
+ if (res == ARM_OPCODE_CONDTEST_FAIL) {
+ regs->ARM_pc += 4;
+ return 0;
+ }
+
+ if (rd == 15 || rn == 15)
+ return 1;
+ cntpct = arch_counter_get_cntpct();
+ regs->uregs[rd] = cntpct;
+ regs->uregs[rn] = cntpct >> 32;
+ regs->ARM_pc += 4;
+ return 0;
+}
+
+static struct undef_hook get_pct_hook = {
+ .instr_mask = 0x0ff00fff,
+ .instr_val = 0x0c500f0e,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = USR_MODE,
+ .fn = get_pct_trap,
+};
+
+void get_pct_hook_init(void)
+{
+ register_undef_hook(&get_pct_hook);
+}
+EXPORT_SYMBOL(get_pct_hook_init);
+
/*
* A data abort trap was taken, but we did not handle the instruction.
* Try to abort the user program, or panic if it was the kernel.
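[Note: the undef hook above matches the encoding 0x0c500f0e, i.e. "mrrc p15, 0, Rt, Rt2, c14", a userspace CNTPCT read that traps when direct counter access is disabled. What the trapped code looks like from userspace, as a sketch:

	/* Userspace sketch: the kernel emulates this instruction when
	 * CNTKCTL forbids direct access. Rt receives the low word and
	 * Rt2 the high word, matching regs->uregs[rd]/[rn] in
	 * get_pct_trap(). */
	static inline unsigned long long read_cntpct(void)
	{
		unsigned int lo, hi;

		asm volatile("mrrc p15, 0, %0, %1, c14"
			     : "=r" (lo), "=r" (hi));
		return ((unsigned long long)hi << 32) | lo;
	}
]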