Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile                  |   2
-rw-r--r--  arch/mips/kernel/asm-offsets.c             |   9
-rw-r--r--  arch/mips/kernel/cevt-bcm1480.c            |   2
-rw-r--r--  arch/mips/kernel/cevt-ds1287.c             |   1
-rw-r--r--  arch/mips/kernel/cps-vec.S                 |  19
-rw-r--r--  arch/mips/kernel/ftrace.c                  |   2
-rw-r--r--  arch/mips/kernel/genex.S                   |  71
-rw-r--r--  arch/mips/kernel/gpio_txx9.c               |   8
-rw-r--r--  arch/mips/kernel/head.S                    |   2
-rw-r--r--  arch/mips/kernel/idle.c                    |   7
-rw-r--r--  arch/mips/kernel/mips-cm.c                 |  18
-rw-r--r--  arch/mips/kernel/mips-mt.c                 |   7
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c       |   3
-rw-r--r--  arch/mips/kernel/pm-cps.c                  |  35
-rw-r--r--  arch/mips/kernel/proc.c                    |  17
-rw-r--r--  arch/mips/kernel/prom.c                    |   2
-rw-r--r--  arch/mips/kernel/ptrace.c                  |  54
-rw-r--r--  arch/mips/kernel/relocate.c                |   2
-rw-r--r--  arch/mips/kernel/scall32-o32.S             |   8
-rw-r--r--  arch/mips/kernel/setup.c                   |   5
-rw-r--r--  arch/mips/kernel/smp-cps.c                 | 338
-rw-r--r--  arch/mips/kernel/smp.c                     |  18
-rw-r--r--  arch/mips/kernel/spram.c                   |   4
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n32.tbl  |   5
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n64.tbl  |   5
-rw-r--r--  arch/mips/kernel/syscalls/syscall_o32.tbl  |   5
-rw-r--r--  arch/mips/kernel/traps.c                   |  57
-rw-r--r--  arch/mips/kernel/vdso.c                    |  58
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S             |   1
-rw-r--r--  arch/mips/kernel/vpe.c                     |   3
30 files changed, 527 insertions(+), 241 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index ecf3278a32f7..95a1e674fd67 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -3,7 +3,7 @@
# Makefile for the Linux/MIPS kernel.
#
-extra-y := vmlinux.lds
+always-$(KBUILD_BUILTIN) := vmlinux.lds
obj-y += head.o branch.o cmpxchg.o elf.o entry.o genex.o idle.o irq.o \
process.o prom.o ptrace.o reset.o setup.o signal.o \
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index cb1045ebab06..1e29efcba46e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -27,6 +27,12 @@ void output_ptreg_defines(void);
void output_ptreg_defines(void)
{
COMMENT("MIPS pt_regs offsets.");
+#ifdef CONFIG_32BIT
+ OFFSET(PT_ARG4, pt_regs, args[4]);
+ OFFSET(PT_ARG5, pt_regs, args[5]);
+ OFFSET(PT_ARG6, pt_regs, args[6]);
+ OFFSET(PT_ARG7, pt_regs, args[7]);
+#endif
OFFSET(PT_R0, pt_regs, regs[0]);
OFFSET(PT_R1, pt_regs, regs[1]);
OFFSET(PT_R2, pt_regs, regs[2]);
@@ -404,6 +410,9 @@ void output_cps_defines(void)
{
COMMENT(" MIPS CPS offsets. ");
+ OFFSET(CLUSTERBOOTCFG_CORECONFIG, cluster_boot_config, core_config);
+ DEFINE(CLUSTERBOOTCFG_SIZE, sizeof(struct cluster_boot_config));
+
OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index d39a2963b451..2a14dc4ee57e 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -103,7 +103,7 @@ void sb1480_clockevent_init(void)
BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
- sprintf(name, "bcm1480-counter-%d", cpu);
+ sprintf(name, "bcm1480-counter-%u", cpu);
cd->name = name;
cd->features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index 9a47fbcd4638..de64d6bb7ba3 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -10,6 +10,7 @@
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
+#include <asm/ds1287.h>
#include <asm/time.h>
int ds1287_timer_state(void)
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index f876309130ad..2ae7034a3d5c 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -19,6 +19,10 @@
#define GCR_CPC_BASE_OFS 0x0088
#define GCR_CL_COHERENCE_OFS 0x2008
#define GCR_CL_ID_OFS 0x2028
+#define CM3_GCR_Cx_ID_CLUSTER_SHF 8
+#define CM3_GCR_Cx_ID_CLUSTER_MSK (0xff << 8)
+#define CM3_GCR_Cx_ID_CORENUM_SHF 0
+#define CM3_GCR_Cx_ID_CORENUM_MSK (0xff << 0)
#define CPC_CL_VC_STOP_OFS 0x2020
#define CPC_CL_VC_RUN_OFS 0x2028
@@ -271,12 +275,21 @@ LEAF(mips_cps_core_init)
*/
LEAF(mips_cps_get_bootcfg)
/* Calculate a pointer to this cores struct core_boot_config */
+ PTR_LA v0, mips_cps_cluster_bootcfg
+ PTR_L v0, 0(v0)
lw t0, GCR_CL_ID_OFS(s1)
+#ifdef CONFIG_CPU_MIPSR6
+ ext t1, t0, CM3_GCR_Cx_ID_CLUSTER_SHF, 8
+ li t2, CLUSTERBOOTCFG_SIZE
+ mul t1, t1, t2
+ PTR_ADDU \
+ v0, v0, t1
+#endif
+ PTR_L v0, CLUSTERBOOTCFG_CORECONFIG(v0)
+ andi t0, t0, CM3_GCR_Cx_ID_CORENUM_MSK
li t1, COREBOOTCFG_SIZE
mul t0, t0, t1
- PTR_LA t1, mips_cps_core_bootcfg
- PTR_L t1, 0(t1)
- PTR_ADDU v0, t0, t1
+ PTR_ADDU v0, v0, t0
/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
li t9, 0
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 8c401e42301c..f39e85fd58fa 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -248,7 +248,7 @@ int ftrace_disable_ftrace_graph_caller(void)
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
-unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
unsigned long sp, ip, tmp;
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index a572ce36a24f..08c0a01d9a29 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -104,48 +104,59 @@ handle_vcei:
__FINIT
- .align 5 /* 32 byte rollback region */
-LEAF(__r4k_wait)
- .set push
- .set noreorder
- /* start of rollback region */
- LONG_L t0, TI_FLAGS($28)
- nop
- andi t0, _TIF_NEED_RESCHED
- bnez t0, 1f
- nop
- nop
- nop
-#ifdef CONFIG_CPU_MICROMIPS
- nop
- nop
- nop
- nop
-#endif
+ .section .cpuidle.text,"ax"
+ /* Align to 32 bytes for the maximum idle interrupt region size. */
+ .align 5
+LEAF(r4k_wait)
+ /* Keep the ISA bit clear for calculations on local labels here. */
+0: .fill 0
+ /* Start of idle interrupt region. */
+ local_irq_enable
+ /*
+ * If an interrupt lands here, before going idle on the next
+ * instruction, we must *NOT* go idle since the interrupt could
+ * have set TIF_NEED_RESCHED or caused a timer to need resched.
+ * Fall through -- see skipover_handler below -- and have the
+ * idle loop take care of things.
+ */
+1: .fill 0
+ /* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */
+ .if 1b - 0b > 32
+ .error "overlong idle interrupt region"
+ .elseif 1b - 0b > 8
+ .align 4
+ .endif
+2: .fill 0
+ .equ r4k_wait_idle_size, 2b - 0b
+ /* End of idle interrupt region; size has to be a power of 2. */
.set MIPS_ISA_ARCH_LEVEL_RAW
+r4k_wait_insn:
wait
- /* end of rollback region (the region size must be power of two) */
-1:
+r4k_wait_exit:
+ .set mips0
+ local_irq_disable
jr ra
- nop
- .set pop
- END(__r4k_wait)
+ END(r4k_wait)
+ .previous
- .macro BUILD_ROLLBACK_PROLOGUE handler
- FEXPORT(rollback_\handler)
+ .macro BUILD_SKIPOVER_PROLOGUE handler
+ FEXPORT(skipover_\handler)
.set push
.set noat
MFC0 k0, CP0_EPC
- PTR_LA k1, __r4k_wait
- ori k0, 0x1f /* 32 byte rollback region */
- xori k0, 0x1f
+ /* Subtract/add 2 to let the ISA bit propagate through the mask. */
+ PTR_LA k1, r4k_wait_insn - 2
+ ori k0, r4k_wait_idle_size - 2
+ .set noreorder
bne k0, k1, \handler
+ PTR_ADDIU k0, r4k_wait_exit - r4k_wait_insn + 2
+ .set reorder
MTC0 k0, CP0_EPC
.set pop
.endm
.align 5
-BUILD_ROLLBACK_PROLOGUE handle_int
+BUILD_SKIPOVER_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -265,7 +276,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
* This prototype is copied to ebase + n*IntCtl.VS and patched
* to invoke the handler
*/
-BUILD_ROLLBACK_PROLOGUE except_vec_vi
+BUILD_SKIPOVER_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
SAVE_SOME docfi=1
SAVE_AT docfi=1
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c
index 8c083612df9d..027fb57d0d79 100644
--- a/arch/mips/kernel/gpio_txx9.c
+++ b/arch/mips/kernel/gpio_txx9.c
@@ -32,14 +32,16 @@ static void txx9_gpio_set_raw(unsigned int offset, int value)
__raw_writel(val, &txx9_pioptr->dout);
}
-static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
unsigned long flags;
spin_lock_irqsave(&txx9_gpio_lock, flags);
txx9_gpio_set_raw(offset, value);
mmiowb();
spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+
+ return 0;
}
static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
@@ -68,7 +70,7 @@ static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
static struct gpio_chip txx9_gpio_chip = {
.get = txx9_gpio_get,
- .set = txx9_gpio_set,
+ .set_rv = txx9_gpio_set,
.direction_input = txx9_gpio_dir_in,
.direction_output = txx9_gpio_dir_out,
.label = "TXx9",
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index b825ed4476c7..d99ed58b7043 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -59,6 +59,8 @@
#endif
.endm
+ __HEAD
+
#ifndef CONFIG_NO_EXCEPT_FILL
/*
* Reserved space for exception handlers.
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 5abc8b7340f8..80e8a04a642e 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -35,13 +35,6 @@ static void __cpuidle r3081_wait(void)
write_c0_conf(cfg | R30XX_CONF_HALT);
}
-void __cpuidle r4k_wait(void)
-{
- raw_local_irq_enable();
- __r4k_wait();
- raw_local_irq_disable();
-}
-
/*
* This variant is preferable as it allows testing need_resched and going to
* sleep depending on the outcome atomically. Unfortunately the "It is
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 3eb2cfb893e1..43cb1e20baed 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -5,6 +5,7 @@
*/
#include <linux/errno.h>
+#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
@@ -14,6 +15,7 @@
void __iomem *mips_gcr_base;
void __iomem *mips_cm_l2sync_base;
int mips_cm_is64;
+bool mips_cm_is_l2_hci_broken;
static char *cm2_tr[8] = {
"mem", "gcr", "gic", "mmio",
@@ -237,6 +239,18 @@ static void mips_cm_probe_l2sync(void)
mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
}
+void mips_cm_update_property(void)
+{
+ struct device_node *cm_node;
+
+ cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm");
+ if (!cm_node)
+ return;
+ pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken");
+ mips_cm_is_l2_hci_broken = true;
+ of_node_put(cm_node);
+}
+
int mips_cm_probe(void)
{
phys_addr_t addr;
@@ -308,7 +322,9 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);
if (cm_rev >= CM_REV_CM3_5) {
- val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+ if (cluster != cpu_cluster(&current_cpu_data))
+ val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+ val |= CM_GCR_Cx_OTHER_GIC_EN;
val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);
val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block);
} else {
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 37676a44fefb..2ef610650a9e 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -122,9 +122,8 @@ void mips_mt_set_cpuoptions(void)
unsigned long ectlval;
unsigned long itcblkgrn;
- /* ErrCtl register is known as "ecc" to Linux */
- ectlval = read_c0_ecc();
- write_c0_ecc(ectlval | (0x1 << 26));
+ ectlval = read_c0_errctl();
+ write_c0_errctl(ectlval | (0x1 << 26));
ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
@@ -145,7 +144,7 @@ void mips_mt_set_cpuoptions(void)
ehb();
/* Write out to ITU with CACHE op */
cache_op(Index_Store_Tag_D, INDEX_0);
- write_c0_ecc(ectlval);
+ write_c0_errctl(ectlval);
ehb();
printk("Mapped %ld ITC cells starting at 0x%08x\n",
((itcblkgrn & 0x7fe00000) >> 20), itc_base);
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index c4d6b09136b1..196a070349b0 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -791,8 +791,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc,
if (!mipspmu_event_set_period(event, hwc, idx))
return;
- if (perf_event_overflow(event, data, regs))
- mipsxx_pmu_disable_event(idx);
+ perf_event_overflow(event, data, regs);
}
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index d09ca77e624d..3de0e05e0511 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -57,10 +57,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
-/*
- * Used to synchronize entry to deep idle states. Actually per-core rather
- * than per-CPU.
- */
+/* Used to synchronize entry to deep idle states */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
/* Saved CPU state across the CPS_PM_POWER_GATED state */
@@ -104,17 +101,20 @@ static void coupled_barrier(atomic_t *a, unsigned online)
int cps_pm_enter_state(enum cps_pm_state state)
{
unsigned cpu = smp_processor_id();
+ unsigned int cluster = cpu_cluster(&current_cpu_data);
unsigned core = cpu_core(&current_cpu_data);
unsigned online, left;
cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
u32 *core_ready_count, *nc_core_ready_count;
void *nc_addr;
cps_nc_entry_fn entry;
+ struct cluster_boot_config *cluster_cfg;
struct core_boot_config *core_cfg;
struct vpe_boot_config *vpe_cfg;
+ atomic_t *barrier;
/* Check that there is an entry function for this state */
- entry = per_cpu(nc_asm_enter, core)[state];
+ entry = per_cpu(nc_asm_enter, cpu)[state];
if (!entry)
return -EINVAL;
@@ -138,7 +138,8 @@ int cps_pm_enter_state(enum cps_pm_state state)
if (!mips_cps_smp_in_use())
return -EINVAL;
- core_cfg = &mips_cps_core_bootcfg[core];
+ cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
+ core_cfg = &cluster_cfg->core_config[core];
vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
vpe_cfg->gp = (unsigned long)current_thread_info();
@@ -150,7 +151,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
smp_mb__after_atomic();
/* Create a non-coherent mapping of the core ready_count */
- core_ready_count = per_cpu(ready_count, core);
+ core_ready_count = per_cpu(ready_count, cpu);
nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
(unsigned long)core_ready_count);
nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
@@ -158,7 +159,8 @@ int cps_pm_enter_state(enum cps_pm_state state)
/* Ensure ready_count is zero-initialised before the assembly runs */
WRITE_ONCE(*nc_core_ready_count, 0);
- coupled_barrier(&per_cpu(pm_barrier, core), online);
+ barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu]));
+ coupled_barrier(barrier, online);
/* Run the generated entry code */
left = entry(online, nc_core_ready_count);
@@ -629,12 +631,14 @@ out_err:
static int cps_pm_online_cpu(unsigned int cpu)
{
- enum cps_pm_state state;
- unsigned core = cpu_core(&cpu_data[cpu]);
+ unsigned int sibling, core;
void *entry_fn, *core_rc;
+ enum cps_pm_state state;
+
+ core = cpu_core(&cpu_data[cpu]);
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
- if (per_cpu(nc_asm_enter, core)[state])
+ if (per_cpu(nc_asm_enter, cpu)[state])
continue;
if (!test_bit(state, state_support))
continue;
@@ -646,16 +650,19 @@ static int cps_pm_online_cpu(unsigned int cpu)
clear_bit(state, state_support);
}
- per_cpu(nc_asm_enter, core)[state] = entry_fn;
+ for_each_cpu(sibling, &cpu_sibling_map[cpu])
+ per_cpu(nc_asm_enter, sibling)[state] = entry_fn;
}
- if (!per_cpu(ready_count, core)) {
+ if (!per_cpu(ready_count, cpu)) {
core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM;
}
- per_cpu(ready_count, core) = core_rc;
+
+ for_each_cpu(sibling, &cpu_sibling_map[cpu])
+ per_cpu(ready_count, sibling) = core_rc;
}
return 0;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 8eba5a1ed664..8f0a0001540c 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -66,24 +66,23 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
cpu_data[n].udelay_val / (500000/HZ),
(cpu_data[n].udelay_val / (5000/HZ)) % 100);
- seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
+ seq_printf(m, "wait instruction\t: %s\n", str_yes_no(cpu_wait));
seq_printf(m, "microsecond timers\t: %s\n",
- cpu_has_counter ? "yes" : "no");
+ str_yes_no(cpu_has_counter));
seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
seq_printf(m, "extra interrupt vector\t: %s\n",
- cpu_has_divec ? "yes" : "no");
- seq_printf(m, "hardware watchpoint\t: %s",
- cpu_has_watch ? "yes, " : "no\n");
+ str_yes_no(cpu_has_divec));
+ seq_printf(m, "hardware watchpoint\t: %s", str_yes_no(cpu_has_watch));
if (cpu_has_watch) {
- seq_printf(m, "count: %d, address/irw mask: [",
+ seq_printf(m, ", count: %d, address/irw mask: [",
cpu_data[n].watch_reg_count);
for (i = 0; i < cpu_data[n].watch_reg_count; i++)
seq_printf(m, "%s0x%04x", i ? ", " : "",
cpu_data[n].watch_reg_masks[i]);
- seq_puts(m, "]\n");
+ seq_puts(m, "]");
}
- seq_puts(m, "isa\t\t\t:");
+ seq_puts(m, "\nisa\t\t\t:");
if (cpu_has_mips_1)
seq_puts(m, " mips1");
if (cpu_has_mips_2)
@@ -155,7 +154,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_mmips) {
seq_printf(m, "micromips kernel\t: %s\n",
- (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
+ str_yes_no(read_c0_config3() & MIPS_CONF3_ISA_OE));
}
seq_puts(m, "Options implemented\t:");
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 6062e6fa589a..4fd6da0a06c3 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -41,7 +41,7 @@ char *mips_get_machine_name(void)
void __init __dt_setup_arch(void *bph)
{
- if (!early_init_dt_scan(bph))
+ if (!early_init_dt_scan(bph, __pa(bph)))
return;
mips_set_machine_name(of_flat_dt_get_machine_name());
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 61503a36067e..b890d64d352c 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -922,11 +922,13 @@ static const struct pt_regs_offset regoffset_table[] = {
*/
int regs_query_register_offset(const char *name)
{
- const struct pt_regs_offset *roff;
- for (roff = regoffset_table; roff->name != NULL; roff++)
- if (!strcmp(roff->name, name))
- return roff->offset;
- return -EINVAL;
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+
+ return -EINVAL;
}
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
@@ -937,7 +939,7 @@ static const struct user_regset mips_regsets[] = {
.n = ELF_NGREG,
.size = sizeof(unsigned int),
.align = sizeof(unsigned int),
- .regset_get = gpr32_get,
+ .regset_get = gpr32_get,
.set = gpr32_set,
},
[REGSET_DSP] = {
@@ -945,7 +947,7 @@ static const struct user_regset mips_regsets[] = {
.n = NUM_DSP_REGS + 1,
.size = sizeof(u32),
.align = sizeof(u32),
- .regset_get = dsp32_get,
+ .regset_get = dsp32_get,
.set = dsp32_set,
.active = dsp_active,
},
@@ -955,7 +957,7 @@ static const struct user_regset mips_regsets[] = {
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
- .regset_get = fpr_get,
+ .regset_get = fpr_get,
.set = fpr_set,
},
[REGSET_FP_MODE] = {
@@ -963,7 +965,7 @@ static const struct user_regset mips_regsets[] = {
.n = 1,
.size = sizeof(int),
.align = sizeof(int),
- .regset_get = fp_mode_get,
+ .regset_get = fp_mode_get,
.set = fp_mode_set,
},
#endif
@@ -973,7 +975,7 @@ static const struct user_regset mips_regsets[] = {
.n = NUM_FPU_REGS + 1,
.size = 16,
.align = 16,
- .regset_get = msa_get,
+ .regset_get = msa_get,
.set = msa_set,
},
#endif
@@ -997,7 +999,7 @@ static const struct user_regset mips64_regsets[] = {
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
- .regset_get = gpr64_get,
+ .regset_get = gpr64_get,
.set = gpr64_set,
},
[REGSET_DSP] = {
@@ -1005,7 +1007,7 @@ static const struct user_regset mips64_regsets[] = {
.n = NUM_DSP_REGS + 1,
.size = sizeof(u64),
.align = sizeof(u64),
- .regset_get = dsp64_get,
+ .regset_get = dsp64_get,
.set = dsp64_set,
.active = dsp_active,
},
@@ -1015,7 +1017,7 @@ static const struct user_regset mips64_regsets[] = {
.n = 1,
.size = sizeof(int),
.align = sizeof(int),
- .regset_get = fp_mode_get,
+ .regset_get = fp_mode_get,
.set = fp_mode_set,
},
[REGSET_FPR] = {
@@ -1023,7 +1025,7 @@ static const struct user_regset mips64_regsets[] = {
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
- .regset_get = fpr_get,
+ .regset_get = fpr_get,
.set = fpr_set,
},
#endif
@@ -1033,7 +1035,7 @@ static const struct user_regset mips64_regsets[] = {
.n = NUM_FPU_REGS + 1,
.size = 16,
.align = 16,
- .regset_get = msa_get,
+ .regset_get = msa_get,
.set = msa_set,
},
#endif
@@ -1326,24 +1328,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs)
return -1;
}
-#ifdef CONFIG_SECCOMP
- if (unlikely(test_thread_flag(TIF_SECCOMP))) {
- int ret, i;
- struct seccomp_data sd;
- unsigned long args[6];
-
- sd.nr = current_thread_info()->syscall;
- sd.arch = syscall_get_arch(current);
- syscall_get_arguments(current, regs, args);
- for (i = 0; i < 6; i++)
- sd.args[i] = args[i];
- sd.instruction_pointer = KSTK_EIP(current);
-
- ret = __secure_computing(&sd);
- if (ret == -1)
- return ret;
- }
-#endif
+ if (secure_computing())
+ return -1;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
@@ -1367,7 +1353,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs)
*/
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
- /*
+ /*
* We may come here right after calling schedule_user()
* or do_notify_resume(), in which case we can be in RCU
* user mode.
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index 7eeeaf1ff95d..cda7983e7c18 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -337,7 +337,7 @@ void *__init relocate_kernel(void)
#if defined(CONFIG_USE_OF)
/* Deal with the device tree */
fdt = plat_get_fdt();
- early_init_dt_scan(fdt);
+ early_init_dt_scan(fdt, __pa(fdt));
if (boot_command_line[0]) {
/* Boot command line was passed in device tree */
strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 2c604717e630..4947a4f39e37 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -64,10 +64,10 @@ load_a6: user_lw(t7, 24(t0)) # argument #7 from usp
load_a7: user_lw(t8, 28(t0)) # argument #8 from usp
loads_done:
- sw t5, 16(sp) # argument #5 to ksp
- sw t6, 20(sp) # argument #6 to ksp
- sw t7, 24(sp) # argument #7 to ksp
- sw t8, 28(sp) # argument #8 to ksp
+ sw t5, PT_ARG4(sp) # argument #5 to ksp
+ sw t6, PT_ARG5(sp) # argument #6 to ksp
+ sw t7, PT_ARG6(sp) # argument #7 to ksp
+ sw t8, PT_ARG7(sp) # argument #8 to ksp
.set pop
.section __ex_table,"a"
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 12a1a4ffb602..fbfe0771317e 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -704,10 +704,7 @@ static void __init resource_init(void)
for_each_mem_range(i, &start, &end) {
struct resource *res;
- res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
- if (!res)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct resource));
+ res = memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES);
res->start = start;
/*
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 395622c37325..7b0e69af4097 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -36,11 +36,55 @@ enum label_id {
UASM_L_LA(_not_nmi)
-static DECLARE_BITMAP(core_power, NR_CPUS);
-static uint32_t core_entry_reg;
+static u64 core_entry_reg;
static phys_addr_t cps_vec_pa;
-struct core_boot_config *mips_cps_core_bootcfg;
+struct cluster_boot_config *mips_cps_cluster_bootcfg;
+
+static void power_up_other_cluster(unsigned int cluster)
+{
+ u32 stat, seq_state;
+ unsigned int timeout;
+
+ mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+ CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ stat = read_cpc_co_stat_conf();
+ mips_cm_unlock_other();
+
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+ seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+ return;
+
+ /* Set endianness & power up the CM */
+ mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
+ write_cpc_redir_pwrup_ctl(1);
+ mips_cm_unlock_other();
+
+ /* Wait for the CM to start up */
+ timeout = 1000;
+ mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+ CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ while (1) {
+ stat = read_cpc_co_stat_conf();
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+ seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+ break;
+
+ if (timeout) {
+ mdelay(1);
+ timeout--;
+ } else {
+ pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
+ cluster, stat);
+ mdelay(1000);
+ }
+ }
+
+ mips_cm_unlock_other();
+}
static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
@@ -94,6 +138,20 @@ static void __init *mips_cps_build_core_entry(void *addr)
return p;
}
+static bool __init check_64bit_reset(void)
+{
+ bool cx_64bit_reset = false;
+
+ mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ write_gcr_co_reset64_base(CM_GCR_Cx_RESET64_BASE_BEVEXCBASE);
+ if ((read_gcr_co_reset64_base() & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) ==
+ CM_GCR_Cx_RESET64_BASE_BEVEXCBASE)
+ cx_64bit_reset = true;
+ mips_cm_unlock_other();
+
+ return cx_64bit_reset;
+}
+
static int __init allocate_cps_vecs(void)
{
/* Try to allocate in KSEG1 first */
@@ -105,11 +163,23 @@ static int __init allocate_cps_vecs(void)
CM_GCR_Cx_RESET_BASE_BEVEXCBASE;
if (!cps_vec_pa && mips_cm_is64) {
- cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
- 0x0, SZ_4G - 1);
- if (cps_vec_pa)
- core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
+ phys_addr_t end;
+
+ if (check_64bit_reset()) {
+ pr_info("VP Local Reset Exception Base support 47 bits address\n");
+ end = MEMBLOCK_ALLOC_ANYWHERE;
+ } else {
+ end = SZ_4G - 1;
+ }
+ cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 0, end);
+ if (cps_vec_pa) {
+ if (check_64bit_reset())
+ core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) |
+ CM_GCR_Cx_RESET_BASE_MODE;
+ else
+ core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
CM_GCR_Cx_RESET_BASE_MODE;
+ }
}
if (!cps_vec_pa)
@@ -152,6 +222,9 @@ static void __init cps_smp_setup(void)
pr_cont(",");
pr_cont("{");
+ if (mips_cm_revision() >= CM_REV_CM3_5)
+ power_up_other_cluster(cl);
+
ncores = mips_cps_numcores(cl);
for (c = 0; c < ncores; c++) {
core_vpes = core_vpe_count(cl, c);
@@ -163,6 +236,7 @@ static void __init cps_smp_setup(void)
/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
if (!cl && !c)
smp_num_siblings = core_vpes;
+ cpumask_set_cpu(nvpes, &__cpu_primary_thread_mask);
for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
cpu_set_cluster(&cpu_data[nvpes + v], cl);
@@ -179,8 +253,8 @@ static void __init cps_smp_setup(void)
/* Indicate present CPUs (CPU being synonymous with VPE) */
for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
- set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
- set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
+ set_cpu_possible(v, true);
+ set_cpu_present(v, true);
__cpu_number_map[v] = v;
__cpu_logical_map[v] = v;
}
@@ -188,9 +262,6 @@ static void __init cps_smp_setup(void)
/* Set a coherent default CCA (CWB) */
change_c0_config(CONF_CM_CMASK, 0x5);
- /* Core 0 is powered up (we're running on it) */
- bitmap_set(core_power, 0, 1);
-
/* Initialise core 0 */
mips_cps_core_init();
@@ -212,8 +283,10 @@ static void __init cps_smp_setup(void)
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
- unsigned ncores, core_vpes, c, cca;
+ unsigned int nclusters, ncores, core_vpes, c, cl, cca;
bool cca_unsuitable, cores_limited;
+ struct cluster_boot_config *cluster_bootcfg;
+ struct core_boot_config *core_bootcfg;
mips_mt_set_cpuoptions();
@@ -255,40 +328,67 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
setup_cps_vecs();
- /* Allocate core boot configuration structs */
- ncores = mips_cps_numcores(0);
- mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
- GFP_KERNEL);
- if (!mips_cps_core_bootcfg) {
- pr_err("Failed to allocate boot config for %u cores\n", ncores);
+ /* Allocate cluster boot configuration structs */
+ nclusters = mips_cps_numclusters();
+ mips_cps_cluster_bootcfg = kcalloc(nclusters,
+ sizeof(*mips_cps_cluster_bootcfg),
+ GFP_KERNEL);
+ if (!mips_cps_cluster_bootcfg)
goto err_out;
- }
- /* Allocate VPE boot configuration structs */
- for (c = 0; c < ncores; c++) {
- core_vpes = core_vpe_count(0, c);
- mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
- sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+ if (nclusters > 1)
+ mips_cm_update_property();
+
+ for (cl = 0; cl < nclusters; cl++) {
+ /* Allocate core boot configuration structs */
+ ncores = mips_cps_numcores(cl);
+ core_bootcfg = kcalloc(ncores, sizeof(*core_bootcfg),
+ GFP_KERNEL);
+ if (!core_bootcfg)
+ goto err_out;
+ mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg;
+
+ mips_cps_cluster_bootcfg[cl].core_power =
+ kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
GFP_KERNEL);
- if (!mips_cps_core_bootcfg[c].vpe_config) {
- pr_err("Failed to allocate %u VPE boot configs\n",
- core_vpes);
+ if (!mips_cps_cluster_bootcfg[cl].core_power)
goto err_out;
+
+ /* Allocate VPE boot configuration structs */
+ for (c = 0; c < ncores; c++) {
+ core_vpes = core_vpe_count(cl, c);
+ core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+ sizeof(*core_bootcfg[c].vpe_config),
+ GFP_KERNEL);
+ if (!core_bootcfg[c].vpe_config)
+ goto err_out;
}
}
- /* Mark this CPU as booted */
- atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
- 1 << cpu_vpe_id(&current_cpu_data));
+ /* Mark this CPU as powered up & booted */
+ cl = cpu_cluster(&current_cpu_data);
+ c = cpu_core(&current_cpu_data);
+ cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
+ cpu_smt_set_num_threads(core_vpes, core_vpes);
+ core_bootcfg = &cluster_bootcfg->core_config[c];
+ bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1);
+ atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data));
return;
err_out:
/* Clean up allocations */
- if (mips_cps_core_bootcfg) {
- for (c = 0; c < ncores; c++)
- kfree(mips_cps_core_bootcfg[c].vpe_config);
- kfree(mips_cps_core_bootcfg);
- mips_cps_core_bootcfg = NULL;
+ if (mips_cps_cluster_bootcfg) {
+ for (cl = 0; cl < nclusters; cl++) {
+ cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
+ ncores = mips_cps_numcores(cl);
+ for (c = 0; c < ncores; c++) {
+ core_bootcfg = &cluster_bootcfg->core_config[c];
+ kfree(core_bootcfg->vpe_config);
+ }
+ kfree(mips_cps_cluster_bootcfg[cl].core_config);
+ }
+ kfree(mips_cps_cluster_bootcfg);
+ mips_cps_cluster_bootcfg = NULL;
}
/* Effectively disable SMP by declaring CPUs not present */
@@ -299,16 +399,124 @@ err_out:
}
}
-static void boot_core(unsigned int core, unsigned int vpe_id)
+static void init_cluster_l2(void)
{
- u32 stat, seq_state;
- unsigned timeout;
+ u32 l2_cfg, l2sm_cop, result;
+
+ while (!mips_cm_is_l2_hci_broken) {
+ l2_cfg = read_gcr_redir_l2_ram_config();
+
+ /* If HCI is not supported, use the state machine below */
+ if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT))
+ break;
+ if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED))
+ break;
+
+ /* If the HCI_DONE bit is set, we're finished */
+ if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE)
+ return;
+ }
+
+ l2sm_cop = read_gcr_redir_l2sm_cop();
+ if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT),
+ "L2 init not supported on this system yet"))
+ return;
+
+ /* Clear L2 tag registers */
+ write_gcr_redir_l2_tag_state(0);
+ write_gcr_redir_l2_ecc(0);
+
+ /* Ensure the L2 tag writes complete before the state machine starts */
+ mb();
+
+ /* Wait for the L2 state machine to be idle */
+ do {
+ l2sm_cop = read_gcr_redir_l2sm_cop();
+ } while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);
+
+ /* Start a store tag operation */
+ l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
+ l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
+ l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
+ write_gcr_redir_l2sm_cop(l2sm_cop);
+
+ /* Ensure the state machine starts before we poll for completion */
+ mb();
+
+ /* Wait for the operation to be complete */
+ do {
+ l2sm_cop = read_gcr_redir_l2sm_cop();
+ result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
+ result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
+ } while (!result);
+
+ WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK,
+ "L2 state machine failed cache init with error %u\n", result);
+}
+
+static void boot_core(unsigned int cluster, unsigned int core,
+ unsigned int vpe_id)
+{
+ struct cluster_boot_config *cluster_cfg;
+ u32 access, stat, seq_state;
+ unsigned int timeout, ncores;
+
+ cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
+ ncores = mips_cps_numcores(cluster);
+
+ if ((cluster != cpu_cluster(&current_cpu_data)) &&
+ bitmap_empty(cluster_cfg->core_power, ncores)) {
+ power_up_other_cluster(cluster);
+
+ mips_cm_lock_other(cluster, core, 0,
+ CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+ /* Ensure cluster GCRs are where we expect */
+ write_gcr_redir_base(read_gcr_base());
+ write_gcr_redir_cpc_base(read_gcr_cpc_base());
+ write_gcr_redir_gic_base(read_gcr_gic_base());
+
+ init_cluster_l2();
+
+ /* Mirror L2 configuration */
+ write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base());
+ write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control());
+ write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b());
+
+ /* Mirror ECC/parity setup */
+ write_gcr_redir_err_control(read_gcr_err_control());
+
+ /* Set BEV base */
+ write_gcr_redir_bev_base(core_entry_reg);
+
+ mips_cm_unlock_other();
+ }
+
+ if (cluster != cpu_cluster(&current_cpu_data)) {
+ mips_cm_lock_other(cluster, core, 0,
+ CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+ /* Ensure the core can access the GCRs */
+ access = read_gcr_redir_access();
+ access |= BIT(core);
+ write_gcr_redir_access(access);
+
+ mips_cm_unlock_other();
+ } else {
+ /* Ensure the core can access the GCRs */
+ access = read_gcr_access();
+ access |= BIT(core);
+ write_gcr_access(access);
+ }
/* Select the appropriate core */
- mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
/* Set its reset vector */
- write_gcr_co_reset_base(core_entry_reg);
+ if (mips_cm_is64)
+ write_gcr_co_reset64_base(core_entry_reg);
+ else
+ write_gcr_co_reset_base(core_entry_reg);
/* Ensure its coherency is disabled */
write_gcr_co_coherence(0);
@@ -371,30 +579,42 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
mips_cm_unlock_other();
/* The core is now powered up */
- bitmap_set(core_power, core, 1);
+ bitmap_set(cluster_cfg->core_power, core, 1);
+
+ /*
+ * Restore CM_PWRUP=0 so that the CM can power down if all the cores in
+ * the cluster do (eg. if they're all removed via hotplug.
+ */
+ if (mips_cm_revision() >= CM_REV_CM3_5) {
+ mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ write_cpc_redir_pwrup_ctl(0);
+ mips_cm_unlock_other();
+ }
}
static void remote_vpe_boot(void *dummy)
{
+ unsigned int cluster = cpu_cluster(&current_cpu_data);
unsigned core = cpu_core(&current_cpu_data);
- struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+ struct cluster_boot_config *cluster_cfg =
+ &mips_cps_cluster_bootcfg[cluster];
+ struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];
mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}
static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
+ unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
unsigned core = cpu_core(&cpu_data[cpu]);
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
- struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+ struct cluster_boot_config *cluster_cfg =
+ &mips_cps_cluster_bootcfg[cluster];
+ struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];
struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
unsigned int remote;
int err;
- /* We don't yet support booting CPUs in other clusters */
- if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
- return -ENOSYS;
-
vpe_cfg->pc = (unsigned long)&smp_bootstrap;
vpe_cfg->sp = __KSTK_TOS(idle);
vpe_cfg->gp = (unsigned long)task_thread_info(idle);
@@ -403,15 +623,19 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
preempt_disable();
- if (!test_bit(core, core_power)) {
+ if (!test_bit(core, cluster_cfg->core_power)) {
/* Boot a VPE on a powered down core */
- boot_core(core, vpe_id);
+ boot_core(cluster, core, vpe_id);
goto out;
}
if (cpu_has_vp) {
- mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
- write_gcr_co_reset_base(core_entry_reg);
+ mips_cm_lock_other(cluster, core, vpe_id,
+ CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ if (mips_cm_is64)
+ write_gcr_co_reset64_base(core_entry_reg);
+ else
+ write_gcr_co_reset_base(core_entry_reg);
mips_cm_unlock_other();
}
@@ -544,12 +768,14 @@ static void cps_kexec_nonboot_cpu(void)
static int cps_cpu_disable(void)
{
unsigned cpu = smp_processor_id();
+ struct cluster_boot_config *cluster_cfg;
struct core_boot_config *core_cfg;
if (!cps_pm_support_state(CPS_PM_POWER_GATED))
return -EINVAL;
- core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
+ cluster_cfg = &mips_cps_cluster_bootcfg[cpu_cluster(&current_cpu_data)];
+ core_cfg = &cluster_cfg->core_config[cpu_core(&current_cpu_data)];
atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
smp_mb__after_atomic();
set_cpu_online(cpu, false);
@@ -615,11 +841,15 @@ static void cps_cpu_die(unsigned int cpu) { }
static void cps_cleanup_dead_cpu(unsigned cpu)
{
+ unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
unsigned core = cpu_core(&cpu_data[cpu]);
unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
ktime_t fail_time;
unsigned stat;
int err;
+ struct cluster_boot_config *cluster_cfg;
+
+ cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
/*
* Now wait for the CPU to actually offline. Without doing this that
@@ -671,7 +901,7 @@ static void cps_cleanup_dead_cpu(unsigned cpu)
} while (1);
/* Indicate the core is powered off */
- bitmap_clear(core_power, core, 1);
+ bitmap_clear(cluster_cfg->core_power, core, 1);
} else if (cpu_has_mipsmt) {
/*
* Have a CPU with access to the offlined CPUs registers wait
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 39e193cad2b9..4868e79f3b30 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -56,8 +56,10 @@ EXPORT_SYMBOL(cpu_sibling_map);
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
+#ifndef CONFIG_HOTPLUG_PARALLEL
static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
+#endif
/*
* A logical cpu mask containing only one VPE per core to
@@ -74,6 +76,8 @@ static cpumask_t cpu_core_setup_map;
cpumask_t cpu_coherent_mask;
+struct cpumask __cpu_primary_thread_mask __read_mostly;
+
unsigned int smp_max_threads __initdata = UINT_MAX;
static int __init early_nosmt(char *s)
@@ -367,6 +371,9 @@ asmlinkage void start_secondary(void)
* to an option instead of something based on .cputype
*/
+#ifdef CONFIG_HOTPLUG_PARALLEL
+ cpuhp_ap_sync_alive();
+#endif
calibrate_delay();
cpu_data[cpu].udelay_val = loops_per_jiffy;
@@ -376,8 +383,10 @@ asmlinkage void start_secondary(void)
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
+#ifndef CONFIG_HOTPLUG_PARALLEL
/* Notify boot CPU that we're starting & ready to sync counters */
complete(&cpu_starting);
+#endif
synchronise_count_slave(cpu);
@@ -386,11 +395,13 @@ asmlinkage void start_secondary(void)
calculate_cpu_foreign_map();
+#ifndef CONFIG_HOTPLUG_PARALLEL
/*
* Notify boot CPU that we're up & online and it can safely return
* from __cpu_up
*/
complete(&cpu_running);
+#endif
/*
* irq will be enabled in ->smp_finish(), enabling it too early
@@ -447,6 +458,12 @@ void __init smp_prepare_boot_cpu(void)
set_cpu_online(0, true);
}
+#ifdef CONFIG_HOTPLUG_PARALLEL
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
+{
+ return mp_ops->boot_secondary(cpu, tidle);
+}
+#else
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int err;
@@ -466,6 +483,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
wait_for_completion(&cpu_running);
return 0;
}
+#endif
#ifdef CONFIG_PROFILING
/* Not really SMP stuff ... */
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 71c7e5e27567..dd31e3fffd24 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -26,10 +26,6 @@
#define ERRCTL_SPRAM (1 << 28)
-/* errctl access */
-#define read_c0_errctl(x) read_c0_ecc(x)
-#define write_c0_errctl(x) write_c0_ecc(x)
-
/*
* Different semantics to the set_c0_* function built by __BUILD_SET_C0
*/
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 953f5b7dc723..aa70e371bb54 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -401,3 +401,8 @@
460 n32 lsm_set_self_attr sys_lsm_set_self_attr
461 n32 lsm_list_modules sys_lsm_list_modules
462 n32 mseal sys_mseal
+463 n32 setxattrat sys_setxattrat
+464 n32 getxattrat sys_getxattrat
+465 n32 listxattrat sys_listxattrat
+466 n32 removexattrat sys_removexattrat
+467 n32 open_tree_attr sys_open_tree_attr
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 1464c6be6eb3..1e8c44c7b614 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -377,3 +377,8 @@
460 n64 lsm_set_self_attr sys_lsm_set_self_attr
461 n64 lsm_list_modules sys_lsm_list_modules
462 n64 mseal sys_mseal
+463 n64 setxattrat sys_setxattrat
+464 n64 getxattrat sys_getxattrat
+465 n64 listxattrat sys_listxattrat
+466 n64 removexattrat sys_removexattrat
+467 n64 open_tree_attr sys_open_tree_attr
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 2439a2491cff..114a5a1a6230 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -450,3 +450,8 @@
460 o32 lsm_set_self_attr sys_lsm_set_self_attr
461 o32 lsm_list_modules sys_lsm_list_modules
462 o32 mseal sys_mseal
+463 o32 setxattrat sys_setxattrat
+464 o32 getxattrat sys_getxattrat
+465 o32 listxattrat sys_listxattrat
+466 o32 removexattrat sys_removexattrat
+467 o32 open_tree_attr sys_open_tree_attr
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index dc29bd9656b0..8ec1e185b35c 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -38,6 +38,7 @@
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
+#include <linux/string_choices.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@@ -76,7 +77,7 @@
#include "access-helper.h"
extern void check_wait(void);
-extern asmlinkage void rollback_handle_int(void);
+extern asmlinkage void skipover_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
@@ -1705,10 +1706,10 @@ static inline __init void parity_protection_init(void)
l2parity &= l1parity;
/* Probe L1 ECC support */
- cp0_ectl = read_c0_ecc();
- write_c0_ecc(cp0_ectl | ERRCTL_PE);
+ cp0_ectl = read_c0_errctl();
+ write_c0_errctl(cp0_ectl | ERRCTL_PE);
back_to_back_c0_hazard();
- cp0_ectl = read_c0_ecc();
+ cp0_ectl = read_c0_errctl();
/* Probe L2 ECC support */
gcr_ectl = read_gcr_err_control();
@@ -1727,9 +1728,9 @@ static inline __init void parity_protection_init(void)
cp0_ectl |= ERRCTL_PE;
else
cp0_ectl &= ~ERRCTL_PE;
- write_c0_ecc(cp0_ectl);
+ write_c0_errctl(cp0_ectl);
back_to_back_c0_hazard();
- WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
+ WARN_ON(!!(read_c0_errctl() & ERRCTL_PE) != l1parity);
/* Configure L2 ECC checking */
if (l2parity)
@@ -1741,8 +1742,8 @@ static inline __init void parity_protection_init(void)
gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
WARN_ON(!!gcr_ectl != l2parity);
- pr_info("Cache parity protection %sabled\n",
- l1parity ? "en" : "dis");
+ pr_info("Cache parity protection %s\n",
+ str_enabled_disabled(l1parity));
return;
}
@@ -1761,18 +1762,18 @@ static inline __init void parity_protection_init(void)
unsigned long errctl;
unsigned int l1parity_present, l2parity_present;
- errctl = read_c0_ecc();
+ errctl = read_c0_errctl();
errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
/* probe L1 parity support */
- write_c0_ecc(errctl | ERRCTL_PE);
+ write_c0_errctl(errctl | ERRCTL_PE);
back_to_back_c0_hazard();
- l1parity_present = (read_c0_ecc() & ERRCTL_PE);
+ l1parity_present = (read_c0_errctl() & ERRCTL_PE);
/* probe L2 parity support */
- write_c0_ecc(errctl|ERRCTL_L2P);
+ write_c0_errctl(errctl|ERRCTL_L2P);
back_to_back_c0_hazard();
- l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
+ l2parity_present = (read_c0_errctl() & ERRCTL_L2P);
if (l1parity_present && l2parity_present) {
if (l1parity)
@@ -1791,20 +1792,20 @@ static inline __init void parity_protection_init(void)
printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
- write_c0_ecc(errctl);
+ write_c0_errctl(errctl);
back_to_back_c0_hazard();
- errctl = read_c0_ecc();
+ errctl = read_c0_errctl();
printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
if (l1parity_present)
- printk(KERN_INFO "Cache parity protection %sabled\n",
- (errctl & ERRCTL_PE) ? "en" : "dis");
+ pr_info("Cache parity protection %s\n",
+ str_enabled_disabled(errctl & ERRCTL_PE));
if (l2parity_present) {
if (l1parity_present && l1parity)
errctl ^= ERRCTL_L2P;
- printk(KERN_INFO "L2 cache parity protection %sabled\n",
- (errctl & ERRCTL_L2P) ? "en" : "dis");
+ pr_info("L2 cache parity protection %s\n",
+ str_enabled_disabled(errctl & ERRCTL_L2P));
}
}
break;
@@ -1812,11 +1813,11 @@ static inline __init void parity_protection_init(void)
case CPU_5KC:
case CPU_5KE:
case CPU_LOONGSON32:
- write_c0_ecc(0x80000000);
+ write_c0_errctl(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
- printk(KERN_INFO "Cache parity protection %sabled\n",
- (read_c0_ecc() & 0x80000000) ? "en" : "dis");
+ pr_info("Cache parity protection %s\n",
+ str_enabled_disabled(read_c0_errctl() & 0x80000000));
break;
case CPU_20KC:
case CPU_25KF:
@@ -1887,8 +1888,8 @@ asmlinkage void do_ftlb(void)
if ((cpu_has_mips_r2_r6) &&
(((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
- pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
- read_c0_ecc());
+ pr_err("FTLB error exception, cp0_errctl=0x%08x:\n",
+ read_c0_errctl());
pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
pr_err("c0_cacheerr == %08x\n", reg_val);
@@ -2065,7 +2066,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
{
extern const u8 except_vec_vi[];
extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
- extern const u8 rollback_except_vec_vi[];
+ extern const u8 skipover_except_vec_vi[];
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
@@ -2094,7 +2095,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
change_c0_srsmap(0xf << n*4, 0 << n*4);
}
- vec_start = using_rollback_handler() ? rollback_except_vec_vi :
+ vec_start = using_skipover_handler() ? skipover_except_vec_vi :
except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
ori_offset = except_vec_vi_ori - vec_start + 2;
@@ -2425,8 +2426,8 @@ void __init trap_init(void)
if (board_be_init)
board_be_init();
- set_except_vector(EXCCODE_INT, using_rollback_handler() ?
- rollback_handle_int : handle_int);
+ set_except_vector(EXCCODE_INT, using_skipover_handler() ?
+ skipover_handle_int : handle_int);
set_except_vector(EXCCODE_MOD, handle_tlbm);
set_except_vector(EXCCODE_TLBL, handle_tlbl);
set_except_vector(EXCCODE_TLBS, handle_tlbs);
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index dda36fa26307..de096777172f 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -11,10 +11,11 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/mman.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/timekeeper_internal.h>
+#include <linux/vdso_datastore.h>
#include <asm/abi.h>
#include <asm/mips-cps.h>
@@ -23,20 +24,7 @@
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
-/* Kernel-provided data used by the VDSO. */
-static union vdso_data_store mips_vdso_data __page_aligned_data;
-struct vdso_data *vdso_data = mips_vdso_data.data;
-
-/*
- * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
- * what we map and where within the area they are mapped is determined at
- * runtime.
- */
-static struct page *no_pages[] = { NULL };
-static struct vm_special_mapping vdso_vvar_mapping = {
- .name = "[vvar]",
- .pages = no_pages,
-};
+static_assert(VDSO_NR_PAGES == __VDSO_PAGES);
static void __init init_vdso_image(struct mips_vdso_image *image)
{
@@ -90,7 +78,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
struct mm_struct *mm = current->mm;
- unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
+ unsigned long gic_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
struct vm_area_struct *vma;
int ret;
@@ -98,11 +86,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return -EINTR;
if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+ unsigned long unused;
+
/* Map delay slot emulation page */
- base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
- VM_READ | VM_EXEC |
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
- 0, NULL);
+ base = do_mmap(NULL, STACK_TOP, PAGE_SIZE, PROT_READ | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0, &unused,
+ NULL);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
@@ -118,8 +107,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* the counter registers at the start.
*/
gic_size = mips_gic_present() ? PAGE_SIZE : 0;
- vvar_size = gic_size + PAGE_SIZE;
- size = vvar_size + image->size;
+ size = gic_size + VDSO_NR_PAGES * PAGE_SIZE + image->size;
/*
* Find a region that's large enough for us to perform the
@@ -142,15 +130,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
*/
if (cpu_has_dc_aliases) {
base = __ALIGN_MASK(base, shm_align_mask);
- base += ((unsigned long)vdso_data - gic_size) & shm_align_mask;
+ base += ((unsigned long)vdso_k_time_data - gic_size) & shm_align_mask;
}
data_addr = base + gic_size;
- vdso_addr = data_addr + PAGE_SIZE;
+ vdso_addr = data_addr + VDSO_NR_PAGES * PAGE_SIZE;
- vma = _install_special_mapping(mm, base, vvar_size,
- VM_READ | VM_MAYREAD,
- &vdso_vvar_mapping);
+ vma = vdso_install_vvar_mapping(mm, data_addr);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
@@ -160,6 +146,17 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (gic_size) {
gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
gic_pfn = PFN_DOWN(__pa(gic_base));
+ static const struct vm_special_mapping gic_mapping = {
+ .name = "[gic]",
+ .pages = (struct page **) { NULL },
+ };
+
+ vma = _install_special_mapping(mm, base, gic_size, VM_READ | VM_MAYREAD,
+ &gic_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
pgprot_noncached(vma->vm_page_prot));
@@ -167,13 +164,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto out;
}
- /* Map data page. */
- ret = remap_pfn_range(vma, data_addr,
- virt_to_phys(vdso_data) >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot);
- if (ret)
- goto out;
-
/* Map VDSO image. */
vma = _install_special_mapping(mm, vdso_addr, image->size,
VM_READ | VM_EXEC |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 9ff55cb80a64..2b708fac8d2c 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -61,6 +61,7 @@ SECTIONS
/* read-only */
_text = .; /* Text and read-only data */
.text : {
+ HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 737d0d4fdcd3..2b67c44adab9 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
+#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
@@ -582,7 +583,7 @@ static int vpe_elfload(struct vpe *v)
struct module mod; /* so we can re-use the relocations code */
memset(&mod, 0, sizeof(struct module));
- strcpy(mod.name, "VPE loader");
+ strscpy(mod.name, "VPE loader");
hdr = (Elf_Ehdr *) v->pbuffer;
len = v->plen;