Diffstat (limited to 'arch/mips/kernel/process.c')
-rw-r--r--	arch/mips/kernel/process.c	179
1 file changed, 108 insertions(+), 71 deletions(-)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 339870ed92f7..b630604c577f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -9,53 +9,38 @@
* Copyright (C) 2004 Thiemo Seufer
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
+#include <linux/cpu.h>
#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-#include <linux/tick.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/export.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/personality.h>
-#include <linux/sys.h>
#include <linux/init.h>
-#include <linux/completion.h>
#include <linux/kallsyms.h>
-#include <linux/random.h>
-#include <linux/prctl.h>
+#include <linux/kernel.h>
#include <linux/nmi.h>
-#include <linux/cpu.h>
+#include <linux/personality.h>
+#include <linux/prctl.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
#include <asm/abi.h>
#include <asm/asm.h>
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
+#include <asm/exec.h>
#include <asm/fpu.h>
+#include <asm/inst.h>
#include <asm/irq.h>
-#include <asm/mips-cps.h>
+#include <asm/irq_regs.h>
+#include <asm/isadep.h>
#include <asm/msa.h>
-#include <asm/pgtable.h>
+#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <asm/elf.h>
-#include <asm/isadep.h>
-#include <asm/inst.h>
#include <asm/stacktrace.h>
-#include <asm/irq_regs.h>
#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
{
play_dead();
}
@@ -69,13 +54,15 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
unsigned long status;
/* New thread loses kernel privileges. */
- status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
+ status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
status |= KU_USER;
regs->cp0_status = status;
lose_fpu(0);
clear_thread_flag(TIF_MSA_CTX_LIVE);
clear_used_math();
+#ifdef CONFIG_MIPS_FP_SUPPORT
atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+#endif
init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
@@ -118,9 +105,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
/*
* Copy architecture-specific thread state
*/
-int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
unsigned long childksp;
@@ -131,17 +120,29 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
childregs = (struct pt_regs *) childksp - 1;
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
- p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
- if (unlikely(p->flags & PF_KTHREAD)) {
+ p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
+
+ /*
+ * New tasks lose permission to use the fpu. This accelerates context
+ * switching for most programs since they don't use the fpu.
+ */
+ clear_tsk_thread_flag(p, TIF_USEDFPU);
+ clear_tsk_thread_flag(p, TIF_USEDMSA);
+ clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ clear_tsk_thread_flag(p, TIF_FPUBOUND);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ if (unlikely(args->fn)) {
/* kernel thread */
unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs));
- ti->addr_limit = KERNEL_DS;
- p->thread.reg16 = usp; /* fn */
- p->thread.reg17 = kthread_arg;
+ p->thread.reg16 = (unsigned long)args->fn;
+ p->thread.reg17 = (unsigned long)args->fn_arg;
p->thread.reg29 = childksp;
p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#if defined(CONFIG_CPU_R3000)
status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
((status & (ST0_KUC | ST0_IEC)) << 2);
#else
@@ -157,26 +158,15 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
childregs->regs[2] = 0; /* Child gets zero as return value */
if (usp)
childregs->regs[29] = usp;
- ti->addr_limit = USER_DS;
p->thread.reg29 = (unsigned long) childregs;
p->thread.reg31 = (unsigned long) ret_from_fork;
- /*
- * New tasks lose permission to use the fpu. This accelerates context
- * switching for most programs since they don't use the fpu.
- */
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
- clear_tsk_thread_flag(p, TIF_USEDFPU);
- clear_tsk_thread_flag(p, TIF_USEDMSA);
- clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
-
-#ifdef CONFIG_MIPS_MT_FPAFF
- clear_tsk_thread_flag(p, TIF_FPUBOUND);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
+#ifdef CONFIG_MIPS_FP_SUPPORT
atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+#endif
if (clone_flags & CLONE_SETTLS)
ti->tp_value = tls;
@@ -200,6 +190,36 @@ struct mips_frame_info {
#define J_TARGET(pc,target) \
(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
+static inline int is_jr_ra_ins(union mips_instruction *ip)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * jr16 ra
+ * jr ra
+ */
+ if (mm_insn_16bit(ip->word >> 16)) {
+ if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
+ ip->mm16_r5_format.rt == mm_jr16_op &&
+ ip->mm16_r5_format.imm == 31)
+ return 1;
+ return 0;
+ }
+
+ if (ip->r_format.opcode == mm_pool32a_op &&
+ ip->r_format.func == mm_pool32axf_op &&
+ ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
+ ip->r_format.rt == 31)
+ return 1;
+ return 0;
+#else
+ if (ip->r_format.opcode == spec_op &&
+ ip->r_format.func == jr_op &&
+ ip->r_format.rs == 31)
+ return 1;
+ return 0;
+#endif
+}
+
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
@@ -275,7 +295,21 @@ static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
*poff = ip->i_format.simmediate / sizeof(ulong);
return 1;
}
-
+#ifdef CONFIG_CPU_LOONGSON64
+ if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
+ (ip->loongson3_lswc2_format.ls == 1) &&
+ (ip->loongson3_lswc2_format.fr == 0) &&
+ (ip->loongson3_lswc2_format.base == 29)) {
+ if (ip->loongson3_lswc2_format.rt == 31) {
+ *poff = ip->loongson3_lswc2_format.offset << 1;
+ return 1;
+ }
+ if (ip->loongson3_lswc2_format.rq == 31) {
+ *poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
+ return 1;
+ }
+ }
+#endif
return 0;
#endif
}
@@ -371,10 +405,8 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
- union mips_instruction insn, *ip;
- const unsigned int max_insns = 128;
+ union mips_instruction insn, *ip, *ip_end;
unsigned int last_insn_size = 0;
- unsigned int i;
bool saw_jump = false;
info->pc_offset = -1;
@@ -384,7 +416,9 @@ static int get_frame_info(struct mips_frame_info *info)
if (!ip)
goto err;
- for (i = 0; i < max_insns; i++) {
+ ip_end = (void *)ip + (info->func_size ? info->func_size : 512);
+
+ while (ip < ip_end) {
ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
@@ -398,7 +432,9 @@ static int get_frame_info(struct mips_frame_info *info)
last_insn_size = 4;
}
- if (!info->frame_size) {
+ if (is_jr_ra_ins(ip)) {
+ break;
+ } else if (!info->frame_size) {
is_sp_move_ins(&insn, &info->frame_size);
continue;
} else if (!saw_jump && is_jump_ins(ip)) {
@@ -477,7 +513,7 @@ static int __init frame_info_init(void)
/*
* Without schedule() frame info, result given by
- * thread_saved_pc() and get_wchan() are not reliable.
+ * thread_saved_pc() and __get_wchan() are not reliable.
*/
if (schedule_mfi.pc_offset < 0)
printk("Can't analyze schedule() prologue at %p\n", schedule);
@@ -618,9 +654,9 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
#endif
/*
- * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
+ * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
*/
-unsigned long get_wchan(struct task_struct *task)
+unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
@@ -628,8 +664,6 @@ unsigned long get_wchan(struct task_struct *task)
unsigned long ra = 0;
#endif
- if (!task || task == current || task->state == TASK_RUNNING)
- goto out;
if (!task_stack_page(task))
goto out;
@@ -650,8 +684,10 @@ unsigned long mips_stack_top(void)
{
unsigned long top = TASK_SIZE & PAGE_MASK;
- /* One page for branch delay slot "emulation" */
- top -= PAGE_SIZE;
+ if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+ /* One page for branch delay slot "emulation" */
+ top -= PAGE_SIZE;
+ }
/* Space for the VDSO, data page & GIC user page */
top -= PAGE_ALIGN(current->thread.abi->vdso->size);
@@ -676,12 +712,11 @@ unsigned long mips_stack_top(void)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= get_random_u32_below(PAGE_SIZE);
return sp & ALMASK;
}
-static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;
static void handle_backtrace(void *info)
@@ -690,6 +725,9 @@ static void handle_backtrace(void *info)
cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
+ CSD_INIT(handle_backtrace, NULL);
+
static void raise_backtrace(cpumask_t *mask)
{
call_single_data_t *csd;
@@ -709,14 +747,13 @@ static void raise_backtrace(cpumask_t *mask)
}
csd = &per_cpu(backtrace_csd, cpu);
- csd->func = handle_backtrace;
smp_call_function_single_async(cpu, csd);
}
}
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
- nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
+ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
}
int mips_get_process_fp_mode(struct task_struct *task)
@@ -822,10 +859,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
* scheduled in then it will already have picked up the new FP mode
* whilst doing so.
*/
- get_online_cpus();
+ cpus_read_lock();
for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
- put_online_cpus();
+ cpus_read_unlock();
return 0;
}