Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  143
1 file changed, 66 insertions(+), 77 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 952741f6d0f9..7c4b7ae714d4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -426,22 +426,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
*/
rcu_read_lock();
ucounts = task_ucounts(t);
- sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
- switch (sigpending) {
- case 1:
- if (likely(get_ucounts(ucounts)))
- break;
- fallthrough;
- case LONG_MAX:
- /*
- * we need to decrease the ucount in the userns tree on any
- * failure to avoid counts leaking.
- */
- dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
- rcu_read_unlock();
- return NULL;
- }
+ sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
rcu_read_unlock();
+ if (!sigpending)
+ return NULL;
if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
@@ -450,8 +438,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
}
if (unlikely(q == NULL)) {
- if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
- put_ucounts(ucounts);
+ dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
} else {
INIT_LIST_HEAD(&q->list);
q->flags = sigqueue_flags;
@@ -464,8 +451,8 @@ static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
- if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
- put_ucounts(q->ucounts);
+ if (q->ucounts) {
+ dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
q->ucounts = NULL;
}
kmem_cache_free(sigqueue_cachep, q);
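
The two hunks above fold the open-coded rlimit charge/rollback sequence into the new inc_rlimit_get_ucounts()/dec_rlimit_put_ucounts() helpers. A minimal sketch of the semantics the callers now rely on, inferred only from the removed lines (the real helpers live in kernel/ucount.c and may differ in detail):

/*
 * Sketch only -- not the kernel/ucount.c implementation. It mirrors the
 * open-coded logic removed from __sigqueue_alloc(): charge one pending
 * signal, pin the ucounts with a reference on the first charge, and roll
 * the charge back on overflow or reference failure so nothing leaks.
 */
static long sketch_inc_rlimit_get_ucounts(struct ucounts *ucounts,
					  enum ucount_type type)
{
	long count = inc_rlimit_ucounts(ucounts, type, 1);

	if (count == 1 && likely(get_ucounts(ucounts)))
		return count;		/* first charge pins the ucounts */
	if (count != 1 && count != LONG_MAX)
		return count;		/* nested charge, already pinned */

	/* overflow, or get_ucounts() failed: undo the charge */
	dec_rlimit_ucounts(ucounts, type, 1);
	return 0;
}

/* Companion sketch: uncharge, and drop the pinning reference with the last charge. */
static void sketch_dec_rlimit_put_ucounts(struct ucounts *ucounts,
					  enum ucount_type type)
{
	if (dec_rlimit_ucounts(ucounts, type, 1))
		put_ucounts(ucounts);
}
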
@@ -1336,6 +1323,7 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool
blocked = sigismember(&t->blocked, sig);
if (blocked || ignored || sigdfl) {
action->sa.sa_handler = SIG_DFL;
+ action->sa.sa_flags |= SA_IMMUTABLE;
if (blocked) {
sigdelset(&t->blocked, sig);
recalc_sigpending_and_wake(t);
@@ -1662,6 +1650,19 @@ void force_sig(int sig)
}
EXPORT_SYMBOL(force_sig);
+void force_fatal_sig(int sig)
+{
+ struct kernel_siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = SI_KERNEL;
+ info.si_pid = 0;
+ info.si_uid = 0;
+ force_sig_info_to_task(&info, current, true);
+}
+
/*
* When things go south during signal handling, we
* will force a SIGSEGV. And if the signal that caused
@@ -1670,15 +1671,10 @@ EXPORT_SYMBOL(force_sig);
*/
void force_sigsegv(int sig)
{
- struct task_struct *p = current;
-
- if (sig == SIGSEGV) {
- unsigned long flags;
- spin_lock_irqsave(&p->sighand->siglock, flags);
- p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- }
- force_sig(SIGSEGV);
+ if (sig == SIGSEGV)
+ force_fatal_sig(SIGSEGV);
+ else
+ force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr
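
force_fatal_sig(), added above, resets the action to SIG_DFL and marks it SA_IMMUTABLE before delivery, so the task can no longer catch, block, or ignore the signal; force_sigsegv() now uses it for the SIGSEGV-on-SIGSEGV case. A hedged illustration of a caller switching over; arch_handle_unrecoverable_fault() is a made-up name, not a function in this patch:

/* Hypothetical caller, for illustration only. */
static void arch_handle_unrecoverable_fault(struct pt_regs *regs)
{
	/*
	 * force_sig(SIGSEGV) would honour a userspace SIGSEGV handler;
	 * force_fatal_sig(SIGSEGV) forces the default (fatal) action and
	 * marks it SA_IMMUTABLE, so it cannot be rearmed.
	 */
	force_fatal_sig(SIGSEGV);
}
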
@@ -2158,40 +2154,6 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
spin_unlock_irqrestore(&sighand->siglock, flags);
}
-static inline bool may_ptrace_stop(void)
-{
- if (!likely(current->ptrace))
- return false;
- /*
- * Are we in the middle of do_coredump?
- * If so and our tracer is also part of the coredump stopping
- * is a deadlock situation, and pointless because our tracer
- * is dead so don't allow us to stop.
- * If SIGKILL was already sent before the caller unlocked
- * ->siglock we must see ->core_state != NULL. Otherwise it
- * is safe to enter schedule().
- *
- * This is almost outdated, a task with the pending SIGKILL can't
- * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
- * after SIGKILL was already dequeued.
- */
- if (unlikely(current->mm->core_state) &&
- unlikely(current->mm == current->parent->mm))
- return false;
-
- return true;
-}
-
-/*
- * Return non-zero if there is a SIGKILL that should be waking us up.
- * Called with the siglock held.
- */
-static bool sigkill_pending(struct task_struct *tsk)
-{
- return sigismember(&tsk->pending.signal, SIGKILL) ||
- sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
-}
-
/*
* This must be called with current->sighand->siglock held.
*
@@ -2209,7 +2171,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
{
bool gstop_done = false;
- if (arch_ptrace_stop_needed(exit_code, info)) {
+ if (arch_ptrace_stop_needed()) {
/*
* The arch code has something special to do before a
* ptrace stop. This is allowed to block, e.g. for faults
@@ -2217,17 +2179,16 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
* calling arch_ptrace_stop, so we must release it now.
* To preserve proper semantics, we must do this before
* any signal bookkeeping like checking group_stop_count.
- * Meanwhile, a SIGKILL could come in before we retake the
- * siglock. That must prevent us from sleeping in TASK_TRACED.
- * So after regaining the lock, we must check for SIGKILL.
*/
spin_unlock_irq(&current->sighand->siglock);
- arch_ptrace_stop(exit_code, info);
+ arch_ptrace_stop();
spin_lock_irq(&current->sighand->siglock);
- if (sigkill_pending(current))
- return;
}
+ /*
+ * schedule() will not sleep if there is a pending signal that
+ * can awaken the task.
+ */
set_special_state(TASK_TRACED);
/*
@@ -2273,7 +2234,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
spin_unlock_irq(&current->sighand->siglock);
read_lock(&tasklist_lock);
- if (may_ptrace_stop()) {
+ if (likely(current->ptrace)) {
/*
* Notify parents of the stop.
*
@@ -2752,7 +2713,8 @@ relock:
if (!signr)
break; /* will return 0 */
- if (unlikely(current->ptrace) && signr != SIGKILL) {
+ if (unlikely(current->ptrace) && (signr != SIGKILL) &&
+ !(sighand->action[signr-1].sa.sa_flags & SA_IMMUTABLE)) {
signr = ptrace_signal(signr, &ksig->info);
if (!signr)
continue;
@@ -4102,6 +4064,10 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
k = &p->sighand->action[sig-1];
spin_lock_irq(&p->sighand->siglock);
+ if (k->sa.sa_flags & SA_IMMUTABLE) {
+ spin_unlock_irq(&p->sighand->siglock);
+ return -EINVAL;
+ }
if (oact)
*oact = *k;
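
The check above makes do_sigaction() refuse to modify an action once the kernel has marked it SA_IMMUTABLE. A userspace sketch of the visible error path; SA_IMMUTABLE is kernel-internal (set via force_sig_info_to_task()), userspace cannot set it, and a task that received a forced fatal signal is normally about to exit, so the window to observe EINVAL is narrow:

/* Illustration only: what sigaction() reports once the kernel has marked
 * the action SA_IMMUTABLE. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sigaction sa = { .sa_handler = SIG_IGN };

	if (sigaction(SIGSEGV, &sa, NULL) == -1 && errno == EINVAL)
		printf("SIGSEGV action is immutable: %s\n", strerror(errno));
	else
		printf("SIGSEGV action changed normally\n");
	return 0;
}
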
@@ -4151,11 +4117,29 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
return 0;
}
+#ifdef CONFIG_DYNAMIC_SIGFRAME
+static inline void sigaltstack_lock(void)
+ __acquires(&current->sighand->siglock)
+{
+ spin_lock_irq(&current->sighand->siglock);
+}
+
+static inline void sigaltstack_unlock(void)
+ __releases(&current->sighand->siglock)
+{
+ spin_unlock_irq(&current->sighand->siglock);
+}
+#else
+static inline void sigaltstack_lock(void) { }
+static inline void sigaltstack_unlock(void) { }
+#endif
+
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
size_t min_ss_size)
{
struct task_struct *t = current;
+ int ret = 0;
if (oss) {
memset(oss, 0, sizeof(stack_t));
@@ -4179,19 +4163,24 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
ss_mode != 0))
return -EINVAL;
+ sigaltstack_lock();
if (ss_mode == SS_DISABLE) {
ss_size = 0;
ss_sp = NULL;
} else {
if (unlikely(ss_size < min_ss_size))
- return -ENOMEM;
+ ret = -ENOMEM;
+ if (!sigaltstack_size_valid(ss_size))
+ ret = -ENOMEM;
}
-
- t->sas_ss_sp = (unsigned long) ss_sp;
- t->sas_ss_size = ss_size;
- t->sas_ss_flags = ss_flags;
+ if (!ret) {
+ t->sas_ss_sp = (unsigned long) ss_sp;
+ t->sas_ss_size = ss_size;
+ t->sas_ss_flags = ss_flags;
+ }
+ sigaltstack_unlock();
}
- return 0;
+ return ret;
}
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
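
With the do_sigaltstack() hunks above, size validation and the sas_ss_* update happen together under siglock (when CONFIG_DYNAMIC_SIGFRAME is enabled) and failures are reported through ret instead of an early return. From userspace the contract for the static minimum is unchanged: an undersized stack is rejected with ENOMEM, and on dynamic-sigframe kernels sigaltstack_size_valid() can additionally reject stacks smaller than the hardware-dependent frame size. A small illustrative check:

/* Illustration only: an altstack below the minimum size is rejected. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>

int main(void)
{
	char buf[512];			/* deliberately smaller than MINSIGSTKSZ */
	stack_t ss = {
		.ss_sp = buf,
		.ss_flags = 0,
		.ss_size = sizeof(buf),
	};

	if (sigaltstack(&ss, NULL) == -1 && errno == ENOMEM)
		printf("undersized altstack rejected with ENOMEM\n");
	return 0;
}
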